diff --git a/.circleci/config.yml b/.circleci/config.yml index 2ce2e610724c6..3c854e5873a14 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -15,7 +15,7 @@ experimental: templates: job_template: &job_template docker: - - image: gcr.io/datadoghq/agent-circleci-runner:v50263243-1a30c934 + - image: gcr.io/datadoghq/agent-circleci-runner:v53759313-14a41bca environment: USE_SYSTEM_LIBS: "1" working_directory: /go/src/github.com/DataDog/datadog-agent diff --git a/.copyright-overrides.yml b/.copyright-overrides.yml index 46d30394c173d..8b2abfaad8be4 100644 --- a/.copyright-overrides.yml +++ b/.copyright-overrides.yml @@ -378,3 +378,15 @@ github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp/multiendpoint: Copyright 2024 github.com/NVIDIA/go-nvml/*: Copyright 2023 NVIDIA CORPORATION github.com/jonboulle/clockwork: Copyright 2014 Nell Boulle + +github.com/nozzle/throttler: Copyright 2018 Derek Perkins +github.com/openvex/discovery/*: Copyright 2023 The OpenVEX Authors +github.com/sassoftware/relic/*: Copyright (c) SAS Institute Inc. +github.com/transparency-dev/merkle: Copyright 2017 Google LLC. All Rights Reserved. +github.com/transparency-dev/merkle/compact: Copyright 2019 Google LLC. All Rights Reserved. +github.com/transparency-dev/merkle/proof: +- Copyright 2017 Google LLC. All Rights Reserved. +- Copyright 2022 Google LLC. All Rights Reserved. +github.com/transparency-dev/merkle/rfc6962: Copyright 2016 Google LLC. All Rights Reserved. + +gopkg.in/go-jose/go-jose.v2: Copyright 2014 Square Inc. \ No newline at end of file diff --git a/.ddqa/config.toml b/.ddqa/config.toml index bcb6804917e41..897db37c31379 100644 --- a/.ddqa/config.toml +++ b/.ddqa/config.toml @@ -103,14 +103,6 @@ github_team = "agent-integrations" github_labels = ["team/integrations"] exclude_members = ["ofek", "alopezz"] -[teams."Platform Integrations"] -jira_project = "PLINT" -jira_issue_type = "Task" -jira_statuses = ["To Do", "In Progress", "Done"] -github_team = "platform-integrations" -github_labels = ["team/platform-integrations"] -exclude_members = ["hithwen"] - [teams."APM"] jira_project = "APMSP" jira_component = "Trace Agent" diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 24c15c4a3b6cd..ea9e9115fb5dc 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -27,8 +27,8 @@ /CHANGELOG-DCA.rst @DataDog/container-integrations @DataDog/container-platform /CHANGELOG-INSTALLSCRIPT.rst @DataDog/agent-delivery @DataDog/container-ecosystems -/*.md @DataDog/agent-devx-infra @DataDog/documentation -/NOTICE @DataDog/agent-delivery @DataDog/documentation +/*.md @DataDog/agent-devx-infra +/NOTICE @DataDog/agent-delivery /LICENSE* # do not notify anyone @@ -42,7 +42,7 @@ /repository.datadog.yml @DataDog/agent-devx-infra /generate_tools.go @DataDog/agent-devx-infra /service.datadog.yaml @DataDog/agent-delivery -/static-analysis.datadog.yml @DataDog/software-integrity-and-trust @DataDog/agent-devx-infra +/static-analysis.datadog.yml @DataDog/software-integrity-and-trust @DataDog/sdlc-security @DataDog/agent-devx-infra /modules.yml @DataDog/agent-shared-components # if go.work changes then either .go-version or modules.yml changed too, so ASC might as well own it @@ -103,7 +103,6 @@ /.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml @DataDog/agent-delivery /.gitlab/internal_kubernetes_deploy/rc_kubernetes_deploy.yml @DataDog/agent-delivery /.gitlab/package_deps_build/package_deps_build.yml @DataDog/agent-devx-infra @DataDog/ebpf-platform 
-/.gitlab/powershell_script_signing/powershell_script_signing.yml @DataDog/agent-delivery @DataDog/windows-agent /.gitlab/source_test/golang_deps_diff.yml @DataDog/agent-devx-infra @DataDog/agent-devx-loops /.gitlab/source_test/* @DataDog/agent-devx-infra /.gitlab/source_test/linux.yml @DataDog/agent-devx-infra @DataDog/agent-devx-loops @@ -112,7 +111,7 @@ /.gitlab/source_test/slack.yml @DataDog/agent-devx-infra @DataDog/agent-devx-loops /.gitlab/source_test/tooling_unit_tests.yml @DataDog/agent-devx-infra @DataDog/agent-devx-loops -/.gitlab/binary_build/cluster_agent_cloudfoundry.yml @DataDog/platform-integrations @DataDog/agent-delivery +/.gitlab/binary_build/cluster_agent_cloudfoundry.yml @DataDog/agent-integrations @DataDog/agent-delivery /.gitlab/binary_build/cluster_agent.yml @DataDog/container-integrations @DataDog/agent-delivery /.gitlab/binary_build/fakeintake.yml @DataDog/agent-devx-loops /.gitlab/binary_build/otel_agent.yml @DataDog/opentelemetry @DataDog/agent-delivery @@ -129,7 +128,7 @@ /.gitlab/deploy_packages/oci.yml @DataDog/agent-delivery @DataDog/fleet /.gitlab/deploy_packages/windows.yml @DataDog/agent-delivery @DataDog/windows-agent /.gitlab/deploy_packages/winget.yml @DataDog/agent-delivery @DataDog/windows-agent -/.gitlab/deploy_packages/cluster_agent_cloudfoundry.yml @DataDog/platform-integrations @DataDog/agent-devx-infra +/.gitlab/deploy_packages/cluster_agent_cloudfoundry.yml @DataDog/agent-integrations @DataDog/agent-devx-infra /.gitlab/deploy_packages/e2e.yml @DataDog/agent-devx-loops @DataDog/agent-e2e-testing @DataDog/fleet /.gitlab/deps_build/ @DataDog/ebpf-platform @DataDog/agent-delivery @DataDog/windows-agent @@ -146,8 +145,6 @@ /.gitlab/functional_test/serverless.yml @DataDog/serverless @Datadog/serverless-aws @DataDog/agent-devx-infra /.gitlab/functional_test/oracle.yml @DataDog/agent-devx-infra @DataDog/database-monitoring -/.gitlab/powershell_script_deploy @DataDog/agent-delivery @DataDog/windows-agent - /.gitlab/choco_build/choco_build.yml @DataDog/agent-delivery @DataDog/windows-agent /.gitlab/integration_test/windows.yml @DataDog/agent-devx-infra @DataDog/windows-agent @@ -215,8 +212,8 @@ /cmd/agent/dist/conf.d/win32_event_log.d/ @DataDog/windows-agent /cmd/agent/install*.sh @DataDog/container-ecosystems @DataDog/agent-delivery /cmd/cluster-agent/ @DataDog/container-platform -/cmd/cluster-agent-cloudfoundry/ @DataDog/platform-integrations -/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go @DataDog/platform-integrations +/cmd/cluster-agent-cloudfoundry/ @DataDog/agent-integrations +/cmd/cluster-agent/api/v1/cloudfoundry_metadata.go @DataDog/agent-integrations /cmd/cws-instrumentation/ @DataDog/agent-security /cmd/dogstatsd/ @DataDog/agent-metrics-logs /cmd/otel-agent/ @DataDog/opentelemetry @@ -255,14 +252,14 @@ /Dockerfiles/agent-ot @DataDog/opentelemetry /Dockerfiles/agent/bouncycastle-fips @DataDog/agent-metrics-logs -/docs/ @DataDog/documentation @DataDog/agent-devx-loops -/docs/dev/checks/ @DataDog/documentation @DataDog/agent-metrics-logs +/docs/ @DataDog/agent-devx-loops +/docs/dev/checks/ @DataDog/agent-metrics-logs /docs/cloud-workload-security/ @DataDog/documentation @DataDog/agent-security -/docs/public/components/ @DataDog/documentation @DataDog/agent-shared-components -/docs/public/hostname/ @DataDog/documentation @DataDog/agent-shared-components -/docs/public/architecture/dogstatsd/ @DataDog/documentation @DataDog/agent-metrics-logs -/docs/public/guidelines/deprecated-components-documentation/ @DataDog/documentation 
@DataDog/agent-shared-components +/docs/public/components/ @DataDog/agent-shared-components +/docs/public/hostname/ @DataDog/agent-shared-components +/docs/public/architecture/dogstatsd/ @DataDog/agent-metrics-logs +/docs/public/guidelines/deprecated-components-documentation/ @DataDog/agent-shared-components /google-marketplace/ @DataDog/container-ecosystems @@ -276,7 +273,7 @@ /Makefile.trace @DataDog/agent-apm /omnibus/ @DataDog/agent-delivery -/omnibus/python-scripts/ @DataDog/platform-integrations +/omnibus/python-scripts/ @DataDog/agent-shared-components /omnibus/config/patches/openscap/ @DataDog/agent-cspm /omnibus/config/software/datadog-agent-integrations-*.rb @DataDog/agent-integrations /omnibus/config/software/datadog-security-agent*.rb @Datadog/agent-security @DataDog/agent-delivery @@ -308,7 +305,7 @@ /comp/systray @DataDog/windows-agent /comp/trace @DataDog/agent-apm /comp/updater @DataDog/fleet @DataDog/windows-agent -/comp/agent/cloudfoundrycontainer @DataDog/platform-integrations +/comp/agent/cloudfoundrycontainer @DataDog/agent-integrations /comp/agent/jmxlogger @DataDog/agent-metrics-logs /comp/aggregator/diagnosesendermanager @DataDog/agent-shared-components /comp/checks/agentcrashdetect @DataDog/windows-kernel-integrations @@ -325,7 +322,8 @@ /comp/haagent @DataDog/ndm-core /comp/languagedetection/client @DataDog/container-platform /comp/rdnsquerier @DataDog/ndm-integrations -/comp/serializer/compression @DataDog/agent-metrics-logs +/comp/serializer/logscompression @DataDog/agent-processing-and-routing +/comp/serializer/metricscompression @DataDog/agent-processing-and-routing /comp/snmpscan @DataDog/ndm-core # END COMPONENTS @@ -345,6 +343,7 @@ /pkg/cli/subcommands/clusterchecks @DataDog/container-platform /pkg/discovery/ @DataDog/universal-service-monitoring /pkg/errors/ @DataDog/agent-shared-components +/pkg/fips @DataDog/agent-shared-components /pkg/gohai @DataDog/agent-shared-components /pkg/gpu/ @DataDog/ebpf-platform /pkg/jmxfetch/ @DataDog/agent-metrics-logs @@ -367,14 +366,14 @@ /pkg/trace/telemetry/ @DataDog/apm-trace-storage /pkg/trace/transform/ @DataDog/opentelemetry /comp/core/autodiscovery/listeners/ @DataDog/container-platform -/comp/core/autodiscovery/listeners/cloudfoundry*.go @DataDog/platform-integrations +/comp/core/autodiscovery/listeners/cloudfoundry*.go @DataDog/agent-integrations /comp/core/autodiscovery/listeners/snmp*.go @DataDog/ndm-core /comp/core/autodiscovery/providers/ @DataDog/container-platform /comp/core/autodiscovery/providers/file*.go @DataDog/agent-metrics-logs /comp/core/autodiscovery/providers/config_reader*.go @DataDog/container-platform @DataDog/agent-metrics-logs -/comp/core/autodiscovery/providers/cloudfoundry*.go @DataDog/platform-integrations +/comp/core/autodiscovery/providers/cloudfoundry*.go @DataDog/agent-integrations /comp/core/autodiscovery/providers/remote_config*.go @DataDog/remote-config -/pkg/cloudfoundry @Datadog/platform-integrations +/pkg/cloudfoundry @Datadog/agent-integrations /pkg/clusteragent/ @DataDog/container-platform /pkg/clusteragent/autoscaling/ @DataDog/container-integrations /pkg/clusteragent/admission/mutate/autoscaling @DataDog/container-integrations @@ -398,18 +397,18 @@ /pkg/collector/corechecks/gpu/ @DataDog/ebpf-platform /pkg/collector/corechecks/network-devices/ @DataDog/ndm-integrations /pkg/collector/corechecks/orchestrator/ @DataDog/container-app -/pkg/collector/corechecks/net/ @DataDog/platform-integrations +/pkg/collector/corechecks/net/ @DataDog/agent-shared-components 
/pkg/collector/corechecks/oracle @DataDog/database-monitoring /pkg/collector/corechecks/sbom/ @DataDog/container-integrations /pkg/collector/corechecks/servicediscovery/ @DataDog/universal-service-monitoring /pkg/collector/corechecks/snmp/ @DataDog/ndm-core -/pkg/collector/corechecks/system/ @DataDog/platform-integrations -/pkg/collector/corechecks/system/**/*_windows*.go @DataDog/platform-integrations @DataDog/windows-agent +/pkg/collector/corechecks/system/ @DataDog/agent-shared-components +/pkg/collector/corechecks/system/**/*_windows*.go @DataDog/agent-shared-components @DataDog/windows-agent /pkg/collector/corechecks/system/wincrashdetect/ @DataDog/windows-kernel-integrations /pkg/collector/corechecks/system/winkmem/ @DataDog/windows-agent /pkg/collector/corechecks/system/winproc/ @DataDog/windows-agent /pkg/collector/corechecks/systemd/ @DataDog/agent-integrations -/pkg/collector/corechecks/nvidia/ @DataDog/platform-integrations +/pkg/collector/corechecks/nvidia/ @DataDog/agent-shared-components /pkg/config/ @DataDog/agent-shared-components /pkg/config/config_template.yaml @DataDog/agent-shared-components @DataDog/documentation /pkg/config/setup/apm.go @DataDog/agent-apm @@ -423,15 +422,16 @@ /pkg/config/setup/system_probe_cws_windows.go @DataDog/windows-kernel-integrations /pkg/config/setup/security_agent.go @DataDog/agent-security /pkg/config/remote/ @DataDog/remote-config -/pkg/config/remote/meta/ @DataDog/remote-config @DataDog/software-integrity-and-trust +/pkg/config/remote/meta/ @DataDog/remote-config @DataDog/software-integrity-and-trust @DataDog/sdlc-security /pkg/containerlifecycle/ @Datadog/container-integrations /pkg/diagnose/ @Datadog/container-platform /pkg/diagnose/connectivity/ @DataDog/agent-shared-components -/pkg/diagnose/ports/ @DataDog/agent-shared-components +/pkg/diagnose/ports/ @DataDog/agent-shared-components +/pkg/diagnose/ports/*windows*.go @DataDog/windows-agent /pkg/eventmonitor/ @DataDog/ebpf-platform @DataDog/agent-security /pkg/dynamicinstrumentation/ @DataDog/debugger /pkg/flare/ @DataDog/agent-shared-components -/pkg/flare/manifests.go @DataDog/container-ecosystems +/pkg/flare/clusteragent/manifests.go @DataDog/container-ecosystems /pkg/flare/*_win.go @Datadog/windows-agent /pkg/flare/*_windows.go @Datadog/windows-agent /pkg/flare/*_windows_test.go @Datadog/windows-agent @@ -449,21 +449,26 @@ /pkg/tagset/ @DataDog/agent-shared-components /pkg/util/ @DataDog/agent-shared-components /pkg/util/aggregatingqueue @DataDog/container-integrations @DataDog/container-platform -/pkg/util/cloudproviders/cloudfoundry/ @DataDog/platform-integrations +/pkg/util/cloudproviders/cloudfoundry/ @DataDog/agent-integrations /pkg/util/clusteragent/ @DataDog/container-platform /pkg/util/containerd/ @DataDog/container-integrations /pkg/util/containers/ @DataDog/container-integrations /pkg/util/crio/ @DataDog/container-integrations /pkg/util/docker/ @DataDog/container-integrations /pkg/util/ecs/ @DataDog/container-integrations +/pkg/util/encoding/ @DataDog/ebpf-platform /pkg/util/funcs/ @DataDog/ebpf-platform /pkg/util/gpu/ @DataDog/container-platform /pkg/util/kernel/ @DataDog/ebpf-platform /pkg/util/safeelf/ @DataDog/ebpf-platform +/pkg/util/slices/ @DataDog/ebpf-platform /pkg/util/ktime @DataDog/agent-security /pkg/util/kubernetes/ @DataDog/container-integrations @DataDog/container-platform @DataDog/container-app /pkg/util/podman/ @DataDog/container-integrations +/pkg/util/port/ @DataDog/agent-shared-components +/pkg/util/port/portlist/*windows*.go @DataDog/windows-agent 
/pkg/util/prometheus @DataDog/container-integrations +/pkg/util/tags/ @DataDog/container-platform /pkg/util/trivy/ @DataDog/container-integrations @DataDog/agent-security /pkg/util/uuid/ @DataDog/agent-shared-components /pkg/util/cgroups/ @DataDog/container-integrations @@ -542,7 +547,8 @@ /pkg/snmp/ @DataDog/ndm-core /pkg/tagger/ @DataDog/container-platform /pkg/windowsdriver/ @DataDog/windows-kernel-integrations -/comp/core/workloadmeta/collectors/internal/cloudfoundry @DataDog/platform-integrations +/comp/core/workloadmeta/collectors/internal/cloudfoundry @DataDog/agent-integrations +/comp/core/workloadmeta/collectors/internal/nvml @DataDog/ebpf-platform /pkg/sbom/ @DataDog/container-integrations @DataDog/agent-security /pkg/internaltelemetry @DataDog/windows-kernel-integrations @DataDog/fleet /pkg/networkpath/ @DataDog/network-device-monitoring @DataDog/Networks @@ -560,7 +566,7 @@ /tasks/dogstatsd.py @DataDog/agent-metrics-logs /tasks/update_go.py @DataDog/agent-shared-components /tasks/unit_tests/update_go_tests.py @DataDog/agent-shared-components -/tasks/cluster_agent_cloudfoundry.py @DataDog/platform-integrations +/tasks/cluster_agent_cloudfoundry.py @DataDog/agent-integrations /tasks/new_e2e_tests.py @DataDog/agent-e2e-testing @DataDog/agent-devx-loops /tasks/process_agent.py @DataDog/container-intake /tasks/system_probe.py @DataDog/ebpf-platform @@ -608,6 +614,7 @@ /test/new-e2e/tests/agent-subcommands @DataDog/agent-shared-components /test/new-e2e/tests/containers @DataDog/container-integrations @DataDog/container-platform /test/new-e2e/tests/discovery @DataDog/universal-service-monitoring +/test/new-e2e/tests/fips-compliance @DataDog/agent-shared-components /test/new-e2e/tests/ha-agent @DataDog/ndm-core /test/new-e2e/tests/language-detection @DataDog/container-intake /test/new-e2e/tests/ndm @DataDog/ndm-core diff --git a/.github/dependabot.yaml b/.github/dependabot.yaml index 1af1fdc9fdcc5..f8f32aa098443 100644 --- a/.github/dependabot.yaml +++ b/.github/dependabot.yaml @@ -11,20 +11,9 @@ updates: # These dependencies are replaced in the main go.mod file. # They are ignored since bumping them would have no effect on the build. # To update them automatically, it is necessary to remove the `replace` directive. - - dependency-name: k8s.io/* - - dependency-name: github.com/cihub/seelog - - dependency-name: github.com/containerd/cgroups - - dependency-name: github.com/containerd/containerd - - dependency-name: github.com/coreos/go-systemd - # We only replace github.com/docker/distribution but we ignore all Docker dependencies. - - dependency-name: github.com/docker/* - - dependency-name: github.com/florianl/go-conntrack - - dependency-name: github.com/iovisor/gobpf - - dependency-name: github.com/lxn/walk - - dependency-name: github.com/mholt/archiver - - dependency-name: github.com/prometheus/client_golang + - dependency-name: github.com/cihub/seelog # https://github.com/DataDog/datadog-agent/pull/5647 - dependency-name: github.com/spf13/cast - - dependency-name: github.com/ugorji/go + # We only replace github.com/docker/distribution but we ignore all Docker dependencies. # Ignore internal modules - dependency-name: github.com/DataDog/datadog-agent/* # Ignore golang.org/x/... 
deps to avoid noise, they are updated together, pretty regularly @@ -45,6 +34,11 @@ updates: aws-sdk-go-v2: patterns: - "github.com/aws/aws-sdk-go-v2*" + k8s-io: + patterns: + - "k8s.io/*" + update-types: + - patch - package-ecosystem: gomod directory: /pkg/trace labels: @@ -167,6 +161,13 @@ updates: - dependency-name: github.com/pulumi* # Ignore golang.org/x/... deps to avoid noise, they are updated together, pretty regularly - dependency-name: golang.org/x/* + groups: + aws-sdk-go-v2: + patterns: + - "github.com/aws/aws-sdk-go-v2*" + k8s-io: + patterns: + - "k8s.io/*" schedule: interval: weekly diff --git a/.github/workflows/backport-pr.yml b/.github/workflows/backport-pr.yml index 3acf8cda7ebf2..a70bc2f9c723b 100644 --- a/.github/workflows/backport-pr.yml +++ b/.github/workflows/backport-pr.yml @@ -24,7 +24,7 @@ jobs: contents: write pull-requests: write steps: - - uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0 + - uses: actions/create-github-app-token@c1a285145b9d317df6ced56c09f525b5c2b6f755 # v1.11.1 id: app-token with: app-id: ${{ vars.DD_GITHUB_TOKEN_GENERATOR_APP_ID }} diff --git a/.github/workflows/buildimages-update.yml b/.github/workflows/buildimages-update.yml index 77e03aaea03e1..419ec2016efca 100644 --- a/.github/workflows/buildimages-update.yml +++ b/.github/workflows/buildimages-update.yml @@ -62,8 +62,7 @@ jobs: - name: Setup Python and pip uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - # use Python < 3.12 so that distutil is still available by default - python-version: 3.11 + python-version-file: .python-version cache: "pip" - uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 diff --git a/.github/workflows/chase_release_managers.yml b/.github/workflows/chase_release_managers.yml index 475068736efea..e5c1281d7f6b6 100644 --- a/.github/workflows/chase_release_managers.yml +++ b/.github/workflows/chase_release_managers.yml @@ -22,7 +22,7 @@ jobs: - name: Install python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: "pip" - name: Install Python dependencies run: | diff --git a/.github/workflows/code_review_complexity.yml b/.github/workflows/code_review_complexity.yml index 3f36b387e0f34..9ce2d5590370a 100644 --- a/.github/workflows/code_review_complexity.yml +++ b/.github/workflows/code_review_complexity.yml @@ -27,7 +27,7 @@ jobs: - name: Setup python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.12 + python-version-file: .python-version cache: 'pip' cache-dependency-path: '**/requirements*.txt' - name: Install dependencies diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 958e4cbbb1a5c..cce7b79bfe9bb 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -31,7 +31,7 @@ jobs: - name: Setup Python3 uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: "3.12.6" + python-version-file: .python-version cache: "pip" - run: pip3 install -r requirements.txt diff --git a/.github/workflows/collector-generate-and-update.yml b/.github/workflows/collector-generate-and-update.yml index 0fae5d04dbd15..851f9cf758111 100644 --- a/.github/workflows/collector-generate-and-update.yml +++ b/.github/workflows/collector-generate-and-update.yml @@ -9,27 +9,22 @@ jobs: update-and-generate: runs-on: 
ubuntu-latest permissions: + contents: write pull-requests: write steps: - name: Checkout repository uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - - name: Read tool versions - id: tool-versions - run: | - echo "PYTHON_VERSION=$(cat .python-version)" >> $GITHUB_ENV - echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV - - name: Set up Python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: ${{ env.PYTHON_VERSION }} + python-version-file: .python-version cache: 'pip' - name: Set up Go uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: - go-version: ${{ env.GO_VERSION }} + go-version-file: .go-version - name: Install Dependencies run: | diff --git a/.github/workflows/create_rc_pr.yml b/.github/workflows/create_rc_pr.yml index 66fbf9462a168..caba7218cb0d2 100644 --- a/.github/workflows/create_rc_pr.yml +++ b/.github/workflows/create_rc_pr.yml @@ -21,27 +21,34 @@ jobs: warning: ${{ steps.warning.outputs.value }} steps: - name: Checkout repository - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 with: sparse-checkout: 'tasks' persist-credentials: false - name: Install python - if: ${{ env.IS_AGENT6_RELEASE == 'false' }} uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: "pip" - name: Install Python dependencies - if: ${{ env.IS_AGENT6_RELEASE == 'false' }} run: | python -m pip install --upgrade pip pip install -r requirements.txt pip install -r tasks/libs/requirements-github.txt pip install -r tasks/requirements_release_tasks.txt + - name: Check previous agent 6 RC status + if: ${{ env.IS_AGENT6_RELEASE == 'true' }} + env: + DD_SITE: 'datadoghq.com' + DD_API_KEY: ${{ secrets.DD_API_KEY }} + DD_APP_KEY: ${{ secrets.DD_APP_KEY }} + SLACK_DATADOG_AGENT_CI_WEBHOOK: ${{ secrets.SLACK_DATADOG_AGENT_CI_WEBHOOK }} + run: | + inv -e release.check-previous-agent6-rc + - name: Determine the release active branches id: branches run: | @@ -76,7 +83,7 @@ jobs: - name: Install python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: "pip" - name: Install Python dependencies @@ -107,7 +114,7 @@ jobs: MATRIX: ${{ matrix.value }} run: | if ${{ env.IS_AGENT6_RELEASE == 'true' }}; then - inv -e release.create-rc -r "$MATRIX" --slack-webhook=${{ secrets.AGENT_RELEASE_SYNC_SLACK_WEBHOOK }} --patch-version + inv -e release.create-rc -r "$MATRIX" --slack-webhook=${{ secrets.AGENT6_RELEASE_SLACK_WEBHOOK }} --patch-version else inv -e release.create-rc -r "$MATRIX" --slack-webhook=${{ secrets.AGENT_RELEASE_SYNC_SLACK_WEBHOOK }} fi diff --git a/.github/workflows/create_release_schedule.yml b/.github/workflows/create_release_schedule.yml index 4b749ba3bdc97..9ddd708643543 100644 --- a/.github/workflows/create_release_schedule.yml +++ b/.github/workflows/create_release_schedule.yml @@ -7,8 +7,8 @@ on: description: 'Full version of the release to schedule (e.g. 7.31.0)' required: true type: string - freeze_date: - description: 'Isoformat date when we freeze CI (e.g. 2023-12-31)' + cutoff_date: + description: 'Isoformat date when the cut-off happens (e.g.
2023-12-31)' required: true type: string @@ -27,7 +27,7 @@ jobs: - name: Install python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: "pip" - name: Install Python dependencies run: | @@ -38,6 +38,6 @@ jobs: ATLASSIAN_USERNAME : ${{ secrets.ATLASSIAN_USERNAME }} ATLASSIAN_PASSWORD : ${{ secrets.ATLASSIAN_PASSWORD }} INPUT_VERSION: ${{ github.event.inputs.version }} - FREEZE_DATE: ${{ github.event.inputs.freeze_date }} + CUTOFF_DATE: ${{ github.event.inputs.cutoff_date }} run: | - inv -e release.create-schedule --version "$INPUT_VERSION" --freeze-date "$FREEZE_DATE" + inv -e release.create-schedule --version "$INPUT_VERSION" --cutoff-date "$CUTOFF_DATE" diff --git a/.github/workflows/cws-btfhub-sync.yml b/.github/workflows/cws-btfhub-sync.yml index 1737720006753..5d3a2a163e220 100644 --- a/.github/workflows/cws-btfhub-sync.yml +++ b/.github/workflows/cws-btfhub-sync.yml @@ -8,11 +8,6 @@ on: required: false default: 'main' type: string - force_refresh: - description: 'Force refresh of the constants' - required: false - default: 'false' - type: boolean schedule: - cron: '30 4 * * 5' # at 4:30 UTC on Friday @@ -68,7 +63,7 @@ jobs: - name: Install python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: 'pip' - run: pip install -r requirements.txt @@ -83,19 +78,11 @@ jobs: echo "ARTIFACT_NAME=constants-${{ matrix.cone }}" | tr '/' '-' >> $GITHUB_OUTPUT - name: Sync constants - if: ${{ !inputs.force_refresh }} env: ARTIFACT_NAME: ${{ steps.artifact-name.outputs.ARTIFACT_NAME }} run: | inv -e security-agent.generate-btfhub-constants --archive-path=./dev/dist/archive --output-path=./"$ARTIFACT_NAME".json - - name: Force sync constants - if: ${{ inputs.force_refresh }} - env: - ARTIFACT_NAME: ${{ steps.artifact-name.outputs.ARTIFACT_NAME }} - run: | - inv -e security-agent.generate-btfhub-constants --archive-path=./dev/dist/archive --output-path=./"$ARTIFACT_NAME".json --force-refresh - - name: Upload artifact uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: @@ -117,7 +104,7 @@ jobs: - name: Install python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: 'pip' - run: pip install -r requirements.txt @@ -136,6 +123,7 @@ jobs: - name: Combine constants run: | inv -e security-agent.combine-btfhub-constants --archive-path=./dev/dist/constants + inv -e security-agent.split-btfhub-constants - name: Compute branch name id: branch-name diff --git a/.github/workflows/datadog-static-analysis.yml b/.github/workflows/datadog-static-analysis.yml index a77f4ba632c1d..9d33303557c8c 100644 --- a/.github/workflows/datadog-static-analysis.yml +++ b/.github/workflows/datadog-static-analysis.yml @@ -16,7 +16,7 @@ jobs: persist-credentials: false - name: Check code meets quality and security standards id: datadog-static-analysis - uses: DataDog/datadog-static-analyzer-github-action@v1 + uses: DataDog/datadog-static-analyzer-github-action@06d501a75f56e4075c67a7dbc61a74b6539a05c8 # v1.2.1 with: dd_api_key: ${{ secrets.DD_STATIC_ANALYZER_API_KEY }} dd_app_key: ${{ secrets.DD_STATIC_ANALYZER_APP_KEY }} diff --git a/.github/workflows/docs-dev.yml b/.github/workflows/docs-dev.yml index 59e803598f6cd..d84266b5c5908 100644 --- a/.github/workflows/docs-dev.yml +++ 
b/.github/workflows/docs-dev.yml @@ -34,7 +34,7 @@ jobs: - name: Set up Python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: '3.12' + python-version-file: .python-version - name: Install dependencies run: pip install -r tasks/requirements.txt diff --git a/.github/workflows/external-contributor.yml b/.github/workflows/external-contributor.yml index 03cf46fd03511..4878e02cf6b46 100644 --- a/.github/workflows/external-contributor.yml +++ b/.github/workflows/external-contributor.yml @@ -25,7 +25,7 @@ jobs: - name: Setup python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: 'pip' cache-dependency-path: '**/requirements*.txt' - name: Install dependencies diff --git a/.github/workflows/go_mod_tidy.yml b/.github/workflows/go_mod_tidy.yml index daa8b9779b741..546a930e9d6c8 100644 --- a/.github/workflows/go_mod_tidy.yml +++ b/.github/workflows/go_mod_tidy.yml @@ -3,16 +3,10 @@ on: pull_request: types: - labeled - workflow_dispatch: - inputs: - pr_number: - description: "PR number" - required: true - type: number jobs: mod_tidy_and_generate_licenses: - if: ${{ github.event_name == 'workflow_dispatch' || (github.event_name == 'pull_request' && github.actor == 'dependabot[bot]' && contains(github.event.pull_request.labels.*.name, 'dependencies-go')) }} + if: ${{ github.repository == 'DataDog/datadog-agent' && github.event.pull_request.user.login == 'dependabot[bot]' && contains(github.event.pull_request.labels.*.name, 'dependencies-go') }} runs-on: ubuntu-latest permissions: contents: write @@ -20,13 +14,6 @@ jobs: - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: ref: ${{ github.head_ref }} - - name: Checkout PR - # run only if triggered manually, otherwise we are already on the right branch and we won't have `pr_number` - if: ${{ github.event_name == 'workflow_dispatch' }} - run: gh pr checkout "$PR_NUMBER" - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - PR_NUMBER: ${{ github.event.inputs.pr_number }} - name: Install go uses: actions/setup-go@3041bf56c941b39c61721a86cd11f3bb1338122a # v5.2.0 with: @@ -34,19 +21,19 @@ jobs: - name: Install python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: "pip" - name: Install python requirements.txt run: python3 -m pip install -r requirements.txt - name: Go mod tidy run: inv -e tidy - name: Update LICENSE-3rdparty.csv - if: ${{ github.event_name == 'workflow_dispatch' || !contains(github.event.pull_request.labels.*.name, 'dependencies-go-tools') }} + if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies-go-tools') }} run: | inv -e install-tools inv -e generate-licenses - name: Update mocks - if: ${{ github.event_name == 'workflow_dispatch' || !contains(github.event.pull_request.labels.*.name, 'dependencies-go-tools') }} + if: ${{ !contains(github.event.pull_request.labels.*.name, 'dependencies-go-tools') }} run: inv -e security-agent.gen-mocks # generate both security agent and process mocks - uses: stefanzweifel/git-auto-commit-action@8621497c8c39c72f3e2a999a26b4ca1b5058a842 # v5.0.1 id: autocommit diff --git a/.github/workflows/label-analysis.yml b/.github/workflows/label-analysis.yml index a7ace970705cf..b0154672ef868 100644 --- a/.github/workflows/label-analysis.yml +++ b/.github/workflows/label-analysis.yml @@ -29,7 +29,7 @@ jobs: - name: Setup 
python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: 'pip' cache-dependency-path: '**/requirements*.txt' - name: Install dependencies @@ -50,7 +50,7 @@ jobs: - name: Setup python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: 'pip' cache-dependency-path: '**/requirements*.txt' - name: Install dependencies @@ -109,7 +109,7 @@ jobs: - name: Setup Python3 uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: "3.12.6" + python-version-file: .python-version cache: "pip" cache-dependency-path: '**/requirements*.txt' - name: Install python dependencies @@ -131,7 +131,7 @@ jobs: - name: Setup python uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: 3.11 + python-version-file: .python-version cache: 'pip' cache-dependency-path: '**/requirements*.txt' - name: Install dependencies @@ -140,7 +140,7 @@ jobs: run: inv -e github.agenttelemetry-list-change-ack-check --pr-id=${{ github.event.pull_request.number }} ask-reviews: - if: github.triggering_actor != 'dd-devflow[bot]' && github.event.action != 'synchronize' + if: github.triggering_actor != 'dd-devflow[bot]' && github.event.action == 'labeled' && github.event.label.name == 'ask-review' runs-on: ubuntu-latest steps: - name: Checkout repository diff --git a/.github/workflows/report-merged-pr.yml b/.github/workflows/report-merged-pr.yml index 879b5b7e13330..570205469fe4d 100644 --- a/.github/workflows/report-merged-pr.yml +++ b/.github/workflows/report-merged-pr.yml @@ -27,7 +27,7 @@ jobs: - name: Setup Python3 uses: actions/setup-python@0b93645e9fea7318ecaed2b359559ac225c90a2b # v5.3.0 with: - python-version: "3.12.6" + python-version-file: .python-version cache: "pip" cache-dependency-path: '**/requirements*.txt' diff --git a/.github/workflows/serverless-integration.yml b/.github/workflows/serverless-integration.yml index f11209b420ef0..bf9cd919a99e7 100644 --- a/.github/workflows/serverless-integration.yml +++ b/.github/workflows/serverless-integration.yml @@ -17,7 +17,7 @@ permissions: {} jobs: test: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: fail-fast: false matrix: diff --git a/.github/workflows/update_dependencies.yml b/.github/workflows/update_dependencies.yml index f9094fffbe61d..67169c34ee266 100644 --- a/.github/workflows/update_dependencies.yml +++ b/.github/workflows/update_dependencies.yml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest permissions: {} # the workflow uses the GitHub App token to create the PR so no specific permissions needed here steps: - - uses: actions/create-github-app-token@5d869da34e18e7287c1daad50e0b8ea0f506ce69 # v1.11.0 + - uses: actions/create-github-app-token@c1a285145b9d317df6ced56c09f525b5c2b6f755 # v1.11.1 id: app-token with: app-id: ${{ vars.DD_GITHUB_TOKEN_GENERATOR_APP_ID }} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 82599a4b84956..696ced687a42d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -6,8 +6,6 @@ include: - .gitlab/check_deploy/check_deploy.yml - .gitlab/check_merge/do_not_merge.yml - .gitlab/choco_build/choco_build.yml - - .gitlab/powershell_script_signing/powershell_script_signing.yml - - .gitlab/powershell_script_deploy/powershell_script_deploy.yml - .gitlab/common/shared.yml - .gitlab/common/skip_ci_check.yml - .gitlab/common/test_infra_version.yml @@ -44,7 
+42,9 @@ include: default: retry: max: 2 - exit_codes: 42 + exit_codes: + - 42 + - 101 # Failed to extract dependencies when: - runner_system_failure - stuck_or_timeout_failure @@ -83,7 +83,7 @@ stages: - deploy_packages - deploy_cws_instrumentation - deploy_dca - - choco_and_install_script_build + - choco_build - trigger_release - install_script_deploy - internal_image_deploy @@ -170,49 +170,49 @@ variables: # To use images from datadog-agent-buildimages dev branches, set the corresponding # SUFFIX variable to _test_only DATADOG_AGENT_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_BUILDIMAGES: v50263243-1a30c934 + DATADOG_AGENT_BUILDIMAGES: v53759313-14a41bca DATADOG_AGENT_WINBUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_WINBUILDIMAGES: v50263243-1a30c934 + DATADOG_AGENT_WINBUILDIMAGES: v53759313-14a41bca DATADOG_AGENT_ARMBUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_ARMBUILDIMAGES: v50263243-1a30c934 + DATADOG_AGENT_ARMBUILDIMAGES: v53759313-14a41bca DATADOG_AGENT_SYSPROBE_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v50263243-1a30c934 + DATADOG_AGENT_SYSPROBE_BUILDIMAGES: v53759313-14a41bca DATADOG_AGENT_BTF_GEN_BUILDIMAGES_SUFFIX: "" - DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v50263243-1a30c934 + DATADOG_AGENT_BTF_GEN_BUILDIMAGES: v53759313-14a41bca # New images to enable different version per image - not used yet - CI_IMAGE_BTF_GEN: v50263243-1a30c934 + CI_IMAGE_BTF_GEN: v53759313-14a41bca CI_IMAGE_BTF_GEN_SUFFIX: "" - CI_IMAGE_DEB_X64: v50263243-1a30c934 + CI_IMAGE_DEB_X64: v53759313-14a41bca CI_IMAGE_DEB_X64_SUFFIX: "" - CI_IMAGE_DEB_ARM64: v50263243-1a30c934 + CI_IMAGE_DEB_ARM64: v53759313-14a41bca CI_IMAGE_DEB_ARM64_SUFFIX: "" - CI_IMAGE_DEB_ARMHF: v50263243-1a30c934 + CI_IMAGE_DEB_ARMHF: v53759313-14a41bca CI_IMAGE_DEB_ARMHF_SUFFIX: "" - CI_IMAGE_DD_AGENT_TESTING: v50263243-1a30c934 + CI_IMAGE_DD_AGENT_TESTING: v53759313-14a41bca CI_IMAGE_DD_AGENT_TESTING_SUFFIX: "" - CI_IMAGE_DOCKER_X64: v50263243-1a30c934 + CI_IMAGE_DOCKER_X64: v53759313-14a41bca CI_IMAGE_DOCKER_X64_SUFFIX: "" - CI_IMAGE_DOCKER_ARM64: v50263243-1a30c934 + CI_IMAGE_DOCKER_ARM64: v53759313-14a41bca CI_IMAGE_DOCKER_ARM64_SUFFIX: "" - CI_IMAGE_GITLAB_AGENT_DEPLOY: v50263243-1a30c934 + CI_IMAGE_GITLAB_AGENT_DEPLOY: v53759313-14a41bca CI_IMAGE_GITLAB_AGENT_DEPLOY_SUFFIX: "" - CI_IMAGE_LINUX_GLIBC_2_17_X64: v50263243-1a30c934 + CI_IMAGE_LINUX_GLIBC_2_17_X64: v53759313-14a41bca CI_IMAGE_LINUX_GLIBC_2_17_X64_SUFFIX: "" - CI_IMAGE_LINUX_GLIBC_2_23_ARM64: v50263243-1a30c934 + CI_IMAGE_LINUX_GLIBC_2_23_ARM64: v53759313-14a41bca CI_IMAGE_LINUX_GLIBC_2_23_ARM64_SUFFIX: "" - CI_IMAGE_SYSTEM_PROBE_X64: v50263243-1a30c934 + CI_IMAGE_SYSTEM_PROBE_X64: v53759313-14a41bca CI_IMAGE_SYSTEM_PROBE_X64_SUFFIX: "" - CI_IMAGE_SYSTEM_PROBE_ARM64: v50263243-1a30c934 + CI_IMAGE_SYSTEM_PROBE_ARM64: v53759313-14a41bca CI_IMAGE_SYSTEM_PROBE_ARM64_SUFFIX: "" - CI_IMAGE_RPM_X64: v50263243-1a30c934 + CI_IMAGE_RPM_X64: v53759313-14a41bca CI_IMAGE_RPM_X64_SUFFIX: "" - CI_IMAGE_RPM_ARM64: v50263243-1a30c934 + CI_IMAGE_RPM_ARM64: v53759313-14a41bca CI_IMAGE_RPM_ARM64_SUFFIX: "" - CI_IMAGE_RPM_ARMHF: v50263243-1a30c934 + CI_IMAGE_RPM_ARMHF: v53759313-14a41bca CI_IMAGE_RPM_ARMHF_SUFFIX: "" - CI_IMAGE_WIN_1809_X64: v50263243-1a30c934 + CI_IMAGE_WIN_1809_X64: v53759313-14a41bca CI_IMAGE_WIN_1809_X64_SUFFIX: "" - CI_IMAGE_WIN_LTSC2022_X64: v50263243-1a30c934 + CI_IMAGE_WIN_LTSC2022_X64: v53759313-14a41bca CI_IMAGE_WIN_LTSC2022_X64_SUFFIX: "" DATADOG_AGENT_EMBEDDED_PATH: /opt/datadog-agent/embedded @@ -343,9 +343,6 @@ variables: .if_installer_tests: 
&if_installer_tests if: ($CI_COMMIT_BRANCH == "main" || $DEPLOY_AGENT == "true" || $RUN_E2E_TESTS == "on" || $DDR_WORKFLOW_ID != null) && $RUN_E2E_TESTS != "off" -.if_testing_cleanup: &if_testing_cleanup - if: $TESTING_CLEANUP == "true" - .if_run_all_e2e_tests: &if_run_all_e2e_tests if: $RUN_E2E_TESTS == "on" @@ -381,7 +378,7 @@ variables: if: ($DEPLOY_AGENT == "true" || $DDR_WORKFLOW_ID != null) && $BUCKET_BRANCH == "beta" && $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ .if_scheduled_main: &if_scheduled_main - if: $CI_PIPELINE_SOURCE == "schedule" && $CI_COMMIT_BRANCH == "main" + if: ($CI_PIPELINE_SOURCE == "schedule" || ($DDR_WORKFLOW_ID != null && $APPS =~ /^beta-build-/)) && $CI_COMMIT_BRANCH == "main" # Rule to trigger jobs only when a branch matches the mergequeue pattern. .if_mergequeue: &if_mergequeue @@ -645,11 +642,6 @@ workflow: .on_all_builds: - <<: *if_run_all_builds -.on_all_builds_manual: - - <<: *if_run_all_builds - when: manual - allow_failure: true - .on_e2e_tests: - <<: *if_installer_tests @@ -665,13 +657,6 @@ workflow: variables: E2E_OSVERS: $E2E_BRANCH_OSVERS -.on_main_or_testing_cleanup: - - <<: *if_main_branch - - <<: *if_testing_cleanup - -.on_testing_cleanup: - - <<: *if_testing_cleanup - .security_agent_change_paths: &security_agent_change_paths - pkg/ebpf/**/* - pkg/security/**/* @@ -708,6 +693,11 @@ workflow: - test/new-e2e/tests/windows/install-test/**/* - test/new-e2e/tests/windows/domain-test/**/* - tasks/msi.py + - omnibus/python-scripts/**/* + - omnibus/lib/**/* + - omnibus/config/projects/agent.rb + - omnibus/config/software/**/* + - omnibus/config/templates/**/* compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 .except_windows_installer_changes: @@ -887,7 +877,7 @@ workflow: - comp/dogstatsd/**/* - comp/forwarder/**/* - comp/logs/**/* - - comp/serializer/compression/**/* + - comp/serializer/**/* - pkg/aggregator/**/* - pkg/collector/**/* - pkg/commonchecks/**/* @@ -1183,4 +1173,30 @@ workflow: - pkg/gpu/**/* - test/new-e2e/tests/gpu/**/* - pkg/collector/corechecks/gpu/**/* + - comp/core/workloadmeta/collectors/internal/nvml/**/* + - comp/core/autodiscovery/providers/gpu.go + - pkg/config/autodiscovery/autodiscovery.go compare_to: main # TODO: use a variable, when this is supported https://gitlab.com/gitlab-org/gitlab/-/issues/369916 + +# windows_docker_2022 configures the job to use the Windows Server 2022 runners. +# Use in jobs that need to run on Windows Server 2022 runners. +.windows_docker_2022: + tags: ["runner:windows-docker", "windowsversion:2022"] + variables: + # Full image name for Agent windows build image, for use in docker run command + WINBUILDIMAGE: registry.ddbuild.io/ci/datadog-agent-buildimages/windows_ltsc2022_${ARCH}${DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${DATADOG_AGENT_WINBUILDIMAGES} + +# windows_docker_2019 configures the job to use the Windows Server 2019 runners. +# Use in jobs that need to run on Windows Server 2019 runners. +.windows_docker_2019: + tags: ["runner:windows-docker", "windowsversion:1809"] + variables: + # Full image name for Agent windows build image, for use in docker run command + WINBUILDIMAGE: registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${DATADOG_AGENT_WINBUILDIMAGES} + +# windows_docker_default configures the job to use the default Windows Server runners +# Use in jobs that may need to have their version updated in the future. 
+# +# Current default: Windows Server 2022 +.windows_docker_default: + extends: .windows_docker_2022 diff --git a/.gitlab/.ci-linters.yml b/.gitlab/.ci-linters.yml index fd1af4a9868c5..5e998ecd7edec 100644 --- a/.gitlab/.ci-linters.yml +++ b/.gitlab/.ci-linters.yml @@ -21,6 +21,7 @@ needs-rules: - deploy_containers-cws-instrumentation-rc-mutable - deploy_containers-cws-instrumentation-rc-versioned - dogstatsd_x64_size_test + - generate_windows_gitlab_runner_bump_pr - go_mod_tidy_check - lint_flavor_dogstatsd_linux-x64 - lint_flavor_heroku_linux-x64 @@ -31,6 +32,7 @@ needs-rules: - lint_linux-x64 - lint_macos_gitlab_amd64 - new-e2e-eks-cleanup-on-failure + - protobuf_test - publish_winget_7_x64 - revert_latest_7 - security_go_generate_check diff --git a/.gitlab/JOBOWNERS b/.gitlab/JOBOWNERS index 2678bd7a5de6d..5d86e1317e8b9 100644 --- a/.gitlab/JOBOWNERS +++ b/.gitlab/JOBOWNERS @@ -18,12 +18,13 @@ fetch_openjdk @DataDog/agent-metrics-logs # Source test # Notifications are handled separately for more fine-grained control on go tests -tests_* @DataDog/multiple -tests_ebpf* @DataDog/ebpf-platform -tests_windows_sysprobe* @DataDog/windows-kernel-integrations -security_go_generate_check @DataDog/agent-security +tests_* @DataDog/multiple +tests_ebpf* @DataDog/ebpf-platform +tests_windows_sysprobe* @DataDog/windows-kernel-integrations +security_go_generate_check @DataDog/agent-security prepare_sysprobe_ebpf_functional_tests* @DataDog/ebpf-platform prepare_secagent_ebpf_functional_tests* @DataDog/agent-security +protobuf_test @DataDog/multiple # Send count metrics about Golang dependencies golang_deps_send_count_metrics @DataDog/agent-shared-components @@ -33,7 +34,7 @@ golang_deps_commenter @DataDog/ebpf-platform # Binary build build_system-probe* @DataDog/ebpf-platform -cluster_agent_cloudfoundry-build* @Datadog/platform-integrations +cluster_agent_cloudfoundry-build* @Datadog/agent-integrations cluster_agent-build* @DataDog/container-integrations cws_instrumentation-build* @DataDog/agent-security build_serverless* @DataDog/serverless @@ -113,7 +114,7 @@ deploy_installer* @DataDog/agent-delivery deploy_packages* @DataDog/agent-delivery deploy_staging* @DataDog/agent-delivery publish_winget* @DataDog/windows-agent -powershell_script_deploy @DataDog/windows-agent +powershell_script_signing @DataDog/windows-agent windows_bootstrapper_deploy @DataDog/windows-agent qa_*_oci @DataDog/agent-delivery qa_installer_script* @DataDog/agent-delivery @@ -127,6 +128,7 @@ deploy_containers-cws-instrumentation* @DataDog/agent-security # Trigger release trigger_manual_prod_release @DataDog/agent-delivery trigger_auto_staging_release @DataDog/agent-delivery +generate_windows_gitlab_runner_bump_pr* @DataDog/agent-delivery # Integration test integration_tests_windows* @DataDog/windows-agent diff --git a/.gitlab/binary_build/system_probe.yml b/.gitlab/binary_build/system_probe.yml index 5d897fb7f5f3e..dc7ffc511b4aa 100644 --- a/.gitlab/binary_build/system_probe.yml +++ b/.gitlab/binary_build/system_probe.yml @@ -12,9 +12,7 @@ - find "$CI_BUILDS_DIR" ! 
-path '*DataDog/datadog-agent*' -delete || true # Allow failure, we can't remove parent folders of datadog-agent script: - inv check-go-version - - inv -e system-probe.build --strip-object-files - # fail if references to glibc >= 2.18 - - objdump -p $CI_PROJECT_DIR/$SYSTEM_PROBE_BINARIES_DIR/system-probe | egrep 'GLIBC_2\.(1[8-9]|[2-9][0-9])' && exit 1 + - inv -e system-probe.build-object-files --strip-object-files - inv -e system-probe.save-build-outputs $CI_PROJECT_DIR/sysprobe-build-outputs.tar.xz variables: KUBERNETES_MEMORY_REQUEST: "6Gi" diff --git a/.gitlab/binary_build/windows.yml b/.gitlab/binary_build/windows.yml index c2ba326f7006c..311648324f3cb 100644 --- a/.gitlab/binary_build/windows.yml +++ b/.gitlab/binary_build/windows.yml @@ -5,7 +5,7 @@ build_windows_container_entrypoint: - !reference [.except_mergequeue] - when: on_success stage: binary_build - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default needs: ["lint_windows-x64"] variables: ARCH: "x64" @@ -23,7 +23,7 @@ build_windows_container_entrypoint: -e WINDOWS_BUILDER=true -e AWS_NETWORKING=true -e TARGET_ARCH="$ARCH" - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} + ${WINBUILDIMAGE} c:\mnt\Dockerfiles\agent\windows\entrypoint\build.bat - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - get-childitem build-out\${CI_JOB_ID} diff --git a/.gitlab/choco_build/choco_build.yml b/.gitlab/choco_build/choco_build.yml index 06ff3c6bb5d0d..5d5c92450224e 100644 --- a/.gitlab/choco_build/choco_build.yml +++ b/.gitlab/choco_build/choco_build.yml @@ -6,8 +6,8 @@ .windows_choco_7_x64: rules: !reference [.on_deploy_stable_or_beta_repo_branch] - stage: choco_and_install_script_build - tags: ["runner:windows-docker", "windowsversion:1809"] + stage: choco_build + extends: .windows_docker_default variables: ARCH: "x64" script: @@ -21,7 +21,7 @@ -e CI_PIPELINE_ID=${CI_PIPELINE_ID} -e BUCKET_BRANCH="$BUCKET_BRANCH" -e AWS_NETWORKING=true - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} + ${WINBUILDIMAGE} powershell.exe -C "C:\mnt\tasks\winbuildscripts\Generate-Chocolatey-Package.ps1 -MSIDirectory c:\mnt\omnibus\pkg -Flavor $FLAVOR -InstallDeps 1" - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - $CopyNupkgToS3 = "$S3_CP_CMD --recursive --exclude '*' --include '*.nupkg' build-out $S3_RELEASE_ARTIFACTS_URI/choco/nupkg" diff --git a/.gitlab/common/test_infra_version.yml b/.gitlab/common/test_infra_version.yml index 944343f20da2f..87aa0a1aa94ae 100644 --- a/.gitlab/common/test_infra_version.yml +++ b/.gitlab/common/test_infra_version.yml @@ -1,4 +1,9 @@ +# File generated by inv buildimages.update-test-infra-definitions +# Please do not edit this file manually +# To update the test-infra-definitions version, run `inv buildimages.update-test-infra-definitions --commit-sha ` [--is-dev-image] + + --- variables: - TEST_INFRA_DEFINITIONS_BUILDIMAGES: 221bbc806266 + TEST_INFRA_DEFINITIONS_BUILDIMAGES: 9e836ad2dc14 TEST_INFRA_DEFINITIONS_BUILDIMAGES_SUFFIX: '' diff --git a/.gitlab/container_build/docker_windows.yml b/.gitlab/container_build/docker_windows.yml index f2402de687d30..2a3e5f65e7cf3 100644 --- a/.gitlab/container_build/docker_windows.yml +++ b/.gitlab/container_build/docker_windows.yml @@ -54,6 +54,7 @@ variables: AGENT_ZIP: 
"datadog-agent-7*-x86_64.zip" BUILD_ARG: "--build-arg BASE_IMAGE=mcr.microsoft.com/powershell:lts-nanoserver-${VARIANT} --build-arg WITH_JMX=${WITH_JMX} --build-arg VARIANT=${VARIANT} --build-arg INSTALL_INFO=nano-${VARIANT}" + retry: 2 .docker_build_agent7_windows_servercore_common: extends: diff --git a/.gitlab/container_build/docker_windows_agent7.yml b/.gitlab/container_build/docker_windows_agent7.yml index 4a3c9393d5ba1..30f5561772eac 100644 --- a/.gitlab/container_build/docker_windows_agent7.yml +++ b/.gitlab/container_build/docker_windows_agent7.yml @@ -2,7 +2,7 @@ docker_build_agent7_windows1809: extends: - .docker_build_agent7_windows_common - tags: ["runner:windows-docker", "windowsversion:1809"] + - .windows_docker_2019 variables: VARIANT: 1809 TAG_SUFFIX: -7 @@ -11,7 +11,7 @@ docker_build_agent7_windows1809: docker_build_agent7_windows1809_jmx: extends: - .docker_build_agent7_windows_common - tags: ["runner:windows-docker", "windowsversion:1809"] + - .windows_docker_2019 variables: VARIANT: 1809 TAG_SUFFIX: -7-jmx @@ -20,7 +20,7 @@ docker_build_agent7_windows1809_jmx: docker_build_agent7_windows2022_jmx: extends: - .docker_build_agent7_windows_common - tags: ["runner:windows-docker", "windowsversion:2022"] + - .windows_docker_2022 variables: VARIANT: ltsc2022 TAG_SUFFIX: -7-jmx @@ -29,7 +29,7 @@ docker_build_agent7_windows2022_jmx: docker_build_agent7_windows2022: extends: - .docker_build_agent7_windows_common - tags: ["runner:windows-docker", "windowsversion:2022"] + - .windows_docker_2022 variables: VARIANT: ltsc2022 TAG_SUFFIX: "-7" @@ -38,7 +38,7 @@ docker_build_agent7_windows2022: docker_build_agent7_windows1809_core: extends: - .docker_build_agent7_windows_servercore_common - tags: ["runner:windows-docker", "windowsversion:1809"] + - .windows_docker_2019 variables: VARIANT: 1809 TAG_SUFFIX: -7 @@ -47,7 +47,7 @@ docker_build_agent7_windows1809_core: docker_build_agent7_windows1809_core_jmx: extends: - .docker_build_agent7_windows_servercore_common - tags: ["runner:windows-docker", "windowsversion:1809"] + - .windows_docker_2019 variables: VARIANT: 1809 TAG_SUFFIX: -7-jmx @@ -56,7 +56,7 @@ docker_build_agent7_windows1809_core_jmx: docker_build_agent7_windows2022_core: extends: - .docker_build_agent7_windows_servercore_common - tags: ["runner:windows-docker", "windowsversion:2022"] + - .windows_docker_2022 variables: VARIANT: ltsc2022 TAG_SUFFIX: "-7" @@ -65,7 +65,7 @@ docker_build_agent7_windows2022_core: docker_build_agent7_windows2022_core_jmx: extends: - .docker_build_agent7_windows_servercore_common - tags: ["runner:windows-docker", "windowsversion:2022"] + - .windows_docker_2022 variables: VARIANT: ltsc2022 TAG_SUFFIX: -7-jmx @@ -74,7 +74,7 @@ docker_build_agent7_windows2022_core_jmx: docker_build_fips_agent7_windows2022_core: extends: - .docker_build_fips_agent7_windows_servercore_common - tags: ["runner:windows-docker", "windowsversion:2022"] + - .windows_docker_2022 variables: VARIANT: ltsc2022 TAG_SUFFIX: "-7-fips" @@ -83,7 +83,7 @@ docker_build_fips_agent7_windows2022_core: docker_build_fips_agent7_windows2022_core_jmx: extends: - .docker_build_fips_agent7_windows_servercore_common - tags: ["runner:windows-docker", "windowsversion:2022"] + - .windows_docker_2022 variables: VARIANT: ltsc2022 TAG_SUFFIX: -7-fips-jmx diff --git a/.gitlab/deploy_containers/conditions.yml b/.gitlab/deploy_containers/conditions.yml index d41ed80de261e..c4170740cbedf 100644 --- a/.gitlab/deploy_containers/conditions.yml +++ b/.gitlab/deploy_containers/conditions.yml @@ -42,30 +42,6 @@ 
DSD_REPOSITORY: dogstatsd IMG_REGISTRIES: public -.manual_on_deploy_auto_on_rc-ot: - - if: $BUCKET_BRANCH != "beta" && $BUCKET_BRANCH != "stable" - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: agent-dev - IMG_REGISTRIES: dev - - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ && $FORCE_MANUAL != "true" - when: on_success - variables: - AGENT_REPOSITORY: agent - IMG_REGISTRIES: public - - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ && $FORCE_MANUAL == "true" - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: agent - IMG_REGISTRIES: public - - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: agent - IMG_REGISTRIES: public - # Rule for job that are triggered on_success on RC pipelines .on_rc: - if: $FORCE_MANUAL == "true" && $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ @@ -82,20 +58,6 @@ DSD_REPOSITORY: dogstatsd IMG_REGISTRIES: public -# Rule for job that are triggered on_success on RC pipelines for OTel Beta -.on_rc-ot: - - if: $FORCE_MANUAL == "true" && $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: agent - IMG_REGISTRIES: public - - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ - when: on_success - variables: - AGENT_REPOSITORY: agent - IMG_REGISTRIES: public - # Rule for job that can be triggered manually on final build, deploy to prod repository on stable branch deploy, else to dev repository .on_final: - if: $BUCKET_BRANCH == "beta" @@ -114,24 +76,6 @@ DSD_REPOSITORY: dogstatsd IMG_REGISTRIES: public -# Rule for job that can be triggered manually on final build, deploy to prod repository on stable branch deploy, else to dev repository -# For OTel Beta builds -.on_final-ot: - - if: $BUCKET_BRANCH == "beta" - when: never - - if: $BUCKET_BRANCH != "beta" && $BUCKET_BRANCH != "stable" && $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+$/ - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: agent-dev - IMG_REGISTRIES: dev - - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+$/ - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: agent - IMG_REGISTRIES: public - # Rule to deploy to our internal repository, on stable branch deploy .on_internal_final: - if: $BUCKET_BRANCH == "beta" @@ -148,21 +92,6 @@ CWS_INSTRUMENTATION_REPOSITORY: ci/datadog-agent/cws-instrumentation-release IMG_REGISTRIES: internal-aws-ddbuild -# Rule to deploy to our internal repository, on stable branch deploy -.on_internal_final-ot: - ## Jobs will be triggered on beta repo-branch so it's fine - ## if the "final" artifacts job is available on non-stable BUCKET_BRANCH - # - if: $BUCKET_BRANCH == "beta" - # when: never - - if: $BUCKET_BRANCH != "beta" && $BUCKET_BRANCH != "stable" - when: never - - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+$/ - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: ci/datadog-agent/agent-release - IMG_REGISTRIES: internal-aws-ddbuild - # Rule to deploy to our internal repository on RC .on_internal_rc: - if: $FORCE_MANUAL == "true" && $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ @@ -182,17 +111,3 @@ DSD_REPOSITORY: ci/datadog-agent/dogstatsd-release CWS_INSTRUMENTATION_REPOSITORY: ci/datadog-agent/cws-instrumentation-release IMG_REGISTRIES: internal-aws-ddbuild - -# Rule to deploy to 
our internal repository on RC for OTel Agent Beta -.on_internal_rc-ot: - - if: $FORCE_MANUAL == "true" && $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ - when: manual - allow_failure: true - variables: - AGENT_REPOSITORY: ci/datadog-agent/agent-release - IMG_REGISTRIES: internal-aws-ddbuild - - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$/ - when: on_success - variables: - AGENT_REPOSITORY: ci/datadog-agent/agent-release - IMG_REGISTRIES: internal-aws-ddbuild diff --git a/.gitlab/deploy_containers/deploy_containers_a7.yml b/.gitlab/deploy_containers/deploy_containers_a7.yml index 62c845a825aae..599dd113193b6 100644 --- a/.gitlab/deploy_containers/deploy_containers_a7.yml +++ b/.gitlab/deploy_containers/deploy_containers_a7.yml @@ -92,7 +92,7 @@ deploy_containers-a7-win-only: deploy_containers-a7-ot: extends: .deploy_containers-a7-base-ot rules: - !reference [.manual_on_deploy_auto_on_rc-ot] + !reference [.manual_on_deploy_auto_on_rc] parallel: matrix: - JMX: @@ -116,7 +116,7 @@ deploy_containers-a7-win-only-rc: deploy_containers-a7-ot-rc: extends: .deploy_containers-a7-base-ot rules: - !reference [.on_rc-ot] + !reference [.on_rc] variables: VERSION: 7-ot-beta-rc parallel: @@ -155,14 +155,14 @@ deploy_containers-a7_internal-rc: deploy_containers-a7-ot_internal: extends: .deploy_containers-a7-base-ot rules: - !reference [.on_internal_final-ot] + !reference [.on_internal_final] variables: JMX: "-jmx" deploy_containers-a7-ot_internal-rc: extends: .deploy_containers-a7-base-ot rules: - !reference [.on_internal_rc-ot] + !reference [.on_internal_rc] variables: VERSION: 7-ot-beta-rc parallel: @@ -199,21 +199,21 @@ deploy_containers_latest-a7: # Windows only images - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7" IMG_SOURCES: "%BASE%-win1809-amd64" - IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7,${AGENT_REPOSITORY}:latest-ltsc2019 + IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-ltsc2019,${AGENT_REPOSITORY}:latest-ltsc2019 - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7" IMG_SOURCES: "%BASE%-winltsc2022-amd64" - IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7,${AGENT_REPOSITORY}:latest-ltsc2022 + IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-ltsc2022,${AGENT_REPOSITORY}:latest-ltsc2022 - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7" IMG_SOURCES: "%BASE%-win1809-servercore-amd64" IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-servercore-ltsc2019,${AGENT_REPOSITORY}:latest-servercore-ltsc2019 - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7" - IMG_SOURCES: "BASE%-winltsc2022-servercore-amd64" + IMG_SOURCES: "%BASE%-winltsc2022-servercore-amd64" IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-servercore-ltsc2022,${AGENT_REPOSITORY}:latest-servercore-ltsc2022 - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx" IMG_SOURCES: "%BASE%-winltsc2022-servercore-amd64" IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-servercore-ltsc2022-jmx,${AGENT_REPOSITORY}:latest-servercore-ltsc2022-jmx - IMG_VARIABLES: "BASE=${SRC_AGENT}:v${PARENT_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx" - IMG_SOURCES: "%BASE%-winltsc1809-servercore-amd64" + IMG_SOURCES: "%BASE%-win1809-servercore-amd64" IMG_DESTINATIONS: ${AGENT_REPOSITORY}:7-servercore-ltsc2019-jmx,${AGENT_REPOSITORY}:latest-servercore-ltsc2019-jmx deploy_containers_latest-a7_internal: @@ -242,7 +242,7 @@ deploy_containers_latest-a7-ot: extends: 
.deploy_containers-a7-base-ot stage: deploy_containers rules: - !reference [.on_final-ot] + !reference [.on_final] dependencies: [] variables: VERSION: 7-ot-beta diff --git a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml index 03dfa9440f353..71c74a817b347 100644 --- a/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml +++ b/.gitlab/deploy_cws_instrumentation/deploy_cws_instrumentation.yml @@ -9,7 +9,11 @@ include: .deploy_containers-cws-instrumentation-base: extends: .docker_publish_job_definition stage: deploy_cws_instrumentation - dependencies: [] + needs: + - job: "docker_build_cws_instrumentation_amd64" + artifacts: false + - job: "docker_build_cws_instrumentation_arm64" + artifacts: false before_script: - if [[ "$VERSION" == "" ]]; then VERSION="$(inv agent.version --url-safe)" || exit $?; fi - if [[ "$CWS_INSTRUMENTATION_REPOSITORY" == "" ]]; then export CWS_INSTRUMENTATION_REPOSITORY="cws-instrumentation"; fi @@ -20,7 +24,7 @@ include: # will push the `7.xx.y-rc.z` tags deploy_containers-cws-instrumentation-rc-versioned: extends: .deploy_containers-cws-instrumentation-base - rules: !reference [.on_deploy_rc] + rules: !reference [.on_deploy_manual_auto_on_rc] # will update the `rc` tag deploy_containers-cws-instrumentation-rc-mutable: diff --git a/.gitlab/deploy_packages/deploy_common.yml b/.gitlab/deploy_packages/deploy_common.yml index ab3daee583890..e3d0323e7e96a 100644 --- a/.gitlab/deploy_packages/deploy_common.yml +++ b/.gitlab/deploy_packages/deploy_common.yml @@ -121,4 +121,4 @@ deploy_installer_install_scripts: before_script: - ls $OMNIBUS_PACKAGE_DIR script: - - $S3_CP_CMD --recursive --exclude "*" --include "install-*.sh" "$OMNIBUS_PACKAGE_DIR" "${S3_RELEASE_INSTALLER_ARTIFACTS_URI}/scripts/" + - $S3_CP_CMD --recursive --exclude "*" --include "install*.sh" "$OMNIBUS_PACKAGE_DIR" "${S3_RELEASE_INSTALLER_ARTIFACTS_URI}/scripts/" diff --git a/.gitlab/deploy_packages/e2e.yml b/.gitlab/deploy_packages/e2e.yml index 78bb3286f1292..fd3fca5403a8c 100644 --- a/.gitlab/deploy_packages/e2e.yml +++ b/.gitlab/deploy_packages/e2e.yml @@ -12,4 +12,5 @@ qa_installer_script: before_script: - ls $OMNIBUS_PACKAGE_DIR script: - - $S3_CP_CMD --recursive --exclude "*" --include "install-*.sh" "$OMNIBUS_PACKAGE_DIR" "s3://${INSTALLER_TESTING_S3_BUCKET}/${CI_COMMIT_SHA}/scripts/" + - $S3_CP_CMD --recursive --exclude "*" --include "install*.sh" "$OMNIBUS_PACKAGE_DIR" "s3://${INSTALLER_TESTING_S3_BUCKET}/${CI_COMMIT_SHA}/scripts/" + - $S3_CP_CMD --recursive --exclude "*" --include "install*.sh" "$OMNIBUS_PACKAGE_DIR" "s3://${INSTALLER_TESTING_S3_BUCKET}/pipeline-${CI_PIPELINE_ID}/scripts/" diff --git a/.gitlab/deploy_packages/windows.yml b/.gitlab/deploy_packages/windows.yml index dd794d832b56c..204bcb5786df7 100644 --- a/.gitlab/deploy_packages/windows.yml +++ b/.gitlab/deploy_packages/windows.yml @@ -51,13 +51,30 @@ deploy_staging_windows_tags-7: full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 # Datadog Installer +powershell_script_signing: + extends: .windows_docker_default + stage: deploy_packages + needs: [] + variables: + ARCH: "x64" + rules: + !reference [.on_deploy_installer] + artifacts: + expire_in: 2 weeks + paths: + - $WINDOWS_POWERSHELL_DIR + script: + - mkdir $WINDOWS_POWERSHELL_DIR + - docker run --rm -v "$(Get-Location):c:\mnt" -e AWS_NETWORKING=true -e IS_AWS_CONTAINER=true ${WINBUILDIMAGE} powershell -C "dd-wcs sign 
\mnt\tools\windows\DatadogAgentInstallScript\Install-Datadog.ps1" + - copy .\tools\windows\DatadogAgentInstallScript\Install-Datadog.ps1 $WINDOWS_POWERSHELL_DIR\Install-Datadog.ps1 + deploy_installer_packages_windows-x64: rules: !reference [.on_deploy_installer] stage: deploy_packages image: registry.ddbuild.io/ci/datadog-agent-buildimages/gitlab_agent_deploy$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] - needs: ["windows-installer-amd64"] + needs: ["windows-installer-amd64", "powershell_script_signing"] before_script: - ls $OMNIBUS_PACKAGE_DIR script: @@ -69,6 +86,7 @@ deploy_installer_packages_windows-x64: --include "datadog-installer-*-1-x86_64.zip" --include "datadog-installer-*-1-x86_64.exe" $OMNIBUS_PACKAGE_DIR $S3_RELEASE_INSTALLER_ARTIFACTS_URI/msi/x86_64/ + - $S3_CP_CMD $WINDOWS_POWERSHELL_DIR/Install-Datadog.ps1 $S3_RELEASE_INSTALLER_ARTIFACTS_URI/scripts/Install-Datadog.ps1 deploy_packages_windows-x64-7-fips: rules: diff --git a/.gitlab/deploy_packages/winget.yml b/.gitlab/deploy_packages/winget.yml index 6bed0d001f834..02320209f05fc 100644 --- a/.gitlab/deploy_packages/winget.yml +++ b/.gitlab/deploy_packages/winget.yml @@ -6,7 +6,7 @@ publish_winget_7_x64: dependencies: [] rules: !reference [.on_deploy_stable_or_beta_repo_branch_manual] stage: deploy_packages - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default variables: ARCH: "x64" before_script: @@ -23,7 +23,7 @@ publish_winget_7_x64: -v "$(Get-Location):c:\mnt" -e WINGET_GITHUB_ACCESS_TOKEN=${wingetPat} -e GENERAL_ARTIFACTS_CACHE_BUCKET_URL=${GENERAL_ARTIFACTS_CACHE_BUCKET_URL} - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} + ${WINBUILDIMAGE} Powershell -C "C:\mnt\tasks\winbuildscripts\Update-Winget.ps1" - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } diff --git a/.gitlab/deps_fetch/deps_fetch.yml b/.gitlab/deps_fetch/deps_fetch.yml index 65bcda4673fdd..89c38920434d0 100644 --- a/.gitlab/deps_fetch/deps_fetch.yml +++ b/.gitlab/deps_fetch/deps_fetch.yml @@ -4,15 +4,15 @@ # to reuse them in further jobs that need them. 
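The S3 sync patterns in the install-script deploy jobs are widened in this patch from `--include "install-*.sh"` to `--include "install*.sh"`, so scripts without a hyphen directly after `install` are uploaded as well. A minimal sketch of the difference, using plain shell globs as an approximation of the `aws s3 cp` filter semantics; the file names below are hypothetical, not the actual artifacts produced by `installer.build-linux-script`:

```bash
#!/usr/bin/env bash
# Hypothetical artifact directory standing in for $OMNIBUS_PACKAGE_DIR.
mkdir -p /tmp/omnibus-pkg && cd /tmp/omnibus-pkg
touch install.sh install-databricks.sh install-emr.sh datadog-agent-7.61.0-1.x86_64.rpm

shopt -s nullglob
echo "install-*.sh ->" install-*.sh   # misses install.sh
echo "install*.sh  ->" install*.sh    # picks up install.sh as well
```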
.retrieve_linux_go_deps: - - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache.tar.xz -C $GOPATH/pkg/mod/cache + - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache.tar.xz -C $GOPATH/pkg/mod/cache || exit 101 - rm -f modcache.tar.xz .retrieve_linux_go_tools_deps: - - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache_tools.tar.xz -C $GOPATH/pkg/mod/cache + - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache_tools.tar.xz -C $GOPATH/pkg/mod/cache || exit 101 - rm -f modcache_tools.tar.xz .retrieve_linux_go_e2e_deps: - - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache_e2e.tar.xz -C $GOPATH/pkg/mod/cache + - mkdir -p $GOPATH/pkg/mod/cache && tar xJf modcache_e2e.tar.xz -C $GOPATH/pkg/mod/cache || exit 101 - rm -f modcache_e2e.tar.xz .cache: diff --git a/.gitlab/dev_container_deploy/docker_linux.yml b/.gitlab/dev_container_deploy/docker_linux.yml index 8994a6cbb9bce..8569bc9987ebd 100644 --- a/.gitlab/dev_container_deploy/docker_linux.yml +++ b/.gitlab/dev_container_deploy/docker_linux.yml @@ -31,6 +31,8 @@ dev_branch_multiarch-a7: IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py3 - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-arm64 IMG_DESTINATIONS: agent-dev:${CI_COMMIT_REF_SLUG}-py3-jmx + - IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-jmx-arm64 + IMG_DESTINATIONS: agent-dev:${CI_COMMIT_SHORT_SHA}-py3-jmx dev_branch_multiarch-fips: extends: .docker_publish_job_definition @@ -124,7 +126,7 @@ dca_dev_branch: dca_dev_branch_multiarch: extends: .docker_publish_job_definition stage: dev_container_deploy - rules: !reference [.on_all_builds_manual] + rules: !reference [.manual] needs: - docker_build_cluster_agent_amd64 - docker_build_cluster_agent_arm64 @@ -147,7 +149,7 @@ dca_dev_master: cws_instrumentation_dev_branch_multiarch: extends: .docker_publish_job_definition stage: dev_container_deploy - rules: !reference [.on_all_builds_manual] + rules: !reference [.manual] needs: - docker_build_cws_instrumentation_amd64 - docker_build_cws_instrumentation_arm64 diff --git a/.gitlab/dev_container_deploy/e2e.yml b/.gitlab/dev_container_deploy/e2e.yml index d0d721559059c..68d35d1f09731 100644 --- a/.gitlab/dev_container_deploy/e2e.yml +++ b/.gitlab/dev_container_deploy/e2e.yml @@ -17,6 +17,22 @@ qa_agent: IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-arm64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-win1809-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-winltsc2022-amd64 IMG_DESTINATIONS: agent:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA} +qa_agent_fips: + extends: .docker_publish_job_definition + stage: dev_container_deploy + rules: + - !reference [.except_mergequeue] + - !reference [.except_disable_e2e_tests] + - when: on_success + needs: + - docker_build_fips_agent7 + - docker_build_fips_agent7_arm64 + - docker_build_fips_agent7_windows2022_core + variables: + IMG_REGISTRIES: agent-qa + IMG_SOURCES: ${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-fips-amd64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-fips-arm64,${SRC_AGENT}:v${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-7-fips-winltsc2022-servercore-amd64 + IMG_DESTINATIONS: agent:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA}-fips + qa_agent_jmx: extends: .docker_publish_job_definition stage: dev_container_deploy diff --git 
a/.gitlab/e2e/e2e.yml b/.gitlab/e2e/e2e.yml index f944f407fa2dd..7ff524d9bd268 100644 --- a/.gitlab/e2e/e2e.yml +++ b/.gitlab/e2e/e2e.yml @@ -196,6 +196,19 @@ new-e2e-agent-subcommands: - EXTRA_PARAMS: --run "Test(Linux|Windows)CheckSuite" - EXTRA_PARAMS: --run "Test(Linux|Windows)RunSuite" +new-e2e-fips-compliance-test: + extends: .new_e2e_template_needs_deb_x64 + needs: + - !reference [.needs_new_e2e_template] + - qa_agent_fips + - deploy_deb_testing-a7_x64 + rules: + - !reference [.on_asc_or_e2e_changes] + - !reference [.manual] + variables: + TARGETS: ./tests/fips-compliance + TEAM: agent-shared-components + new-e2e-windows-service-test: extends: .new_e2e_template needs: @@ -294,6 +307,7 @@ new-e2e-aml: - deploy_windows_testing-a7 - qa_agent - qa_agent_jmx + - qa_agent_fips_jmx - qa_dca rules: - !reference [.on_aml_or_e2e_changes] @@ -315,7 +329,6 @@ new-e2e-cws: - qa_agent - qa_dca variables: - SHOULD_RUN_IN_FLAKES_FINDER: "false" # Currently broken in flake finder ADXT-687 TARGETS: ./tests/cws TEAM: csm-threats-agent CWS_INSTRUMENTATION_FULLIMAGEPATH: 669783387624.dkr.ecr.us-east-1.amazonaws.com/cws-instrumentation:${CI_PIPELINE_ID}-${CI_COMMIT_SHORT_SHA} @@ -333,6 +346,7 @@ new-e2e-discovery: needs: - !reference [.needs_new_e2e_template] - deploy_deb_testing-a7_x64 + - qa_agent rules: - !reference [.on_discovery_or_e2e_changes] - !reference [.manual] @@ -392,6 +406,12 @@ new-e2e-installer-script: - !reference [.manual] needs: - !reference [.needs_new_e2e_template] + - deploy_deb_testing-a7_arm64 + - deploy_deb_testing-a7_x64 + - deploy_rpm_testing-a7_arm64 + - deploy_rpm_testing-a7_x64 + - deploy_suse_rpm_testing_arm64-a7 + - deploy_suse_rpm_testing_x64-a7 - deploy_installer_oci - qa_installer_script variables: @@ -436,8 +456,8 @@ new-e2e-installer-windows: # Note: this is similar to the WINDOWS_AGENT_VERSION in new-e2e_windows_msi but this job is running cross platforms # Note 2: new_e2e_template does not define AGENT_MAJOR_VERSION, so define it as 7 below. - CURRENT_AGENT_VERSION=$(invoke agent.version) || exit $?; export CURRENT_AGENT_VERSION - - export STABLE_AGENT_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/agent-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - - export STABLE_INSTALLER_VERSION_PACKAGE=$(curl -sS https://hub.docker.com/v2/namespaces/datadog/repositories/installer-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) + - export STABLE_AGENT_VERSION_PACKAGE=$(curl --retry 10 --retry-all-errors -sS https://hub.docker.com/v2/namespaces/datadog/repositories/agent-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) + - export STABLE_INSTALLER_VERSION_PACKAGE=$(curl --retry 10 --retry-all-errors -sS https://hub.docker.com/v2/namespaces/datadog/repositories/installer-package/tags | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1) - !reference [.new_e2e_template, before_script] variables: TARGETS: ./tests/installer/windows @@ -532,7 +552,6 @@ new-e2e-windows-systemprobe: variables: TARGETS: ./tests/sysprobe-functional TEAM: windows-kernel-integrations - SHOULD_RUN_IN_FLAKES_FINDER: "false" # Currently broken in flake finder ADXT-687 parallel: matrix: - EXTRA_PARAMS: --run TestUSMAutoTaggingSuite @@ -642,6 +661,7 @@ new-e2e-gpu: variables: TARGETS: ./tests/gpu # the target path where tests are TEAM: ebpf-platform + E2E_PULUMI_LOG_LEVEL: 10 # incident-33572 needs: # list of required jobs. By default gitlab waits for any previous jobs. 
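The `STABLE_AGENT_VERSION_PACKAGE` and `STABLE_INSTALLER_VERSION_PACKAGE` lookups above only gain `--retry 10 --retry-all-errors`, which retries transient Docker Hub failures; the selection logic is unchanged. A rough sketch of what the jq/sort/tail/head chain does, using a canned response instead of the live tag list (tag names and ordering are illustrative only, and `sort` remains lexicographic, as before):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Canned stand-in for https://hub.docker.com/v2/namespaces/datadog/repositories/agent-package/tags
tags='{"results":[{"name":"7.59.1-1"},{"name":"7.60.0-1"},{"name":"7.61.0-1"}]}'

# Same chain as the CI job: list tag names, sort them, keep the second-newest entry.
stable=$(echo "$tags" | jq -r '.results[] | .name' | sort | tail -n 2 | head -n 1)
echo "STABLE_AGENT_VERSION_PACKAGE=$stable"   # prints 7.60.0-1
```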
- !reference [.needs_new_e2e_template] - deploy_deb_testing-a7_x64 # agent 7 debian package @@ -670,6 +690,7 @@ generate-flakes-finder-pipeline: - qa_dogstatsd - qa_agent - qa_agent_ot + - tests_windows_sysprobe_x64 tags: ["arch:amd64"] script: - inv -e testwasher.generate-flake-finder-pipeline @@ -687,6 +708,7 @@ trigger-flakes-finder: variables: PARENT_PIPELINE_ID: $CI_PIPELINE_ID PARENT_COMMIT_SHA: $CI_COMMIT_SHORT_SHA + PARENT_COMMIT_SHORT_SHA: $CI_COMMIT_SHORT_SHA trigger: include: - artifact: flake-finder-gitlab-ci.yml diff --git a/.gitlab/e2e_install_packages/centos.yml b/.gitlab/e2e_install_packages/centos.yml index edb4424eb6fb9..4ea91d5cf69a5 100644 --- a/.gitlab/e2e_install_packages/centos.yml +++ b/.gitlab/e2e_install_packages/centos.yml @@ -64,6 +64,16 @@ new-e2e-agent-platform-install-script-centos-dogstatsd-a7-x86_64: variables: FLAVOR: datadog-dogstatsd +new-e2e-agent-platform-install-script-centos-datadog-fips-agent-a7-x86_64: + extends: + - .new_e2e_template + - .new-e2e_install_script + - .new-e2e_os_centos + - .new-e2e_centos_a7_x86_64 + - .new-e2e_agent_a7 + variables: + FLAVOR: datadog-fips-agent + new-e2e-agent-platform-install-script-centos-fips-a7-x86_64: extends: - .new_e2e_template @@ -96,6 +106,16 @@ new-e2e-agent-platform-install-script-centos-fips-dogstatsd-a7-x86_64: variables: FLAVOR: datadog-dogstatsd +new-e2e-agent-platform-install-script-centos-fips-datadog-fips-agent-a7-x86_64: + extends: + - .new_e2e_template + - .new-e2e_install_script + - .new-e2e_os_centos + - .new-e2e_centos-fips_a7_x86_64 + - .new-e2e_agent_a7 + variables: + FLAVOR: datadog-fips-agent + new-e2e-agent-platform-step-by-step-centos-a7-x86_64: extends: - .new_e2e_template diff --git a/.gitlab/e2e_install_packages/debian.yml b/.gitlab/e2e_install_packages/debian.yml index a07cb0dc8ff56..6fa9e933c5fc4 100644 --- a/.gitlab/e2e_install_packages/debian.yml +++ b/.gitlab/e2e_install_packages/debian.yml @@ -75,6 +75,16 @@ new-e2e-agent-platform-install-script-debian-dogstatsd-a7-x86_64: variables: FLAVOR: datadog-dogstatsd +new-e2e-agent-platform-install-script-debian-fips-agent-a7-x86_64: + extends: + - .new_e2e_template + - .new-e2e_install_script + - .new-e2e_os_debian + - .new-e2e_debian_a7_x86_64 + - .new-e2e_agent_a7 + variables: + FLAVOR: datadog-fips-agent + new-e2e-agent-platform-install-script-debian-heroku-agent-a7-x86_64: extends: - .new_e2e_template diff --git a/.gitlab/e2e_install_packages/installer.yml b/.gitlab/e2e_install_packages/installer.yml index da1cb2536b626..d6af7e7339878 100644 --- a/.gitlab/e2e_install_packages/installer.yml +++ b/.gitlab/e2e_install_packages/installer.yml @@ -12,4 +12,4 @@ qa_installer_script_main: before_script: - ls $OMNIBUS_PACKAGE_DIR script: - - $S3_CP_CMD --recursive --exclude "*" --include "install-*.sh" "$OMNIBUS_PACKAGE_DIR" "s3://${INSTALLER_TESTING_S3_BUCKET}/scripts/" + - $S3_CP_CMD --recursive --exclude "*" --include "install*.sh" "$OMNIBUS_PACKAGE_DIR" "s3://${INSTALLER_TESTING_S3_BUCKET}/scripts/" diff --git a/.gitlab/e2e_install_packages/suse.yml b/.gitlab/e2e_install_packages/suse.yml index fd25763ed2363..00b7477c6460d 100644 --- a/.gitlab/e2e_install_packages/suse.yml +++ b/.gitlab/e2e_install_packages/suse.yml @@ -71,6 +71,16 @@ new-e2e-agent-platform-install-script-suse-dogstatsd-a7-x86_64: variables: FLAVOR: datadog-dogstatsd +new-e2e-agent-platform-install-script-suse-fips-agent-a7-x86_64: + extends: + - .new_e2e_template + - .new-e2e_install_script + - .new-e2e_os_suse + - .new-e2e_suse_a7_x86_64 + - .new-e2e_agent_a7 + 
variables: + FLAVOR: datadog-fips-agent + new-e2e-agent-platform-step-by-step-suse-a7-x86_64: extends: - .new_e2e_template diff --git a/.gitlab/e2e_install_packages/ubuntu.yml b/.gitlab/e2e_install_packages/ubuntu.yml index 8ae1604230d75..9d96260fb3be6 100644 --- a/.gitlab/e2e_install_packages/ubuntu.yml +++ b/.gitlab/e2e_install_packages/ubuntu.yml @@ -99,6 +99,16 @@ new-e2e-agent-platform-install-script-ubuntu-heroku-agent-a7-x86_64: variables: FLAVOR: datadog-heroku-agent +new-e2e-agent-platform-install-script-ubuntu-fips-agent-a7-x86_64: + extends: + - .new_e2e_template + - .new-e2e_install_script + - .new-e2e_os_ubuntu + - .new-e2e_ubuntu_a7_x86_64 + - .new-e2e_agent_a7 + variables: + FLAVOR: datadog-fips-agent + new-e2e-agent-platform-step-by-step-ubuntu-a7-x86_64: extends: - .new_e2e_template diff --git a/.gitlab/e2e_install_packages/windows.yml b/.gitlab/e2e_install_packages/windows.yml index 86958e1a9c9ad..63912aab6b787 100644 --- a/.gitlab/e2e_install_packages/windows.yml +++ b/.gitlab/e2e_install_packages/windows.yml @@ -35,6 +35,10 @@ - E2E_MSI_TEST: TestInstall - E2E_MSI_TEST: TestRepair - E2E_MSI_TEST: TestUpgrade + - E2E_MSI_TEST: TestUpgradeFromLatest + - E2E_MSI_TEST: TestPersistingIntegrations + - E2E_MSI_TEST: TestIntegrationFolderPermissions + - E2E_MSI_TEST: TestIntegrationRollback - E2E_MSI_TEST: TestUpgradeRollback - E2E_MSI_TEST: TestUpgradeRollbackWithoutCWS - E2E_MSI_TEST: TestUpgradeChangeUser @@ -51,6 +55,7 @@ - E2E_MSI_TEST: TestInstallExistingAltDir - E2E_MSI_TEST: TestInstallAltDirAndCorruptForUninstall - E2E_MSI_TEST: TestInstallFail + - E2E_MSI_TEST: TestInstallWithLanmanServerDisabled # These tests are v7 only - E2E_MSI_TEST: TestNPMUpgradeToNPM - E2E_MSI_TEST: TestNPMUpgradeNPMToNPM @@ -113,7 +118,13 @@ new-e2e-windows-agent-a7-x86_64-fips: - .new-e2e_agent_a7 needs: - !reference [.needs_new_e2e_template] + - deploy_windows_testing-a7 - deploy_windows_testing-a7-fips + parallel: + matrix: + - EXTRA_PARAMS: --run "TestFIPSAgent$" + - EXTRA_PARAMS: --run "TestFIPSAgentDoesNotInstallOverAgent$" + - EXTRA_PARAMS: --run "TestAgentDoesNotInstallOverFIPSAgent$" rules: - !reference [.on_deploy] - !reference [.on_e2e_or_windows_installer_changes] diff --git a/.gitlab/e2e_testing_deploy/e2e_deploy.yml b/.gitlab/e2e_testing_deploy/e2e_deploy.yml index d47fcb6748da3..ef4ebebb79c53 100644 --- a/.gitlab/e2e_testing_deploy/e2e_deploy.yml +++ b/.gitlab/e2e_testing_deploy/e2e_deploy.yml @@ -52,6 +52,7 @@ deploy_deb_testing-a7_x64: [ "installer_deb-amd64", "agent_deb-x64-a7", + "agent_deb-x64-a7-fips", "agent_heroku_deb-x64-a7", "iot_agent_deb-x64", "dogstatsd_deb-x64", @@ -75,7 +76,7 @@ deploy_deb_testing-a7_arm64: - !reference [.manual] extends: - .deploy_deb_testing-a7 - needs: ["installer_deb-arm64", "agent_deb-arm64-a7", "lint_linux-arm64"] + needs: ["installer_deb-arm64", "agent_deb-arm64-a7", "agent_deb-arm64-a7-fips", "lint_linux-arm64"] script: - *setup_apt_signing_key - set +x # make sure we don't output the creds to the build log @@ -105,6 +106,7 @@ deploy_rpm_testing-a7_x64: [ "installer_rpm-amd64", "agent_rpm-x64-a7", + "agent_rpm-x64-a7-fips", "iot_agent_rpm-x64", "dogstatsd_rpm-x64", "lint_linux-x64", @@ -121,7 +123,7 @@ deploy_rpm_testing-a7_arm64: - !reference [.manual] extends: - .deploy_rpm_testing-a7 - needs: ["installer_rpm-arm64", "agent_rpm-arm64-a7", "lint_linux-arm64"] + needs: ["installer_rpm-arm64", "agent_rpm-arm64-a7", "agent_rpm-arm64-a7-fips", "lint_linux-arm64"] script: - *setup_rpm_signing_key - set +x @@ -141,6 +143,7 @@ 
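The FIPS MSI matrix above anchors each `--run` pattern with `$` (for example `TestFIPSAgent$`). Assuming `EXTRA_PARAMS` is ultimately forwarded to a `go test -run` invocation, the anchor is what keeps the three matrix entries disjoint, because `-run` treats its argument as a regular expression matched anywhere in the test name:

```bash
# Hypothetical invocations; the e2e runner assembles the real command line.

# Unanchored: selects TestFIPSAgent *and* TestFIPSAgentDoesNotInstallOverAgent.
go test -run 'TestFIPSAgent' ./...

# Anchored, as in the matrix above: selects only the test named exactly TestFIPSAgent.
go test -run 'TestFIPSAgent$' ./...
```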
deploy_suse_rpm_testing_x64-a7: "agent_suse-x64-a7", "iot_agent_suse-x64", "dogstatsd_suse-x64", + "agent_suse-x64-a7-fips", "lint_linux-x64", ] variables: @@ -160,7 +163,7 @@ deploy_suse_rpm_testing_arm64-a7: stage: e2e_deploy image: registry.ddbuild.io/ci/datadog-agent-buildimages/gitlab_agent_deploy$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] - needs: ["installer_suse_rpm-arm64", "agent_suse-arm64-a7", "lint_linux-arm64"] + needs: ["installer_suse_rpm-arm64", "agent_suse-arm64-a7", "agent_suse-arm64-a7-fips", "lint_linux-arm64"] variables: DD_PIPELINE_ID: $CI_PIPELINE_ID-a7 before_script: @@ -187,6 +190,7 @@ deploy_windows_testing-a7: --recursive --exclude "*" --include "datadog-agent-7.*.msi" + --include "datadog-agent-upgrade-test-7.*.msi" --include "datadog-installer-*-1-x86_64.msi" --include "datadog-installer-*-1-x86_64.exe" $OMNIBUS_PACKAGE_DIR s3://$WIN_S3_BUCKET/$WINDOWS_TESTING_S3_BUCKET_A7 diff --git a/.gitlab/functional_test/regression_detector.yml b/.gitlab/functional_test/regression_detector.yml index ed86bbc9fa0c0..cd13e211e8d75 100644 --- a/.gitlab/functional_test/regression_detector.yml +++ b/.gitlab/functional_test/regression_detector.yml @@ -22,7 +22,7 @@ single-machine-performance-regression_detector: - outputs/decision_record.md # for posterity, this is appended to final PR comment when: always variables: - SMP_VERSION: 0.19.3 + SMP_VERSION: 0.20.1 # See 'decision_record.md' for the determination of whether this job passes or fails. allow_failure: false script: diff --git a/.gitlab/integration_test/windows.yml b/.gitlab/integration_test/windows.yml index 31e9eab840101..f1b7386ef1a21 100644 --- a/.gitlab/integration_test/windows.yml +++ b/.gitlab/integration_test/windows.yml @@ -5,7 +5,7 @@ - !reference [.except_mergequeue] - when: on_success needs: ["go_deps", "go_tools_deps"] - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default before_script: - $tmpfile = [System.IO.Path]::GetTempFileName() - (& "$CI_PROJECT_DIR\tools\ci\fetch_secret.ps1" -parameterName "$Env:VCPKG_BLOB_SAS_URL" -tempFile "$tmpfile") @@ -29,8 +29,8 @@ -e GOMODCACHE="c:\modcache" -e VCPKG_BINARY_SOURCES="clear;x-azblob,${vcpkgBlobSaSUrl}" -e PIP_INDEX_URL=${PIP_INDEX_URL} - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} - c:\mnt\tasks\winbuildscripts\integrationtests.bat + ${WINBUILDIMAGE} + powershell.exe -c "c:\mnt\tasks\winbuildscripts\Invoke-IntegrationTests.ps1 -BuildOutOfSource 1 -CheckGoVersion 1 -InstallDeps 1" - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } integration_tests_windows-x64: diff --git a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml index 017863198dad0..1762ac56972ae 100644 --- a/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml +++ b/.gitlab/internal_kubernetes_deploy/internal_kubernetes_deploy.yml @@ -12,7 +12,7 @@ internal_kubernetes_deploy_experimental: when: always - if: $CI_COMMIT_BRANCH != "main" when: never - - if: $DDR != "true" + - if: $DDR_WORKFLOW_ID == null when: never - if: $APPS !~ "/^datadog-agent/" when: never @@ -74,7 +74,7 @@ notify-slack: when: always - if: $CI_COMMIT_BRANCH != "main" when: never - - if: $DDR != "true" + - if: $DDR_WORKFLOW_ID == null when: never - if: $APPS !~ "/^datadog-agent/" when: never diff --git 
a/.gitlab/kernel_matrix_testing/common.yml b/.gitlab/kernel_matrix_testing/common.yml index 032950ad4b929..27d5f339cfcd6 100644 --- a/.gitlab/kernel_matrix_testing/common.yml +++ b/.gitlab/kernel_matrix_testing/common.yml @@ -218,12 +218,7 @@ # setup_env job hasn't finished. This causes instances to be leftover for more time than necessary. - inv kmt.wait-for-setup-job --pipeline-id $CI_PIPELINE_ID --arch $ARCH --component $TEST_COMPONENT - aws ec2 describe-instances --filters $FILTER_TEAM $FILTER_MANAGED $FILTER_PIPELINE $FILTER_ARCH $FILTER_INSTANCE_TYPE $FILTER_TEST_COMPONENT --output json --query $QUERY_INSTANCE_IDS | tee -a instance.json - - INSTANCE_ID="$(jq -r '.[0][0]' < instance.json)" - - echo ${INSTANCE_ID} - - | - if [[ "${INSTANCE_ID}" != "" ]] && [[ "${INSTANCE_ID}" != "null" ]]; then - aws ec2 terminate-instances --instance-ids "${INSTANCE_ID}" - fi + - cat instance.json | jq -r 'map(.[]) | .[]' | grep -v "null" | xargs -n 1 -t aws ec2 terminate-instances --instance-ids after_script: - DD_API_KEY=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $AGENT_API_KEY_ORG2 token) || exit $?; export DD_API_KEY - !reference [.tag_kmt_ci_job] diff --git a/.gitlab/kernel_matrix_testing/system_probe.yml b/.gitlab/kernel_matrix_testing/system_probe.yml index d86742b6e44e4..39f3fe8f3c7b1 100644 --- a/.gitlab/kernel_matrix_testing/system_probe.yml +++ b/.gitlab/kernel_matrix_testing/system_probe.yml @@ -230,6 +230,7 @@ kmt_run_sysprobe_tests_x64: - "debian_12" - "centos_7.9" - "centos_8" + - "rocky_9.4" TEST_SET: ["only_usm", "no_usm"] after_script: - !reference [.collect_outcomes_kmt] @@ -265,6 +266,7 @@ kmt_run_sysprobe_tests_arm64: - "debian_12" - "centos_7.9" - "centos_8" + - "rocky_9.4" TEST_SET: ["only_usm", "no_usm"] after_script: - !reference [.collect_outcomes_kmt] diff --git a/.gitlab/lint/macos.yml b/.gitlab/lint/macos.yml index 0743c7c7790cc..d4da06b2a518d 100644 --- a/.gitlab/lint/macos.yml +++ b/.gitlab/lint/macos.yml @@ -13,7 +13,7 @@ include: lint_macos_gitlab_amd64: extends: .lint_macos_gitlab - tags: ["macos:monterey-amd64", "specific:true"] + tags: ["macos:ventura-amd64", "specific:true"] rules: - !reference [.except_mergequeue] - when: on_success @@ -24,4 +24,4 @@ lint_macos_gitlab_arm64: rules: - !reference [.on_main] - !reference [.manual] - tags: ["macos:monterey-arm64", "specific:true"] + tags: ["macos:ventura-arm64", "specific:true"] diff --git a/.gitlab/lint/windows.yml b/.gitlab/lint/windows.yml index 2ae2710902f00..f532e79e2feb6 100644 --- a/.gitlab/lint/windows.yml +++ b/.gitlab/lint/windows.yml @@ -3,7 +3,7 @@ .lint_windows_base: stage: lint needs: ["go_deps", "go_tools_deps"] - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default script: - $ErrorActionPreference = "Stop" - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' @@ -20,7 +20,8 @@ -e CI_PIPELINE_ID=${CI_PIPELINE_ID} -e CI_PROJECT_NAME=${CI_PROJECT_NAME} -e GOMODCACHE="c:\modcache" - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:$Env:DATADOG_AGENT_WINBUILDIMAGES c:\mnt\tasks\winbuildscripts\lint.bat + ${WINBUILDIMAGE} + powershell.exe -c "c:\mnt\tasks\winbuildscripts\Invoke-Linters.ps1 -BuildOutOfSource 1 -CheckGoVersion 1 -InstallDeps 1" - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } lint_windows-x64: diff --git a/.gitlab/package_build/installer.yml 
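The kernel-matrix-testing cleanup above drops the "terminate the first instance only" logic in favour of flattening every ID in `instance.json` and terminating each of them. A sketch of the jq pipeline with a made-up `instance.json` in the nested shape that `--query $QUERY_INSTANCE_IDS` is assumed to return (one inner list per reservation), and `echo` standing in for the real `aws ec2 terminate-instances` call:

```bash
#!/usr/bin/env bash
set -euo pipefail

cat > instance.json <<'EOF'
[["i-0123456789abcdef0", "i-0fedcba9876543210"], [null], ["i-00aa11bb22cc33dd4"]]
EOF

# map(.[]) flattens the per-reservation lists, .[] streams one ID per line,
# grep drops empty reservations, and xargs terminates each instance individually.
cat instance.json \
  | jq -r 'map(.[]) | .[]' \
  | grep -v "null" \
  | xargs -n 1 -t echo aws ec2 terminate-instances --instance-ids
```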
b/.gitlab/package_build/installer.yml index 9073336a735ff..199cc52b82414 100644 --- a/.gitlab/package_build/installer.yml +++ b/.gitlab/package_build/installer.yml @@ -101,11 +101,14 @@ installer-install-scripts: RELEASE_VERSION: "$RELEASE_VERSION_7" script: - !reference [.retrieve_linux_go_deps] - - echo "About to build for $RELEASE_VERSION" + - VERSION="$(inv agent.version --url-safe)-1" || exit $? + - echo "About to build for $VERSION" - mkdir -p $OMNIBUS_PACKAGE_DIR - - inv -e installer.build-linux-script "databricks" "$RELEASE_VERSION" - - inv -e installer.build-linux-script "emr" "$RELEASE_VERSION" - - mv ./bin/installer/install-*.sh $OMNIBUS_PACKAGE_DIR/ + - inv -e installer.build-linux-script "default" "$VERSION" + - inv -e installer.build-linux-script "databricks" "$VERSION" + - inv -e installer.build-linux-script "emr" "$VERSION" + - inv -e installer.build-linux-script "dataproc" "$VERSION" + - mv ./bin/installer/install*.sh $OMNIBUS_PACKAGE_DIR/ - ls -la $OMNIBUS_PACKAGE_DIR artifacts: expire_in: 2 weeks @@ -193,7 +196,7 @@ installer-arm64-oci: windows-installer-amd64: stage: package_build - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default needs: ["go_mod_tidy_check", "go_deps"] rules: - !reference [.except_mergequeue] @@ -221,7 +224,7 @@ windows-installer-amd64: -e S3_OMNIBUS_CACHE_BUCKET="$S3_OMNIBUS_CACHE_BUCKET" -e USE_S3_CACHING="$USE_S3_CACHING" -e API_KEY_ORG2=${API_KEY_ORG2} - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} + ${WINBUILDIMAGE} powershell -C "c:\mnt\tasks\winbuildscripts\Build-InstallerPackages.ps1 -BuildOutOfSource 1 -InstallDeps 1 -CheckGoVersion 1" after_script: - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' diff --git a/.gitlab/package_build/windows.yml b/.gitlab/package_build/windows.yml index ea458c9db77a7..0d3c936d8112e 100644 --- a/.gitlab/package_build/windows.yml +++ b/.gitlab/package_build/windows.yml @@ -1,7 +1,7 @@ --- .windows_msi_base: stage: package_build - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default needs: ["go_mod_tidy_check", "go_deps"] script: - $ErrorActionPreference = 'Stop' @@ -38,8 +38,10 @@ -e API_KEY_ORG2=${API_KEY_ORG2} -e OMNIBUS_GIT_CACHE_DIR=${Env:TEMP}/${CI_PIPELINE_ID}/omnibus-git-cache -e AGENT_FLAVOR=${AGENT_FLAVOR} - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} - powershell -C "c:\mnt\tasks\winbuildscripts\Build-AgentPackages.ps1 -BuildOutOfSource 1 -InstallDeps 1 -CheckGoVersion 1" + -e OMNIBUS_SOFTWARE_VERSION="${OMNIBUS_SOFTWARE_VERSION}" + -e OMNIBUS_RUBY_VERSION="${OMNIBUS_RUBY_VERSION}" + ${WINBUILDIMAGE} + powershell -C "c:\mnt\tasks\winbuildscripts\Build-AgentPackages.ps1 -BuildOutOfSource 1 -InstallDeps 1 -CheckGoVersion 1 -BuildUpgrade 1" - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - get-childitem omnibus\pkg - !reference [.upload_sbom_artifacts_windows] @@ -85,7 +87,7 @@ windows_zip_agent_binaries_x64-a7: rules: - !reference [.except_mergequeue] - when: on_success - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default needs: ["go_mod_tidy_check", "go_deps"] variables: ARCH: "x64" @@ -121,7 +123,7 @@ windows_zip_agent_binaries_x64-a7: -e 
BUNDLE_MIRROR__RUBYGEMS__ORG=${BUNDLE_MIRROR__RUBYGEMS__ORG} -e PIP_INDEX_URL=${PIP_INDEX_URL} -e API_KEY_ORG2=${API_KEY_ORG2} - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} + ${WINBUILDIMAGE} powershell -C "c:\mnt\tasks\winbuildscripts\Build-OmnibusTarget.ps1 -BuildOutOfSource 1 -InstallDeps 1 -CheckGoVersion 1" - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } - get-childitem omnibus\pkg diff --git a/.gitlab/packaging/rpm.yml b/.gitlab/packaging/rpm.yml index 883f3910cc086..2bb7a0bb32015 100644 --- a/.gitlab/packaging/rpm.yml +++ b/.gitlab/packaging/rpm.yml @@ -111,14 +111,14 @@ agent_suse-x64-a7-fips: extends: [.package_suse_rpm_common, .package_rpm_agent_7, .package_rpm_x86] needs: ["datadog-agent-7-x64-fips"] variables: - OMNIBUS_EXTRA_ARGS: "--flavor fips" + OMNIBUS_EXTRA_ARGS: "--host-distribution=suse --flavor fips" DD_PROJECT: agent agent_suse-arm64-a7-fips: extends: [.package_suse_rpm_common, .package_rpm_agent_7, .package_rpm_arm64] needs: ["datadog-agent-7-arm64-fips"] variables: - OMNIBUS_EXTRA_ARGS: "--flavor fips" + OMNIBUS_EXTRA_ARGS: "--host-distribution=suse --flavor fips" DD_PROJECT: agent installer_rpm-arm64: diff --git a/.gitlab/pkg_metrics/pkg_metrics.yml b/.gitlab/pkg_metrics/pkg_metrics.yml index ed09fa8ed1ee2..949382cf90c7f 100644 --- a/.gitlab/pkg_metrics/pkg_metrics.yml +++ b/.gitlab/pkg_metrics/pkg_metrics.yml @@ -82,6 +82,12 @@ check_pkg_size: image: registry.ddbuild.io/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES tags: ["arch:amd64"] rules: + - if: $CI_COMMIT_BRANCH == "main" + when: on_success + allow_failure: true + - if: $CI_COMMIT_BRANCH =~ /^[0-9]+\.[0-9]+\.x$/ + when: on_success + allow_failure: true - !reference [.except_mergequeue] - when: on_success needs: diff --git a/.gitlab/powershell_script_deploy/powershell_script_deploy.yml b/.gitlab/powershell_script_deploy/powershell_script_deploy.yml deleted file mode 100644 index 5269045adde05..0000000000000 --- a/.gitlab/powershell_script_deploy/powershell_script_deploy.yml +++ /dev/null @@ -1,28 +0,0 @@ -# We could (should?) piggy back on deploy_installer_packages_windows-x64 to also deploy this -# script to $S3_RELEASE_INSTALLER_ARTIFACTS_URI and have the agent-release-management repository -# publish it to a production bucket like ddagent-windows-stable. -# For now we can use the dd-agent-mstesting bucket to store the PowerShell script. -powershell_script_deploy: - image: registry.ddbuild.io/ci/datadog-agent-buildimages/gitlab_agent_deploy$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - stage: install_script_deploy - rules: - !reference [.manual] - needs: ["powershell_script_signing"] - script: - - ls $WINDOWS_POWERSHELL_DIR - - $S3_CP_CMD $WINDOWS_POWERSHELL_DIR/Install-Datadog.ps1 s3://dd-agent-mstesting/Install-Datadog.ps1 --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 - -# Technically deploy_installer_packages_windows-x64 also uploads the bootstrapper to $S3_RELEASE_INSTALLER_ARTIFACTS_URI -# but it requires changes in agent-release-management to deploy to a production bucket like ddagent-windows-stable. -# For now we can use the dd-agent-mstesting bucket to store the bootstrapper. 
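The `check_pkg_size` rules added above make the job non-blocking on `main` and on release branches while leaving other branches on the existing rules. The branch regex is easy to misread, so here is a small check of which branch names take the new `allow_failure` path (branch names are examples only):

```bash
#!/usr/bin/env bash
for branch in main 7.61.x 7.61.0 user/my-feature; do
  if [[ "$branch" == "main" || "$branch" =~ ^[0-9]+\.[0-9]+\.x$ ]]; then
    echo "$branch -> check_pkg_size runs, failures allowed"
  else
    echo "$branch -> falls through to the existing rules"
  fi
done
```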
-windows_bootstrapper_deploy: - image: registry.ddbuild.io/ci/datadog-agent-buildimages/gitlab_agent_deploy$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES - tags: ["arch:amd64"] - stage: install_script_deploy - rules: - !reference [.manual] - needs: ["windows-installer-amd64"] - script: - - ls $OMNIBUS_PACKAGE_DIR - - $S3_CP_CMD $OMNIBUS_PACKAGE_DIR/datadog-installer-*-1-x86_64.exe s3://dd-agent-mstesting/datadog-installer-x86_64.exe --grants read=uri=http://acs.amazonaws.com/groups/global/AllUsers full=id=3a6e02b08553fd157ae3fb918945dd1eaae5a1aa818940381ef07a430cf25732 diff --git a/.gitlab/powershell_script_signing/powershell_script_signing.yml b/.gitlab/powershell_script_signing/powershell_script_signing.yml deleted file mode 100644 index e5e505b14b7f8..0000000000000 --- a/.gitlab/powershell_script_signing/powershell_script_signing.yml +++ /dev/null @@ -1,16 +0,0 @@ -powershell_script_signing: - tags: ["runner:windows-docker", "windowsversion:1809"] - stage: choco_and_install_script_build - needs: [] - variables: - ARCH: "x64" - rules: - !reference [.manual] - artifacts: - expire_in: 2 weeks - paths: - - $WINDOWS_POWERSHELL_DIR - script: - - mkdir $WINDOWS_POWERSHELL_DIR - - docker run --rm -v "$(Get-Location):c:\mnt" -e AWS_NETWORKING=true -e IS_AWS_CONTAINER=true registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} powershell -C "dd-wcs sign \mnt\tools\windows\DatadogAgentInstallScript\Install-Datadog.ps1" - - copy .\tools\windows\DatadogAgentInstallScript\Install-Datadog.ps1 $WINDOWS_POWERSHELL_DIR\Install-Datadog.ps1 diff --git a/.gitlab/source_test/include.yml b/.gitlab/source_test/include.yml index 629e88b551294..3a7f051b856c1 100644 --- a/.gitlab/source_test/include.yml +++ b/.gitlab/source_test/include.yml @@ -13,4 +13,5 @@ include: - .gitlab/source_test/slack.yml - .gitlab/source_test/golang_deps_diff.yml - .gitlab/source_test/notify.yml + - .gitlab/source_test/protobuf.yml - .gitlab/source_test/tooling_unit_tests.yml diff --git a/.gitlab/source_test/linux.yml b/.gitlab/source_test/linux.yml index 1a6a9696814b4..aed975dccc481 100644 --- a/.gitlab/source_test/linux.yml +++ b/.gitlab/source_test/linux.yml @@ -174,4 +174,4 @@ new-e2e-unit-tests: KUBERNETES_CPU_REQUEST: 6 # Not using the entrypoint script for the e2e runner image FF_KUBERNETES_HONOR_ENTRYPOINT: false - timeout: 10m + timeout: 20m # Not less than 20m because job startup can take time. 
diff --git a/.gitlab/source_test/macos.yml b/.gitlab/source_test/macos.yml index 1a92a354d47b8..0da65270b98fb 100644 --- a/.gitlab/source_test/macos.yml +++ b/.gitlab/source_test/macos.yml @@ -5,6 +5,8 @@ include: .tests_macos_gitlab: stage: source_test extends: .macos_gitlab + rules: + - !reference [.fast_on_dev_branch_only] needs: ["go_deps", "go_tools_deps"] variables: TEST_OUTPUT_FILE: test_output.json @@ -29,7 +31,7 @@ include: tests_macos_gitlab_amd64: extends: .tests_macos_gitlab - tags: ["macos:monterey-amd64", "specific:true"] + tags: ["macos:ventura-amd64", "specific:true"] after_script: - !reference [.vault_login] - !reference [.select_python_env_commands] @@ -40,7 +42,7 @@ tests_macos_gitlab_arm64: extends: .tests_macos_gitlab rules: !reference [.manual] - tags: ["macos:monterey-arm64", "specific:true"] + tags: ["macos:ventura-arm64", "specific:true"] allow_failure: true after_script: - !reference [.vault_login] diff --git a/.gitlab/source_test/protobuf.yml b/.gitlab/source_test/protobuf.yml new file mode 100644 index 0000000000000..75a0e3deea163 --- /dev/null +++ b/.gitlab/source_test/protobuf.yml @@ -0,0 +1,8 @@ +protobuf_test: + stage: source_test + image: registry.ddbuild.io/ci/datadog-agent-buildimages/deb_x64$DATADOG_AGENT_BUILDIMAGES_SUFFIX:$DATADOG_AGENT_BUILDIMAGES + tags: ["arch:amd64"] + needs: [] + script: + - inv install-tools + - inv -e generate-protobuf diff --git a/.gitlab/source_test/windows.yml b/.gitlab/source_test/windows.yml index b0565ce028273..a7c402b78744d 100644 --- a/.gitlab/source_test/windows.yml +++ b/.gitlab/source_test/windows.yml @@ -6,7 +6,7 @@ - !reference [.except_disable_unit_tests] - !reference [.fast_on_dev_branch_only] needs: ["go_deps", "go_tools_deps"] - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default script: - $ErrorActionPreference = "Stop" - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' @@ -43,8 +43,8 @@ -e CODECOV_TOKEN="${CODECOV_TOKEN}" -e S3_PERMANENT_ARTIFACTS_URI="${S3_PERMANENT_ARTIFACTS_URI}" -e COVERAGE_CACHE_FLAG="${COVERAGE_CACHE_FLAG}" - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} - c:\mnt\tasks\winbuildscripts\unittests.bat + ${WINBUILDIMAGE} + powershell.exe -c "c:\mnt\tasks\winbuildscripts\Invoke-UnitTests.ps1 -BuildOutOfSource 1 -CheckGoVersion 1 -InstallDeps 1 -UploadCoverage 1 -UploadTestResults 1" - If ($lastExitCode -ne "0") { exit "$lastExitCode" } variables: TEST_OUTPUT_FILE: test_output.json @@ -65,7 +65,7 @@ tests_windows-x64: .tests_windows_sysprobe: stage: source_test needs: ["go_deps", "go_tools_deps"] - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default script: - $ErrorActionPreference = "Stop" - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' @@ -79,7 +79,7 @@ tests_windows-x64: -e SIGN_WINDOWS_DD_WCS=true -e GOMODCACHE="c:\modcache" -e PIP_INDEX_URL=${PIP_INDEX_URL} - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} + ${WINBUILDIMAGE} c:\mnt\tasks\winbuildscripts\sysprobe.bat - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } artifacts: @@ -90,7 +90,7 @@ tests_windows-x64: 
.tests_windows_secagent: stage: source_test needs: ["go_deps", "go_tools_deps"] - tags: ["runner:windows-docker", "windowsversion:1809"] + extends: .windows_docker_default script: - $ErrorActionPreference = "Stop" - '$_instance_id = (iwr -UseBasicParsing http://169.254.169.254/latest/meta-data/instance-id).content ; Write-Host "Running on instance $($_instance_id)"' @@ -104,7 +104,7 @@ tests_windows-x64: -e SIGN_WINDOWS_DD_WCS=true -e GOMODCACHE="c:\modcache" -e PIP_INDEX_URL=${PIP_INDEX_URL} - registry.ddbuild.io/ci/datadog-agent-buildimages/windows_1809_${ARCH}${Env:DATADOG_AGENT_WINBUILDIMAGES_SUFFIX}:${Env:DATADOG_AGENT_WINBUILDIMAGES} + ${WINBUILDIMAGE} c:\mnt\tasks\winbuildscripts\secagent.bat - If ($lastExitCode -ne "0") { throw "Previous command returned $lastExitCode" } artifacts: diff --git a/.gitlab/trigger_release/trigger_release.yml b/.gitlab/trigger_release/trigger_release.yml index 0ecd5e0b02606..d994f520651ce 100644 --- a/.gitlab/trigger_release/trigger_release.yml +++ b/.gitlab/trigger_release/trigger_release.yml @@ -37,8 +37,6 @@ trigger_auto_staging_release: AUTO_RELEASE: "true" TARGET_REPO: staging rules: - - if: $DDR == "true" - when: never - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+){0,1}$/ when: never - !reference [.on_deploy] @@ -55,3 +53,53 @@ trigger_manual_prod_release: - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+){0,1}$/ when: never - !reference [.on_deploy_stable_or_beta_manual_auto_on_stable] + +include: + - https://gitlab-templates.ddbuild.io/slack-notifier/v3-sdm/template.yml + +.setup_github_app_agent_platform_auto_pr: + # GitHub App rate-limits are per-app. Since we are rarely calling the job, we are only using the instance 2 + - | + GITHUB_KEY_B64=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_2 key_b64) || exit $?; export GITHUB_KEY_B64 + GITHUB_APP_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_2 app_id) || exit $?; export GITHUB_APP_ID + GITHUB_INSTALLATION_ID=$($CI_PROJECT_DIR/tools/ci/fetch_secret.sh $MACOS_GITHUB_APP_2 installation_id) || exit $?; export GITHUB_INSTALLATION_ID + echo "Using GitHub App instance 2" + +generate_windows_gitlab_runner_bump_pr: + stage: trigger_release + extends: .slack-notifier-base + needs: ["trigger_auto_staging_release"] + tags: ["arch:amd64"] + rules: + - if: $DDR_WORKFLOW_ID != null + when: never + - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+){0,1}$/ + when: never + - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$/ + + script: + # We are using the agent platform auto PR github app to access the buildenv repository (already used for macOS builds) + - !reference [.setup_github_app_agent_platform_auto_pr] + - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt + - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache . 
+ - inv -e github.update-windows-runner-version + +# Manual job to generate the gitlab bump pr on buildenv if trigger_auto_staging_release fails +generate_windows_gitlab_runner_bump_pr_manual: + stage: trigger_release + extends: .slack-notifier-base + needs: ["trigger_auto_staging_release"] + tags: ["arch:amd64"] + rules: + - if: $DDR_WORKFLOW_ID != null + when: never + - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+-v[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+){0,1}$/ + when: never + - if: $CI_COMMIT_TAG =~ /^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$/ + when: manual + script: + # We are using the agent platform auto PR github app to access the buildenv repository (already used for macOS builds) + - !reference [.setup_github_app_agent_platform_auto_pr] + - python3 -m pip install -r requirements.txt -r tasks/libs/requirements-notifications.txt + - $S3_CP_CMD $S3_ARTIFACTS_URI/agent-version.cache . + - inv -e github.update-windows-runner-version diff --git a/.go-version b/.go-version index ac1df3fce34b7..ca8ec414e7872 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.23.3 +1.23.5 diff --git a/.golangci.yml b/.golangci.yml index ece414c77fc1b..f90d001655e02 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -59,6 +59,8 @@ issues: # We are using it and it's not clear how to replace it. - text: "Temporary has been deprecated since Go 1.18" linters: [staticcheck] + - text: ".IsSet is deprecated: this method will be removed once all settings have a default, use 'IsConfigured' instead" + linters: [staticcheck] # Treat this list as a TODO for fixing issues with pkgconfigusage custom linter # DO NOT ADD NEW ENTRIES - path: comp/api/api/apiimpl/internal/config/endpoint.go @@ -565,6 +567,7 @@ issues: - path: comp/dogstatsd/packets/packet_manager_windows.go linters: - pkgconfigusage + linters: disable-all: true enable: @@ -605,6 +608,8 @@ linters-settings: desc: "Not really forbidden to use, but it is usually imported by mistake instead of github.com/stretchr/testify/assert, and confusing since it actually has the behavior of github.com/stretchr/testify/require" - pkg: "debug/elf" desc: "prefer pkg/util/safeelf to prevent panics during parsing" + - pkg: "golang.org/x/exp/slices" + desc: "use the std slices package instead" logger: files: - "!**/pkg/util/log/**" diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c665daa0c6a33..67e99f27e765e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -38,6 +38,7 @@ repos: hooks: - id: go-fmt args: [ "-w", "-s" ] + exclude: ^.*.(pb.go|_gen.go)$ - repo: https://github.com/pre-commit/pre-commit-hooks rev: 2c9f875913ee60ca25ce70243dc24d5b6415598c # v4.6.0 hooks: diff --git a/.wwhrd.yml b/.wwhrd.yml index 85b1253962755..9ca0e52d9758e 100644 --- a/.wwhrd.yml +++ b/.wwhrd.yml @@ -45,4 +45,4 @@ exceptions: additional: # list here paths to additional licenses - golang/go: "raw.githubusercontent.com/golang/go/go1.23.3/LICENSE" + golang/go: "raw.githubusercontent.com/golang/go/go1.23.5/LICENSE" diff --git a/CHANGELOG-DCA.rst b/CHANGELOG-DCA.rst index 7b5abd00a3bc2..44698011d00bb 100644 --- a/CHANGELOG-DCA.rst +++ b/CHANGELOG-DCA.rst @@ -2,6 +2,51 @@ Release Notes ============= +.. _Release Notes_7.61.0: + +7.61.0 +====== + +.. _Release Notes_7.61.0_Prelude: + +Prelude +------- + +Released on: 2025-01-13 +Pinned to datadog-agent v7.61.0: `CHANGELOG `_. + +.. _Release Notes_7.61.0_New Features: + +New Features +------------ + +- Implements the Kubernetes Admission Events webhooks. 
These new webhooks will emit Datadog Events + when receiving Validation Admission requests. They will track deployment operations made by non-system + users. + The webhooks are controlled by the `admission_controller.kubernetes_admission_events.enabled` setting. + +- The cluster-agent can now collect pod disruption budgets from the cluster. + + +.. _Release Notes_7.61.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- Cluster Agent: ``DatadogAgent`` custom resource, cluster Agent deployment, and node Agent daemonset manifests are now added to the flare archive when the Cluster Agent is deployed with the Datadog Operator (version 1.11.0+). + +- Cluster Agent: Don't overwrite the LD_PRELOAD environment variable if it's already set; append the path to Datadog's injection library instead. + + +.. _Release Notes_7.61.0_Bug Fixes: + +Bug Fixes +--------- + +- The auto-instrumentation webhook no longer injects the default environment + variables when disabled. + + .. _Release Notes_7.60.1: 7.60.1 @@ -148,7 +193,7 @@ Bug Fixes - Fixed an issue that prevented the Kubernetes autoscaler from evicting pods injected by the Admission Controller. - + .. _Release Notes_7.57.1: @@ -206,7 +251,7 @@ Bug Fixes - Library package versions for auto-instrumentation are now set to the latest major version of the library-package instead of `latest`. - + * java:v1 * dotnet:v2 * python:v2 diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1def73a37b2e1..cee121731e627 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,205 @@ Release Notes ============= +.. _Release Notes_7.61.0: + +7.61.0 +====== + +.. _Release Notes_7.61.0_Prelude: + +Prelude +------- + +Released on: 2025-01-13 + +- Please refer to the `7.61.0 tag on integrations-core `_ for the list of changes on the Core Checks + + +.. _Release Notes_7.61.0_Upgrade Notes: + +Upgrade Notes +------------- + +- Upgraded JMXFetch to `0.49.6 `_ which fixes a ``NullPointerException`` on + JBoss when the user and password are not set. See `0.49.6 `_ for more details. + +- Windows containers were updated to use OpenJDK 11.0.25+9. + + +.. _Release Notes_7.61.0_New Features: + +New Features +------------ + +- Add metrics origins for Nvidia Nim integration. + +- APM: New configuration apm_config.obfuscation.credit_cards.keep_values (DD_APM_OBFUSCATION_CREDIT_CARDS_KEEP_VALUES) + can be used to skip specific tag keys that are known to never contain credit card numbers. This is especially useful + in cases where a span tag value is a number that triggers false positives from the credit card obfuscator. + +- Add new metric, ``container.restarts``, which indicates the number of times a container has been restarted due to the restart policy. + For more details: https://docs.docker.com/engine/containers/start-containers-automatically/. + +- APM: Introducing the Error Tracking Standalone config option. Only span chunks + that contain errors or exception OpenTelemetry span events are taken into + consideration by sampling.
+ +- Add new windows images for LTSC 2019 and LTSC 2022: + - `datadog-agent:7-servercore-ltsc2019-amd64` + - `datadog-agent:7-servercore-ltsc2022-amd64` + - `datadog-agent:7-servercore-ltsc2019-jmx-amd64` + - `datadog-agent:7-servercore-ltsc2022-jmx-amd64` + - `datadog-agent:latest-servercore-ltsc2019-jmx` + - `datadog-agent:latest-servercore-ltsc2022-jmx` + - `datadog-agent:latest-servercore-ltsc2019` + - `datadog-agent:latest-servercore-ltsc2022` + - `datadog-agent:7.X.Y-ltsc2019` + - `datadog-agent:7.X.Y-ltsc2022` + - `datadog-agent:7.X.Y-ltsc2019-jmx` + - `datadog-agent:7.X.Y-ltsc2022-jmx` + - `datadog-agent:7.X.Y-servercore-ltsc2019` + - `datadog-agent:7.X.Y-servercore-ltsc2022` + - `datadog-agent:7.X.Y-servercore-ltsc2019-jmx` + - `datadog-agent:7.X.Y-servercore-ltsc2022-jmx` + - `datadog-agent:latest-ltsc2019` + - `datadog-agent:latest-ltsc2022` + +- [ha-agent] Add haagent component used for HA Agent feature. + +- Added support for collecting container image metadata when running on a CRI-O runtime. + +- USM now monitors TLS traffic encrypted with Go TLS by default. + To disable this feature, set the `service_monitoring_config.tls.go.enabled` + configuration option to false. + +- USM now monitors traffic encrypted with Istio mTLS by default. + To disable this feature, set the `service_monitoring_config.tls.istio.enabled` configuration option to false. + +- Introduced a new configuration variable `logs_config.http_protocol`, allowing users to enforce HTTP/1.1 for outgoing HTTP connections in the Datadog Agent. This provides better control over transport protocols and improves compatibility with systems that do not support HTTP/2. + By default, the log agent will now attempt to use HTTP/2 (unless a proxy is configured) and fall back to the best available protocol if HTTP/2 is not supported. + +- Added a new feature flag `enable_operation_and_resource_name_logic_v2` in DD_APM_FEATURES. Enabling this flag modifies the logic for computing operation and resource names from OTLP spans to produce shorter, more readable names and improve alignment with OpenTelemetry specifications. + +- Add support for PHP Single Step Instrumentation in Kubernetes (not enabled by default) + + +.. _Release Notes_7.61.0_Enhancement Notes: + +Enhancement Notes +----------------- + +- [ha-agent] Run HA enabled integrations only on leader Agent + +- [ha-agent] Add agent_group tag to datadog.agent.running metric + +- Add new host tag `provider_kind` from the value of `DD_PROVIDER_KIND` for Agents running in GCE. + +- Add ``query_timeout`` to customize the timeout for queries in the Oracle check. + Previously, this was fixed at 20,000 seconds. + +- Add ability to show Agent telemetry payloads to be sent by Agent + if the telemetry is enabled. One can run it with the following command: + `agent diagnose show-metadata agent-telemetry`. See + `docs ` for more details. + +- Convert Prometheus style Counters and Histograms used in Agent telemetry + from monotonically increasing to non-monotonic values (reset on each scrape). + In addition de-accumulate Prometheus Histogram bucket values on each scrape. + +- Added support for more than 100 Aurora clusters in a user's account when using database autodiscovery + +- Adds some information about the SNMP autodiscovery status in the Agent status. + +- Adds a dedicated CRI-O Workloadmeta collector, enabling metadata collection + for containers running on a CRI-O runtime. + +- Enables a cache for SQL and MongoDB obfuscation. 
This cache is enabled by default but can be disabled by setting `apm_config.obfuscation.cache.enabled` to `false`. + +- Improved logging to add visibility for latency and transport protocol + +- Add a new configuration option ``log_level`` for commands where the logger is disabled by default. + +- Adds initial Windows support for TCP probes in Network Path. + +- Query Aurora instances per cluster to allow up to 100 instances per cluster + rather than 100 instances total. + +- The AWS Lambda Extension is now able to read the full 128-bit trace ID + from the headers of the end-invocation HTTP request made by dd-trace or the + datadog-lambda-go library. + +- Standardized cluster check tagging across all environments, allowing DD_TAGS, DD_EXTRA_TAGS, DD_CLUSTER_CHECKS_EXTRA_TAGS, and DD_ORCHESTRATOR_EXPLORER_EXTRA_TAGS to apply to all cluster check data when operating on the Cluster Agent, Node Agent, or Cluster Checks Runner. + + +.. _Release Notes_7.61.0_Deprecation Notes: + +Deprecation Notes +----------------- + +- Deprecates the `apm_config.obfuscation.sql.cache` option in favor of `apm_config.obfuscation.cache`. + +- Remove deprecated config `otlp_config.metrics.instrumentation_library_metadata_as_tags`. Use `otlp_config.metrics.instrumentation_scope_metadata_as_tags` instead. + +- The remote tagger will attempt to connect to the core agent indefinitely until it is successful. + The ``remote_tagger_timeout_seconds`` configuration is removed, and the timeout is no longer configurable. + +- The remote tagger for the trace-agent and security-agent is now always enabled and can not be disabled + ``apm_config.remote_tagger``, ``security_agent.remote_tagger``, and ``event_monitoring_config.remote_tagger`` config entries are removed. + + +.. _Release Notes_7.61.0_Security Notes: + +Security Notes +-------------- + +- Fix CVE-2025-21613 + +- Update ``golang.org/x/crypto`` to fix CVE-2024-45337. + + +.. _Release Notes_7.61.0_Bug Fixes: + +Bug Fixes +--------- + +- Fix an issue where the remote workloadmeta was not receiving some unset + events for ECS containers, causing incorrect billing in CWS, CSPM, CSM Pro, CSM + Enterprise, and DevSecOps Enterprise Containers. + +- Corrects the method call for gauges to be Set instead of Add. + +- Fix Oracle execution plan collection failures caused by an out-of-range position column, which can occur if the execution plan is excessively large. + +- Fix excessive number of rows coming from active session history. + +- OTLP ingestion: Stop prefixing `http_server_duration`, `http_server_request_size` and `http_server_response_size` with `otelcol`. + +- Fixes the issue of disabled services producing an error message in the event log on start. Now produces an informational message. + +- Change `kubernetes.memory.working_set` and `kubernetes.memory.usage` + metrics to be of type gauge instead of rate. + + +.. _Release Notes_7.61.0_Other Notes: + +Other Notes +----------- + +- Add metric origins for Platform Integrations: Fly.io, Kepler, Octopus Deploy, and Scaphandre. + +- Extend Agent Telemetry to start reporting ``logs.sender_latency`` metric. + +- The `enable_receive_resource_spans_v2` flag now defaults to true in Converged Agent. This enables the refactored + version of the OTLP span receiver in trace agent, improves performance by 10%, and deprecates the following functionality: + - No longer checks for information about the resource in HTTP headers (ContainerID, Lang, LangVersion, Interpreter, LangVendor). 
+ - No longer checks for resource-related values (container, env, hostname) in span attributes. This previous behavior did not follow the OTel spec. + +- Bumps the default value for `kube_cache_sync_timeout_seconds` from 5 to 10 seconds. + +- Added origin for new Milvus integration. + + .. _Release Notes_7.60.1: 7.60.1 @@ -44,11 +243,11 @@ Upgrade Notes ------------- - * Parameter ``peer_tags_aggregation`` (a.k.a. environment variable ``DD_APM_PEER_TAGS_AGGREGATION``) is now enabled by default. This means that aggregation of peer related tags (e.g., `peer.service`, `db.instance`, etc.) now happens in the Agent, which enables statistics for Inferred Entities. If you want to disable this feature, set `peer_tags_aggregation` to `false` in your Agent configuration. - + * Parameter ``compute_stats_by_span_kind`` (a.k.a. environment variable ``DD_APM_COMPUTE_STATS_BY_SPAN_KIND``) is now enabled by default. This means spans with an eligible `span.kind` will have stats computed. If disabled, only top-level and measured spans will have stats computed. If you want to disable this feature, set `compute_stats_by_span_kind` to `false` in your Agent configuration. - + Note: When using ``peer_tags_aggregation`` and ``compute_stats_by_span_kind``, a high cardinality of peer tags or APM resources can contribute to higher CPU and memory consumption. If enabling both causes the Agent to consume too many resources, try disabling `compute_stats_by_span_kind` first. - + It is recommended that you update your tracing libraries according to the instructions `here `_ and set ``DD_TRACE_REMOVE_INTEGRATION_SERVICE_NAMES_ENABLED`` (or ``dd.trace.remove.integration-service-names.enabled``) to ``true``. - Upgraded JMXFetch to `0.49.5 `_ which adds support for ``UnloadedClassCount`` metric @@ -63,7 +262,7 @@ New Features - `Inferred Service dependencies `_ are now Generally Available (exiting Beta) and enabled by default. Inferred Services of all kinds now have trace metrics and are available in dependency maps. `apm_config.peer_tags_aggregation` and `apm_config.compute_stats_by_span_kind` both now default to `true` unless explicitly set to `false`. - Add `check_tag_cardinality` parameter config check. - + By default `check_tag_cardinality` is not set which doesn't change the behavior of the checks. Once it is set in pod annotaions, it overrides the cardinality value provided in the base agent configuration. Example of usage: @@ -71,7 +270,7 @@ New Features ad.datadoghq.com/redis.checks: | { "redisdb": { - "check_tag_cardinality": "high", + "check_tag_cardinality": "high", "instances": [ { "host": "%%host%%", @@ -100,7 +299,7 @@ Enhancement Notes based paths in Network Path. A cache of reverse DNS lookups is used to reduce the number of DNS queries. Additionally, reverse DNS lookups are now performed only - for private IPs and not for public IPs. + for private IPs and not for public IPs. - Agent flare now includes system-probe telemetry data via ``system-probe/system_probe_telemetry.log``. @@ -235,7 +434,7 @@ Enhancement Notes information about the Datadog Agent. This may include diagnostic logs and crash dumps of the Datadog Agent with obfuscated stack traces to support and further improve the Datadog Agent. - + More details could be found in the `docs `_ @@ -247,10 +446,10 @@ Enhancement Notes - Agents are now built with Go ``1.22.8``. 
-- While using the AWS Lambda Extension, when a Lambda Function is invoked by +- While using the AWS Lambda Extension, when a Lambda Function is invoked by a [properly instrumented][1] Step Function, the Lambda Function will create - its Trace and Parent IDs deterministically based on the Step Function's - execution context. + its Trace and Parent IDs deterministically based on the Step Function's + execution context. [1]: https://docs.datadoghq.com/serverless/step_functions/installation/?tab=custom "Install Serverless Monitoring for AWS Step Functions" - Updates default .NET library used for auto-instrumentation from v2 to v3 @@ -425,8 +624,8 @@ New Features - [oracle] Add the ``active_session_history`` configuration parameter to optionally ingest Oracle active session history samples instead of query sampling. - Added config option ``logs_config.tag_truncated_logs``. When - enabled, file logs will come with a tag ``truncated:true`` if - they were truncated by the Agent. + enabled, file logs will come with a tag ``truncated:true`` if + they were truncated by the Agent. .. _Release Notes_7.58.0_Enhancement Notes: @@ -480,7 +679,7 @@ Bug Fixes - Fixed issue with openSUSE 15 RC 6 where the eBPF tracer wouldn't start due to a failed validation of the ``tcp_sendpage`` probe. -- Fixed a rare issue where short-lived containers could cause +- Fixed a rare issue where short-lived containers could cause logs to be sent with the wrong container ID. - Fix Windows Process Agent argument stripping to account for spaces in the executable path. diff --git a/Dockerfiles/agent/cont-init.d/60-sysprobe-check.sh b/Dockerfiles/agent/cont-init.d/60-sysprobe-check.sh index d556aac48974b..dff29ebc3fe71 100644 --- a/Dockerfiles/agent/cont-init.d/60-sysprobe-check.sh +++ b/Dockerfiles/agent/cont-init.d/60-sysprobe-check.sh @@ -1,15 +1,29 @@ #!/bin/bash -if grep -Eq '^ *enable_tcp_queue_length *: *true' /etc/datadog-agent/system-probe.yaml || [[ "$DD_SYSTEM_PROBE_CONFIG_ENABLE_TCP_QUEUE_LENGTH" == "true" ]]; then +sysprobe_cfg="/etc/datadog-agent/system-probe.yaml" + +if grep -Eq '^ *enable_tcp_queue_length *: *true' $sysprobe_cfg || [[ "$DD_SYSTEM_PROBE_CONFIG_ENABLE_TCP_QUEUE_LENGTH" == "true" ]]; then if [ -f /etc/datadog-agent/conf.d/tcp_queue_length.d/conf.yaml.example ]; then mv /etc/datadog-agent/conf.d/tcp_queue_length.d/conf.yaml.example \ /etc/datadog-agent/conf.d/tcp_queue_length.d/conf.yaml.default fi fi -if grep -Eq '^ *enable_oom_kill *: *true' /etc/datadog-agent/system-probe.yaml || [[ "$DD_SYSTEM_PROBE_CONFIG_ENABLE_OOM_KILL" == "true" ]]; then +if grep -Eq '^ *enable_oom_kill *: *true' $sysprobe_cfg || [[ "$DD_SYSTEM_PROBE_CONFIG_ENABLE_OOM_KILL" == "true" ]]; then if [ -f /etc/datadog-agent/conf.d/oom_kill.d/conf.yaml.example ]; then mv /etc/datadog-agent/conf.d/oom_kill.d/conf.yaml.example \ /etc/datadog-agent/conf.d/oom_kill.d/conf.yaml.default fi fi + +# Match the key gpu_monitoring.enabled: true using Python's YAML parser, which is included in the base image +# and is more robust than using regexes. 
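+# For reference, the system-probe.yaml shape this check looks for (derived from the key path used below) would be:
+#   gpu_monitoring:
+#     enabled: true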
+gpu_monitoring_enabled=$(python3 -c "import yaml, sys; data=yaml.safe_load(sys.stdin); print(bool(data.get('gpu_monitoring', {}).get('enabled')))" < $sysprobe_cfg) + +# Note gpu_monitoring_enabled is a Python boolean, so casing is important +if [[ "$gpu_monitoring_enabled" == "True" ]] || [[ "$DD_GPU_MONITORING_ENABLED" == "true" ]]; then + if [ -f /etc/datadog-agent/conf.d/gpu.d/conf.yaml.example ]; then + mv /etc/datadog-agent/conf.d/gpu.d/conf.yaml.example \ + /etc/datadog-agent/conf.d/gpu.d/conf.yaml.default + fi +fi diff --git a/Dockerfiles/agent/install-fips.ps1 b/Dockerfiles/agent/install-fips.ps1 index 05324776e9dda..5245d0b339de9 100644 --- a/Dockerfiles/agent/install-fips.ps1 +++ b/Dockerfiles/agent/install-fips.ps1 @@ -32,7 +32,43 @@ if ("$env:WITH_JMX" -ne "false") { cd \ } -# TODO: Run openssl fipsinstall command here when embedded Python work is completed -# HERE +# Configure Python's OpenSSL FIPS module +# The OpenSSL security policy states: +# "The Module shall have the self-tests run, and the Module config file output generated on each +# platform where it is intended to be used. The Module config file output data shall not be copied from +# one machine to another." +# https://github.com/openssl/openssl/blob/master/README-FIPS.md +# We provide the -self_test_onload option to ensure that the install-status and install-mac options +# are NOT written to fipsmodule.cnf. This allows us to create the config during the image build, +# and means the self tests will be run on every container start. +# https://docs.openssl.org/master/man5/fips_config +# Discussion about putting the commands in image vs entrypoint: +# https://github.com/openssl/openssl/discussions/23920 +$embeddedPath = "C:\Program Files\Datadog\Datadog Agent\embedded3" +$fipsProviderPath = "$embeddedPath\lib\ossl-modules\fips.dll" +$fipsConfPath = "$embeddedPath\ssl\fipsmodule.cnf" +& "$embeddedPath\bin\openssl.exe" fipsinstall -module "$fipsProviderPath" -out "$fipsConfPath" -self_test_onload +$err = $LASTEXITCODE +if ($err -ne 0) { + Write-Error ("openssl fipsinstall exited with code: {0}" -f $err) + exit $err +} +# Run again with -verify option +& "$embeddedPath\bin\openssl.exe" fipsinstall -module "$fipsProviderPath" -in "$fipsConfPath" -verify +$err = $LASTEXITCODE +if ($err -ne 0) { + Write-Error ("openssl fipsinstall verification of FIPS compliance failed, exited with code: {0}" -f $err) + exit $err +} +# We don't need to modify the .include directive in openssl.cnf here because the container +# always uses the default installation path. 
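+# The copy below is assumed to install the FIPS-enabled OpenSSL config template shipped in the embedded directory;
+# it is expected to reference the fipsmodule.cnf generated above so that the FIPS provider is loaded at runtime.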
+$opensslConfPath = "$embeddedPath\ssl\openssl.cnf" +$opensslConfTemplate = "$embeddedPath\ssl\openssl.cnf.tmp" +Copy-Item "$opensslConfTemplate" "$opensslConfPath" + +# Configure Windows FIPS mode +# This system-wide setting is used by Windows as well as the Microsoft Go fork used by the Agent +# https://github.com/microsoft/go/blob/microsoft/main/eng/doc/fips/README.md#windows-fips-mode-cng +Set-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Control\Lsa\FipsAlgorithmPolicy" -Name "Enabled" -Value 1 -Type DWORD Remove-TempFiles diff --git a/LICENSE-3rdparty.csv b/LICENSE-3rdparty.csv index f23ff3ab8c11c..a6d851d8fac71 100644 --- a/LICENSE-3rdparty.csv +++ b/LICENSE-3rdparty.csv @@ -343,10 +343,10 @@ core,github.com/aquasecurity/trivy-db/pkg/utils/strings,Apache-2.0,Copyright 201 core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/alma,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/alpine,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/amazon,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/azure,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/azure/oval,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/chainguard,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/debian,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd -core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/mariner,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd -core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/mariner/oval,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/oracle-oval,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/photon,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy-db/pkg/vulnsrc/redhat-oval,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -359,14 +359,22 @@ core,github.com/aquasecurity/trivy-java-db/pkg/db,Apache-2.0,Copyright 2019-2020 core,github.com/aquasecurity/trivy-java-db/pkg/types,Apache-2.0,Copyright 2019-2020 Aqua Security core,github.com/aquasecurity/trivy/pkg/attestation,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/attestation/sbom,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/cache,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/clock,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/compliance/report,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/compliance/spec,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/db,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/c/conan,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/dependency/parser/conda/environment,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd 
core,github.com/aquasecurity/trivy/pkg/dependency/parser/conda/meta,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/dart/pub,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/dotnet/core_deps,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/dependency/parser/executable,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/dependency/parser/executable/java,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/dependency/parser/executable/nodejs,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/dependency/parser/executable/php,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/dependency/parser/executable/python,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/golang/binary,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/golang/mod,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/golang/sum,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -374,6 +382,7 @@ core,github.com/aquasecurity/trivy/pkg/dependency/parser/gradle/lockfile,Apache- core,github.com/aquasecurity/trivy/pkg/dependency/parser/hex/mix,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/java/jar,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/java/pom,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/dependency/parser/julia/manifest,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/nodejs/npm,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/nodejs/packagejson,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/nodejs/pnpm,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -391,10 +400,10 @@ core,github.com/aquasecurity/trivy/pkg/dependency/parser/ruby/bundler,Apache-2.0 core,github.com/aquasecurity/trivy/pkg/dependency/parser/ruby/gemspec,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/rust/binary,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/rust/cargo,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/dependency/parser/sbt/lockfile,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/swift/cocoapods,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/swift/swift,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/dependency/parser/utils,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd -core,github.com/aquasecurity/trivy/pkg/dependency/types,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd 
core,github.com/aquasecurity/trivy/pkg/detector/library,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/detector/library/compare,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/detector/library/compare/bitnami,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -406,9 +415,9 @@ core,github.com/aquasecurity/trivy/pkg/detector/ospkg,Apache-2.0,Copyright 2019- core,github.com/aquasecurity/trivy/pkg/detector/ospkg/alma,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/detector/ospkg/alpine,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/detector/ospkg/amazon,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/detector/ospkg/azure,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/detector/ospkg/chainguard,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/detector/ospkg/debian,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd -core,github.com/aquasecurity/trivy/pkg/detector/ospkg/mariner,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/detector/ospkg/oracle,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/detector/ospkg/photon,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/detector/ospkg/redhat,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -425,6 +434,7 @@ core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/buildinfo,Apache-2.0,Copyr core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/executable,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/c/conan,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/conda/environment,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/conda/meta,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/dart/pub,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/dotnet/deps,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -436,6 +446,8 @@ core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/golang/mod,Apache core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/gradle,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/jar,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/pom,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/sbt,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/julia/pkg,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/license,Apache-2.0,Copyright 2019-2020 Aqua Security 
Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/npm,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/pkg,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -457,7 +469,6 @@ core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/os,Apache-2.0,Copyright 20 core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/os/alpine,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/os/amazonlinux,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/os/debian,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd -core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/os/mariner,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/os/redhatbase,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/os/release,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/os/ubuntu,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -468,12 +479,12 @@ core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/repo/apk,Apache-2.0,Copyri core,github.com/aquasecurity/trivy/pkg/fanal/analyzer/sbom,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/applier,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/artifact,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/fanal/artifact/container,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/artifact/image,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/artifact/local,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/artifact/repo,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/artifact/sbom,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/artifact/vm,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd -core,github.com/aquasecurity/trivy/pkg/fanal/cache,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/handler,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/handler/all,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/handler/sysfile,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -481,7 +492,7 @@ core,github.com/aquasecurity/trivy/pkg/fanal/handler/unpackaged,Apache-2.0,Copyr core,github.com/aquasecurity/trivy/pkg/fanal/image,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/image/daemon,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/image/registry,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd -core,github.com/aquasecurity/trivy/pkg/fanal/log,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/fanal/image/registry/intf,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd 
core,github.com/aquasecurity/trivy/pkg/fanal/types,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/utils,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/fanal/vm,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -490,6 +501,7 @@ core,github.com/aquasecurity/trivy/pkg/fanal/vm/filesystem,Apache-2.0,Copyright core,github.com/aquasecurity/trivy/pkg/fanal/walker,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/flag,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/iac/detection,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/iac/rego,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/iac/types,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/javadb,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/licensing,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -531,11 +543,14 @@ core,github.com/aquasecurity/trivy/pkg/semaphore,Apache-2.0,Copyright 2019-2020 core,github.com/aquasecurity/trivy/pkg/types,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/utils/fsutils,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/uuid,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd -core,github.com/aquasecurity/trivy/pkg/version,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/version/app,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/version/doc,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/vex,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/vex/repo,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/vulnerability,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/x/io,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/x/path,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd +core,github.com/aquasecurity/trivy/pkg/x/slices,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/x/strings,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/pkg/x/sync,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd core,github.com/aquasecurity/trivy/rpc/cache,Apache-2.0,Copyright 2019-2020 Aqua Security Software Ltd @@ -655,7 +670,6 @@ core,github.com/aws/aws-sdk-go/private/protocol/rest,Apache-2.0,"Copyright 2014- core,github.com/aws/aws-sdk-go/private/protocol/restjson,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/ec2,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." 
-core,github.com/aws/aws-sdk-go/service/ec2/ec2iface,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/ecs,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/lightsail,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." core,github.com/aws/aws-sdk-go/service/sso,Apache-2.0,"Copyright 2014-2015 Stripe, Inc. | Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved." @@ -693,6 +707,7 @@ core,github.com/beorn7/perks/quantile,MIT,Copyright (C) 2013 Blake Mizerany core,github.com/bhmj/jsonslice,MIT,Copyright (c) 2018 bhmj core,github.com/bitnami/go-version/pkg/version,Apache-2.0,Copyright (c) 2023-2024 Carlos Rodríguez Hernández core,github.com/blabber/go-freebsd-sysctl/sysctl,0BSD,Copyright (c) 2014-2020 by Tobias Rehbein +core,github.com/blang/semver,MIT,Copyright (c) 2014 Benedikt Lang core,github.com/blang/semver/v4,MIT,Copyright (c) 2014 Benedikt Lang core,github.com/bmatcuk/doublestar/v4,MIT,Copyright (c) 2014 Bob Matcuk core,github.com/bmizerany/pat,MIT,"Copyright (C) 2012 by Keith Rarick, Blake Mizerany" @@ -763,6 +778,7 @@ core,github.com/containerd/containerd/api/services/tasks/v1,Apache-2.0,"Copyrigh core,github.com/containerd/containerd/api/services/transfer/v1,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/api/services/version/v1,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/api/types,Apache-2.0,"Copyright 2012-2015 Docker, Inc." +core,github.com/containerd/containerd/api/types/runc/options,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/api/types/task,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/api/types/transfer,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/archive,Apache-2.0,"Copyright 2012-2015 Docker, Inc." @@ -817,7 +833,6 @@ core,github.com/containerd/containerd/remotes/errors,Apache-2.0,"Copyright 2012- core,github.com/containerd/containerd/rootfs,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/runtime/linux/runctypes,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/runtime/restart,Apache-2.0,"Copyright 2012-2015 Docker, Inc." -core,github.com/containerd/containerd/runtime/v2/runc/options,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/sandbox,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/sandbox/proxy,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/services,Apache-2.0,"Copyright 2012-2015 Docker, Inc." @@ -827,6 +842,7 @@ core,github.com/containerd/containerd/snapshots,Apache-2.0,"Copyright 2012-2015 core,github.com/containerd/containerd/snapshots/proxy,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/tracing,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/containerd/version,Apache-2.0,"Copyright 2012-2015 Docker, Inc." +core,github.com/containerd/continuity/devices,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/continuity/fs,Apache-2.0,"Copyright 2012-2015 Docker, Inc." 
core,github.com/containerd/continuity/sysx,Apache-2.0,"Copyright 2012-2015 Docker, Inc." core,github.com/containerd/errdefs,Apache-2.0,"Copyright 2012-2015 Docker, Inc." @@ -860,6 +876,7 @@ core,github.com/coreos/pkg/dlopen,Apache-2.0,"Copyright 2017 CoreOS, Inc" core,github.com/cri-o/ocicni/pkg/ocicni,Apache-2.0,"Copyright 2016 Red Hat, Inc" core,github.com/csaf-poc/csaf_distribution/v3/csaf,MIT,Copyright 2021-2023 German Federal Office for Information Security (BSI) core,github.com/csaf-poc/csaf_distribution/v3/util,MIT,Copyright 2021-2023 German Federal Office for Information Security (BSI) +core,github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer,Apache-2.0,Copyright 2018 Anders Rundgren core,github.com/cyphar/filepath-securejoin,BSD-3-Clause,Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved | Copyright (C) 2017-2024 SUSE LLC. All rights reserved core,github.com/davecgh/go-spew/spew,ISC,Copyright (c) 2012-2016 Dave Collins core,github.com/dennwc/varint,MIT,Copyright (c) 2019 Denys Smirnov @@ -867,6 +884,8 @@ core,github.com/dgryski/go-jump,MIT,Copyright (c) 2014 Damian Gryski core,github.com/digitalocean/godo,MIT,Copyright (c) 2013 The go-github AUTHORS. All rights reserved | Copyright (c) 2014-2016 The godo AUTHORS. All rights reserved core,github.com/digitalocean/godo/metrics,MIT,Copyright (c) 2013 The go-github AUTHORS. All rights reserved | Copyright (c) 2014-2016 The godo AUTHORS. All rights reserved +core,github.com/digitorus/pkcs7,MIT,Copyright (c) 2015 Andrew Smith +core,github.com/digitorus/timestamp,BSD-2-Clause,"Copyright (c) 2017, Digitorus B.V" core,github.com/distribution/reference,Apache-2.0,Copyright 2014 The CNCF Distribution Project Authors core,github.com/docker/cli/cli/config,Apache-2.0,"Copyright 2012-2017 Docker, Inc." core,github.com/docker/cli/cli/config/configfile,Apache-2.0,"Copyright 2012-2017 Docker, Inc." @@ -896,6 +915,7 @@ core,github.com/docker/docker/api/types/volume,Apache-2.0,"Copyright 2012-2017 D core,github.com/docker/docker/client,Apache-2.0,"Copyright 2012-2017 Docker, Inc." core,github.com/docker/docker/errdefs,Apache-2.0,"Copyright 2012-2017 Docker, Inc." core,github.com/docker/docker/internal/multierror,Apache-2.0,"Copyright 2012-2017 Docker, Inc." +core,github.com/docker/docker/pkg/system,Apache-2.0,"Copyright 2012-2017 Docker, Inc." core,github.com/docker/go-connections/nat,Apache-2.0,"Copyright 2012-2017 Docker, Inc." core,github.com/docker/go-connections/sockets,Apache-2.0,"Copyright 2012-2017 Docker, Inc." core,github.com/docker/go-connections/tlsconfig,Apache-2.0,"Copyright 2012-2017 Docker, Inc." @@ -965,6 +985,8 @@ core,github.com/fsnotify/fsnotify/internal,BSD-3-Clause,Copyright (c) 2012 The G core,github.com/fxamacker/cbor/v2,MIT,Copyright (c) 2019-present Faye Amacker | Copyright © 2019-2024 [Faye Amacker](https://github.com/fxamacker) core,github.com/ghodss/yaml,MIT,Copyright (c) 2012 The Go Authors. 
All rights reserved | Copyright (c) 2014 Sam Ghods core,github.com/glaslos/ssdeep,MIT,"Copyright (c) 2015, Arbo von Monkiewitsch All rights reserved | Copyright (c) 2017 Lukas Rist" +core,github.com/go-chi/chi,MIT,"Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc | Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)" +core,github.com/go-chi/chi/middleware,MIT,"Copyright (c) 2015-present Peter Kieltyka (https://github.com/pkieltyka), Google Inc | Copyright (c) 2015-present [Peter Kieltyka](https://github.com/pkieltyka)" core,github.com/go-delve/delve/pkg/dwarf,MIT,Copyright (c) 2014 Derek Parker core,github.com/go-delve/delve/pkg/dwarf/godwarf,MIT,Copyright (c) 2014 Derek Parker core,github.com/go-delve/delve/pkg/dwarf/leb128,MIT,Copyright (c) 2014 Derek Parker @@ -1075,6 +1097,7 @@ core,github.com/go-sql-driver/mysql,MPL-2.0,"Aaron Hopkins " +core,github.com/gobuffalo/flect,MIT,Copyright (c) 2019 Mark Bates core,github.com/gobwas/glob,MIT,Copyright (c) 2016 Sergey Kamardin core,github.com/gobwas/glob/compiler,MIT,Copyright (c) 2016 Sergey Kamardin core,github.com/gobwas/glob/match,MIT,Copyright (c) 2016 Sergey Kamardin @@ -1121,25 +1144,12 @@ core,github.com/gogo/protobuf/protoc-gen-gogo/descriptor,BSD-3-Clause,"Copyright core,github.com/gogo/protobuf/sortkeys,BSD-3-Clause,"Copyright (c) 2013, The GoGo Authors. All rights reserved. | Copyright 2010 The Go Authors. All rights reserved." core,github.com/gogo/protobuf/types,BSD-3-Clause,"Copyright (c) 2013, The GoGo Authors. All rights reserved. | Copyright 2010 The Go Authors. All rights reserved." core,github.com/golang-jwt/jwt/v5,MIT,Copyright (c) 2012 Dave Grijalva | Copyright (c) 2021 golang-jwt maintainers -core,github.com/golang/glog,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/glog/internal/logsink,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/glog/internal/stackdump,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/groupcache/lru,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/mock/gomock,Apache-2.0,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/descriptor,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/internal/gengogrpc,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/jsonpb,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/proto,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/protoc-gen-go,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/protoc-gen-go/descriptor,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/protoc-gen-go/plugin,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/ptypes,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/ptypes/any,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/ptypes/duration,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/protobuf/ptypes/empty,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. 
-core,github.com/golang/protobuf/ptypes/struct,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/ptypes/timestamp,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. -core,github.com/golang/protobuf/ptypes/wrappers,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved. core,github.com/golang/snappy,BSD-3-Clause,Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. core,github.com/google/cel-go/cel,Apache-2.0,Copyright (c) 2018 The Go Authors. All rights reserved core,github.com/google/cel-go/checker,Apache-2.0,Copyright (c) 2018 The Go Authors. All rights reserved @@ -1163,6 +1173,13 @@ core,github.com/google/cel-go/interpreter,Apache-2.0,Copyright (c) 2018 The Go A core,github.com/google/cel-go/interpreter/functions,Apache-2.0,Copyright (c) 2018 The Go Authors. All rights reserved core,github.com/google/cel-go/parser,Apache-2.0,Copyright (c) 2018 The Go Authors. All rights reserved core,github.com/google/cel-go/parser/gen,Apache-2.0,Copyright (c) 2018 The Go Authors. All rights reserved +core,github.com/google/certificate-transparency-go,Apache-2.0,"Adam Eijdenberg | Al Cutter | Alex Cohn | Ben Laurie | Chris Kennelly | Comodo CA Limited | David Drysdale | Deyan Bektchiev | Ed Maste | Emilia Kasper | Eran Messeri | Fiaz Hossain | Gary Belvin | Google LLC | Internet Security Research Group | Jeff Trawick | Joe Tsai | Kat Joyce | Katriel Cohn-Gordon | Kiril Nikolov | Konrad Kraszewski | Laël Cellier | Linus Nordberg | Mark Schloesser | NORDUnet A/S | Nicholas Galbreath | Oliver Weidner | Pascal Leroy | Paul Hadfield | Paul Lietar | Pavel Kalinnikov | Pierre Phaneuf | PrimeKey Solutions AB | Rob Percival | Rob Stradling | Roger Ng | Roland Shoemaker | Ruslan Kovalov | Samuel Lidén Borell | Tatiana Merkulova | Venafi, Inc. | Vladimir Rutsky | Ximin Luo " +core,github.com/google/certificate-transparency-go/asn1,Apache-2.0,"Adam Eijdenberg | Al Cutter | Alex Cohn | Ben Laurie | Chris Kennelly | Comodo CA Limited | David Drysdale | Deyan Bektchiev | Ed Maste | Emilia Kasper | Eran Messeri | Fiaz Hossain | Gary Belvin | Google LLC | Internet Security Research Group | Jeff Trawick | Joe Tsai | Kat Joyce | Katriel Cohn-Gordon | Kiril Nikolov | Konrad Kraszewski | Laël Cellier | Linus Nordberg | Mark Schloesser | NORDUnet A/S | Nicholas Galbreath | Oliver Weidner | Pascal Leroy | Paul Hadfield | Paul Lietar | Pavel Kalinnikov | Pierre Phaneuf | PrimeKey Solutions AB | Rob Percival | Rob Stradling | Roger Ng | Roland Shoemaker | Ruslan Kovalov | Samuel Lidén Borell | Tatiana Merkulova | Venafi, Inc. | Vladimir Rutsky | Ximin Luo " +core,github.com/google/certificate-transparency-go/gossip/minimal/x509ext,Apache-2.0,"Adam Eijdenberg | Al Cutter | Alex Cohn | Ben Laurie | Chris Kennelly | Comodo CA Limited | David Drysdale | Deyan Bektchiev | Ed Maste | Emilia Kasper | Eran Messeri | Fiaz Hossain | Gary Belvin | Google LLC | Internet Security Research Group | Jeff Trawick | Joe Tsai | Kat Joyce | Katriel Cohn-Gordon | Kiril Nikolov | Konrad Kraszewski | Laël Cellier | Linus Nordberg | Mark Schloesser | NORDUnet A/S | Nicholas Galbreath | Oliver Weidner | Pascal Leroy | Paul Hadfield | Paul Lietar | Pavel Kalinnikov | Pierre Phaneuf | PrimeKey Solutions AB | Rob Percival | Rob Stradling | Roger Ng | Roland Shoemaker | Ruslan Kovalov | Samuel Lidén Borell | Tatiana Merkulova | Venafi, Inc. 
| Vladimir Rutsky | Ximin Luo " +core,github.com/google/certificate-transparency-go/tls,Apache-2.0,"Adam Eijdenberg | Al Cutter | Alex Cohn | Ben Laurie | Chris Kennelly | Comodo CA Limited | David Drysdale | Deyan Bektchiev | Ed Maste | Emilia Kasper | Eran Messeri | Fiaz Hossain | Gary Belvin | Google LLC | Internet Security Research Group | Jeff Trawick | Joe Tsai | Kat Joyce | Katriel Cohn-Gordon | Kiril Nikolov | Konrad Kraszewski | Laël Cellier | Linus Nordberg | Mark Schloesser | NORDUnet A/S | Nicholas Galbreath | Oliver Weidner | Pascal Leroy | Paul Hadfield | Paul Lietar | Pavel Kalinnikov | Pierre Phaneuf | PrimeKey Solutions AB | Rob Percival | Rob Stradling | Roger Ng | Roland Shoemaker | Ruslan Kovalov | Samuel Lidén Borell | Tatiana Merkulova | Venafi, Inc. | Vladimir Rutsky | Ximin Luo " +core,github.com/google/certificate-transparency-go/x509,Apache-2.0,"Adam Eijdenberg | Al Cutter | Alex Cohn | Ben Laurie | Chris Kennelly | Comodo CA Limited | David Drysdale | Deyan Bektchiev | Ed Maste | Emilia Kasper | Eran Messeri | Fiaz Hossain | Gary Belvin | Google LLC | Internet Security Research Group | Jeff Trawick | Joe Tsai | Kat Joyce | Katriel Cohn-Gordon | Kiril Nikolov | Konrad Kraszewski | Laël Cellier | Linus Nordberg | Mark Schloesser | NORDUnet A/S | Nicholas Galbreath | Oliver Weidner | Pascal Leroy | Paul Hadfield | Paul Lietar | Pavel Kalinnikov | Pierre Phaneuf | PrimeKey Solutions AB | Rob Percival | Rob Stradling | Roger Ng | Roland Shoemaker | Ruslan Kovalov | Samuel Lidén Borell | Tatiana Merkulova | Venafi, Inc. | Vladimir Rutsky | Ximin Luo " +core,github.com/google/certificate-transparency-go/x509/pkix,Apache-2.0,"Adam Eijdenberg | Al Cutter | Alex Cohn | Ben Laurie | Chris Kennelly | Comodo CA Limited | David Drysdale | Deyan Bektchiev | Ed Maste | Emilia Kasper | Eran Messeri | Fiaz Hossain | Gary Belvin | Google LLC | Internet Security Research Group | Jeff Trawick | Joe Tsai | Kat Joyce | Katriel Cohn-Gordon | Kiril Nikolov | Konrad Kraszewski | Laël Cellier | Linus Nordberg | Mark Schloesser | NORDUnet A/S | Nicholas Galbreath | Oliver Weidner | Pascal Leroy | Paul Hadfield | Paul Lietar | Pavel Kalinnikov | Pierre Phaneuf | PrimeKey Solutions AB | Rob Percival | Rob Stradling | Roger Ng | Roland Shoemaker | Ruslan Kovalov | Samuel Lidén Borell | Tatiana Merkulova | Venafi, Inc. | Vladimir Rutsky | Ximin Luo " +core,github.com/google/certificate-transparency-go/x509util,Apache-2.0,"Adam Eijdenberg | Al Cutter | Alex Cohn | Ben Laurie | Chris Kennelly | Comodo CA Limited | David Drysdale | Deyan Bektchiev | Ed Maste | Emilia Kasper | Eran Messeri | Fiaz Hossain | Gary Belvin | Google LLC | Internet Security Research Group | Jeff Trawick | Joe Tsai | Kat Joyce | Katriel Cohn-Gordon | Kiril Nikolov | Konrad Kraszewski | Laël Cellier | Linus Nordberg | Mark Schloesser | NORDUnet A/S | Nicholas Galbreath | Oliver Weidner | Pascal Leroy | Paul Hadfield | Paul Lietar | Pavel Kalinnikov | Pierre Phaneuf | PrimeKey Solutions AB | Rob Percival | Rob Stradling | Roger Ng | Roland Shoemaker | Ruslan Kovalov | Samuel Lidén Borell | Tatiana Merkulova | Venafi, Inc. | Vladimir Rutsky | Ximin Luo " core,github.com/google/gnostic-models/compiler,Apache-2.0,"Copyright 2017-2022, Google LLC." 
core,github.com/google/gnostic-models/extensions,Apache-2.0,"Copyright 2017-2022, Google LLC" core,github.com/google/gnostic-models/jsonschema,Apache-2.0,"Copyright 2017-2022, Google LLC" @@ -1183,9 +1200,13 @@ core,github.com/google/go-containerregistry/internal/redact,Apache-2.0,Copyright core,github.com/google/go-containerregistry/internal/retry,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/internal/retry/wait,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/internal/verify,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. +core,github.com/google/go-containerregistry/internal/windows,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/internal/zstd,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/authn,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/compression,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. +core,github.com/google/go-containerregistry/pkg/crane,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. +core,github.com/google/go-containerregistry/pkg/legacy,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. +core,github.com/google/go-containerregistry/pkg/legacy/tarball,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/logs,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. 
| Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/name,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/registry,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. @@ -1198,9 +1219,11 @@ core,github.com/google/go-containerregistry/pkg/v1/mutate,Apache-2.0,Copyright 2 core,github.com/google/go-containerregistry/pkg/v1/partial,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1/remote,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1/remote/transport,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. +core,github.com/google/go-containerregistry/pkg/v1/static,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1/stream,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1/tarball,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. core,github.com/google/go-containerregistry/pkg/v1/types,Apache-2.0,Copyright 2018 Google LLC All Rights Reserved. | Copyright 2019 Google LLC All Rights Reserved. | Copyright 2020 Google LLC All Rights Reserved. | Copyright 2021 Google LLC All Rights Reserved. | Copyright 2022 Google LLC All Rights Reserved. 
+core,github.com/google/go-github/v62/github,BSD-3-Clause,"178inaba | 2BFL | 413x | 6543 <6543@obermui.de> | Abed Kibbe | Abhinav Gupta | Abhishek Veeramalla | Adam Kohring | Ahmad Nurus S | Ahmed Hagy | Aidan | Aidan Steele | Ainsley Chong | Akeda Bagus | Akhil Mohan | Alec Thomas | Aleks Clark | Alex Bramley | Alex Ellis | Alex Orr | Alex Su | Alex Unger | Alexander Harkness | Alexis Gauthiez | Ali Farooq | Allan Guwatudde | Allen Sun | Amey Sakhadeo | Anders Janmyr | Andreas Garnæs | Andrew Ryabchun | Andrew Svoboda | Andy Grunwald | Andy Hume | Andy Lindeman | Anshuman Bhartiya | Antoine | Antoine Pelisse | Anton Nguyen | Anubha Kushwaha | Aravind | Arda Kuyumcu | Arıl Bozoluk | Asier Marruedo | Austin Burdine | Austin Dizzy | Azuka Okuleye | Ben Batha | Benjamen Keroack | Berkay Tacyildiz | Beshr Kayali | Beyang Liu | Billy Keyes | Billy Lynch | Bingtan Lu | Bjorn Neergaard | Björn Häuser | Bo Huang | Bracken | Brad Harris | Brad Moylan | Bradley Falzon | Bradley McAllister | Brandon Butler | Brandon Cook | Brandon Stubbs | Brett Kuhlman | Brett Logan | Brian Egizi | Bryan Boreham | Bryan Peterson | CI Monk | Cami Diez | Carl Johnson | Carlos Alexandro Becker | Carlos Tadeu Panato Junior | ChandanChainani | Charles Fenwick Elliott | Charlie Yan | Chmouel Boudjnah | Chris King | Chris Mc | Chris Raborg | Chris Roche | Chris Schaefer | Christian Bargmann | Christian Muehlhaeuser | Christoph Jerolimov | Christoph Sassenberg | Colin Misare | Copyright (c) 2013 The go-github AUTHORS. All rights reserved | Craig Gumbley | Craig Peterson | Cristian Maglie | Cyb3r Jak3 | Daehyeok Mun | Dalton Hubble | Daniel Lanner | Daniel Leavitt | Daniel Nilsson | Daoq | Dave Du Cros | Dave Henderson | Dave Perrett | Dave Protasowski | David Deng | David Gamba | David J. M. Karlsen | David Jannotta | David Ji | David Lopez Reyes | Davide Zipeto | Dennis Webb | Derek Jobst | DeviousLab | Dhi Aurrahman | Diego Lapiduz | Diogo Vilela | Dmitri Shuralyov | Don Petersen | Doug Turner | Drew Fradette | Dustin Deus | Dustin Lish | Eivind | Eli Uriegas | Elliott Beach | Emerson Wood | Emil V | Eng Zer Jun | Erick Fejta | Erik Nobel | Evan Anderson | Evan Elias | Fabian Holler | Fabrice | Fatema-Moaiyadi | Federico Di Pierro | Felix Geisendörfer | Filippo Valsorda | Florian Forster | Florian Wagner | Francesc Gil | Francis | Francisco Guimarães | François de Metz | Fredrik Jönsson | Gabriel | Gal Ofri | Garrett Squire | George Kontridze | Georgy Buranov | Glen Mailer | Gnahz | Google Inc. 
| Grachev Mikhail | Guillaume Jacquet | Guz Alexander | Guðmundur Bjarni Ólafsson | Hanno Hecker | Hari haran | Harikesh00 | Hiroki Ito | Hubot Jr | Huy Tr | Iain Steers | Ikko Ashimine | Ilia Choly | Ioannis Georgoulas | Isao Jonas | JP Phillips | Jacob Valdemar | Jake Krammer | Jake White | Jameel Haffejee | James Bowes | James Cockbain | James Loh | James Maguire | James Turley | Jamie West | Jan Kosecki | Jan Švábík | Jason Field | Javier Campanini | Jef LeCompte | Jeff Wenzbauer | Jens Rantil | Jeremy Morris | Jesse Haka | Jesse Newland | Jihoon Chung | Jille Timmermans | Jimmi Dyson | Joan Saum | Joe Tsai | John Barton | John Engelman | John Jones | John Liu | Jordan Brockopp | Jordan Burandt | Jordan Sussman | Jorge Gómez Reus | Joshua Bezaleel Abednego | João Cerqueira | Juan | Juan Basso | Julien Garcia Gonzalez | Julien Rostand | Junya Kono | Justin Abrahms | Justin Toh | Jusung Lee | Karthik Sundari | Katrina Owen | Kautilya Tripathi | Keita Urashima | Kevin Burke | Kevin Wang | Kevin Zhao | Kirill | Konrad Malawski | Kookheon Kwon | Krishna Indani | Krzysztof Kowalczyk | Kshitij Saraogi | Kumar Saurabh | Kyle Kurz | Lars Lehtonen | Laurent Verdoïa | Liam Galvin | Lluis Campos | Lovro Mažgon | Loïs Postula | Luca Campese | Lucas Alcantara | Lucas Martin-King | Luis Davim | Luke Evers | Luke Hinds | Luke Kysow | Luke Roberts | Luke Young | Magnus Kulke | Maksim Zhylinski | Marc Binder | Marcelo Carlos | Mark Tareshawty | Martin Holman | Martin-Louis Bright | Martins Sipenko | Marwan Sulaiman | Masayuki Izumi | Mat Geist | Matija Horvat | Matin Rahmanian | Matt | Matt Brender | Matt Dainty | Matt Gaunt | Matt Landis | Matt Moore | Matt Simons | Matthew Reidy | Maxime Bury | Michael Meng | Michael Spiegel | Michael Tiller | Michał Glapa | Michelangelo Morrillo | Miguel Elias dos Santos | Mike Chen | Mohammed AlDujaili | Mukundan Senthil | Munia Balayil | Mustafa Abban | Nadav Kaner | Naoki Kanatani | Nathan VanBenschoten | Navaneeth Suresh | Neal Caffery | Neil O'Toole | Nick Miyake | Nick Platt | Nick Spragg | Nicolas Chapurlat | Nikhita Raghunath | Nilesh Singh | Noah Hanjun Lee | Noah Zoschke | Ole Orhagen | Oleg Kovalov | Ondřej Kupka | Ori Talmor | Osama Faqhruldin | Pablo Pérez Schröder | Palash Nigam | Panagiotis Moustafellos | Parham Alvani | Parker Moore | Pat Alwell | Patrick DeVivo | Patrick Marabeas | Patrik Nordlén | Pavel Dvoinos | Pavel Shtanko | Pete Wagner | Petr Shevtsov | Pierce McEntagart | Pierre Carrier | Piotr Zurek | Piyush Chugh | Pratik Mallya | Qais Patankar | Quang Le Hong | Quentin Leffray | Quinn Slack | Rackspace US, Inc. | Radek Simko | Radliński Ignacy | Rafael Aramizu Gomes | Rajat Jindal | Rajendra arora | Rajkumar | Ranbir Singh | Ravi Shekhar Jethani | RaviTeja Pothana | Red Hat, Inc. | Reetuparna Mukherjee | Reinier Timmer | Renjith R | Ricco Førgaard | Richard de Vries | Rob Figueiredo | Rohit Upadhyay | Rojan Dinc | Ronak Jain | Ronan Pelliard | Ross Gustafson | Ruben Vereecken | Russell Boley | Ryan Leung | Ryan Lower | Ryo Nakao | Saaarah | Safwan Olaimat | Sahil Dua | Sai Ravi Teja Chintakrindi | Sam Minnée | Sandeep Sukhani | Sander Knape | Sander van Harmelen | Sanket Payghan | Sarah Funkhouser | Sarasa Kisaragi | Sasha Melentyev | Sean Wang | Sebastian Mandrean | Sebastian Mæland Pedersen | Sergei Popinevskii | Sergey Romanov | Sergio Garcia | Seth Vargo | Sevki | Shagun Khemka | Shawn Catanzarite | Shawn Smith | Shibasis Patel | Sho Okada | Shrikrishna Singh | Simon Davis | SoundCloud, Ltd. 
| SriVignessh Pss | Sridhar Mocherla | Stefan Sedich | Steve Teuber | Stian Eikeland | Suhaib Mujahid | Szymon Kodrebski | Søren Hansen | T.J. Corrigan | Takashi Yoneuchi | Takayuki Watanabe | Taketoshi Fujiwara | Taketoshi Fujiwara | Takuma Kajikawa | Tasya Aditya Rukmana | Theo Henson | Theofilos Petsios | Thomas Aidan Curran | Thomas Bruyelle | Tim Rogers | Timothy O'Brien | Timothée Peignier | Tingluo Huang | Tobias Gesellchen | Tom Payne | Trey Tacon | Vaibhav Singh | Varadarajan Aravamudhan | Victor Castell | Victor Vrantchan | Victory Osikwemhe | Vivek | Vlad Ungureanu | Wasim Thabraze | Weslei Juan Moser Pereira | Wheeler Law | Will Maier | Will Norris | Willem D'Haeseleer | William Bailey | William Cooke | Xabi | Yann Malet | Yannick Utard | Yarden Shoham | Yicheng Qin | Yosuke Akatsuka | Yumikiyo Osanai | Yurii Soldak | Yusef Mohamadi | Yusuke Kuoka | Zach Latta | aboy | adrienzieba | afdesk | ajz01 | angie pinilla | anjanashenoy | appilon | aprp | apurwaj2 | boljen | chandresh-pancholi | chrisforrette | dmnlk | eperm | erwinvaneyk | griffin_stewie | guangwu | haya14busa | haya14busa | huydx | i2bskn | ishan upadhyay | isqua | jpbelanger-mtl | jzhoucliqr | k0ral | k1rnt | kadern0 | kgalli | kyokomi | leopoldwang | lynn [they] | mohammad ali <2018cs92@student.uet.edu.pk> | ns-cweber | nxya | oslowalk | pari-27 | parkhyukjun89 | rc1140 | reeves122 | saisi | shakeelrao | sona-tar | soniachikh | sushmita wable | tkhandel | tsbkw | ttacon | vikkyomkar | xibz | zhouhaibing089 | 六开箱 | 缘生 | 蒋航 " core,github.com/google/go-querystring/query,BSD-3-Clause,Copyright (c) 2013 Google. All rights reserved core,github.com/google/gofuzz,Apache-2.0,Copyright 2014 Google Inc. All rights reserved. core,github.com/google/gofuzz/bytesource,Apache-2.0,Copyright 2014 Google Inc. All rights reserved. @@ -1261,20 +1284,16 @@ core,github.com/grpc-ecosystem/go-grpc-middleware,Apache-2.0,Copyright (c) The g core,github.com/grpc-ecosystem/go-grpc-middleware/auth,Apache-2.0,Copyright (c) The go-grpc-middleware Authors. core,github.com/grpc-ecosystem/go-grpc-middleware/util/metautils,Apache-2.0,Copyright (c) The go-grpc-middleware Authors. core,github.com/grpc-ecosystem/go-grpc-prometheus,Apache-2.0,Copyright 2016 Michal Witkowski. All Rights Reserved. -core,github.com/grpc-ecosystem/grpc-gateway/codegenerator,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/internal,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/internal/casing,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc. | Copyright 2010 The Go Authors. All rights reserved" -core,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/internal/gengateway,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." 
-core,github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/runtime,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." -core,github.com/grpc-ecosystem/grpc-gateway/utilities,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc." +core,github.com/grpc-ecosystem/grpc-gateway/v2/internal/casing,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc. | Copyright 2010, 2019 The Go Authors. All rights reserved" +core,github.com/grpc-ecosystem/grpc-gateway/v2/internal/codegenerator,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." +core,github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." +core,github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/apiconfig,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." +core,github.com/grpc-ecosystem/grpc-gateway/v2/internal/descriptor/openapiconfig,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." +core,github.com/grpc-ecosystem/grpc-gateway/v2/internal/generator,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." core,github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." +core,github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." +core,github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway/internal/gengateway,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." +core,github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." core,github.com/grpc-ecosystem/grpc-gateway/v2/runtime,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." core,github.com/grpc-ecosystem/grpc-gateway/v2/utilities,BSD-3-Clause,"Copyright (c) 2015, Gengo, Inc | Copyright (c) 2015, Gengo, Inc." 
core,github.com/h2non/filetype,MIT,Copyright (c) Tomas Aparicio @@ -1345,35 +1364,19 @@ core,github.com/jackc/pgx/v5/pgtype,MIT,Copyright (c) 2013-2021 Jack Christensen core,github.com/jackc/pgx/v5/pgxpool,MIT,Copyright (c) 2013-2021 Jack Christensen core,github.com/jackc/puddle/v2,MIT,Copyright (c) 2018 Jack Christensen core,github.com/jackc/puddle/v2/internal/genstack,MIT,Copyright (c) 2018 Jack Christensen -core,github.com/jaegertracing/jaeger/cmd/agent/app/configmanager,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/cmd/agent/app/customtransport,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/cmd/agent/app/httpserver,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/cmd/agent/app/processors,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/cmd/agent/app/servers,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/cmd/agent/app/servers/thriftudp,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/cmd/collector/app/sampling/model,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/cmd/collector/app/sampling/samplingstrategy,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/model,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/model/converter/json,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/model/converter/thrift/jaeger,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/model/converter/thrift/zipkin,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/model/json,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/pkg/clientcfg/clientcfghttp,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/pkg/distributedlock,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/pkg/metrics,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/proto-gen/api_v2,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/proto-gen/api_v2/metrics,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/storage,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/storage/dependencystore,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/storage/metricsstore,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/storage/samplingstore,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/storage/spanstore,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/thrift-gen/agent,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors -core,github.com/jaegertracing/jaeger/thrift-gen/baggage,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/thrift-gen/jaeger,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors 
-core,github.com/jaegertracing/jaeger/thrift-gen/sampling,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jaegertracing/jaeger/thrift-gen/zipkincore,Apache-2.0,Copyright 2015-2019 The Jaeger Project Authors core,github.com/jbenet/go-context/io,MIT,Copyright (c) 2014 Juan Batiz-Benet +core,github.com/jedisct1/go-minisign,MIT,Copyright (c) 2018-2023 Frank Denis core,github.com/jellydator/ttlcache/v3,MIT,Copyright (c) 2022 Jellydator core,github.com/jinzhu/inflection,MIT,Copyright (c) 2015 - Jinzhu core,github.com/jlaffaye/ftp,ISC,"Copyright (c) 2011-2013, Julien Laffaye " @@ -1431,6 +1434,12 @@ core,github.com/leodido/go-syslog/v4/rfc3164,MIT,"Copyright (c) 2018, Leonardo D core,github.com/leodido/go-syslog/v4/rfc5424,MIT,"Copyright (c) 2018, Leonardo Di Donato" core,github.com/leodido/ragel-machinery,MIT,Copyright (c) 2018 Leonardo Di Donato core,github.com/leodido/ragel-machinery/parser,MIT,Copyright (c) 2018 Leonardo Di Donato +core,github.com/letsencrypt/boulder/core,MPL-2.0,"Copyright 2016 ISRG. All rights reserved | copyright doctrines of fair use, fair dealing, or other" +core,github.com/letsencrypt/boulder/goodkey,MPL-2.0,"Copyright 2016 ISRG. All rights reserved | copyright doctrines of fair use, fair dealing, or other" +core,github.com/letsencrypt/boulder/identifier,MPL-2.0,"Copyright 2016 ISRG. All rights reserved | copyright doctrines of fair use, fair dealing, or other" +core,github.com/letsencrypt/boulder/probs,MPL-2.0,"Copyright 2016 ISRG. All rights reserved | copyright doctrines of fair use, fair dealing, or other" +core,github.com/letsencrypt/boulder/revocation,MPL-2.0,"Copyright 2016 ISRG. All rights reserved | copyright doctrines of fair use, fair dealing, or other" +core,github.com/letsencrypt/boulder/strictyaml,MPL-2.0,"Copyright 2016 ISRG. All rights reserved | copyright doctrines of fair use, fair dealing, or other" core,github.com/liamg/jfather,MIT,Copyright (c) 2022 Liam Galvin core,github.com/libp2p/go-reuseport,ISC,Copyright (c) 2013 Conformal Systems LLC core,github.com/lightstep/go-expohisto/mapping,Apache-2.0,Copyright The OpenTelemetry Authors @@ -1458,6 +1467,7 @@ core,github.com/mailru/easyjson/jlexer,MIT,Copyright (c) 2016 Mail.Ru Group core,github.com/mailru/easyjson/jwriter,MIT,Copyright (c) 2016 Mail.Ru Group core,github.com/mailru/easyjson/parser,MIT,Copyright (c) 2016 Mail.Ru Group core,github.com/masahiro331/go-disk,MIT,Copyright (c) 2022 Masahiro331 +core,github.com/masahiro331/go-disk/fs,MIT,Copyright (c) 2022 Masahiro331 core,github.com/masahiro331/go-disk/gpt,MIT,Copyright (c) 2022 Masahiro331 core,github.com/masahiro331/go-disk/mbr,MIT,Copyright (c) 2022 Masahiro331 core,github.com/masahiro331/go-disk/types,MIT,Copyright (c) 2022 Masahiro331 @@ -1484,12 +1494,13 @@ core,github.com/mitchellh/hashstructure/v2,MIT,Copyright (c) 2016 Mitchell Hashi core,github.com/mitchellh/mapstructure,MIT,Copyright (c) 2013 Mitchell Hashimoto core,github.com/mitchellh/reflectwalk,MIT,Copyright (c) 2013 Mitchell Hashimoto core,github.com/mkrautz/goar,BSD-3-Clause,Copyright (c) 2011 Mikkel Krautz -core,github.com/moby/buildkit/frontend/dockerfile/command,Apache-2.0,"Aaron L. 
Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arnaud Bailly | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Bin Liu | Brandon Mitchell | Brian Goff | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Dennis Chen | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik Sipsma | Fernando Miguel | Fiona Klute | Foysal Iqbal | Fred Cox | Frieder Bluemle | Gabriel | Gabriel Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Guilhem C | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Ian Campbell | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jean-Pierre Huynh | Jeffrey Huang | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kang, Matthew | Kees Cook | Kevin Burke | Kir Kolyshkin | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marcus Comstedt | Mark Gordon | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Misty Stanley-Jones | Miyachi Katsuya | Morgan Bauer | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nick Miyake | Nick Santos | Nikhil Pandeti | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Sam Whited | Sascha Schwarze | Sean P. 
Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Steve Lohr | Sven Dowideit | Takuya Noguchi | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Ziv Tsarfati | a-palchikov | coryb | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | genglu.gl | ggjulio | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | pieterdd | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | zhangwenlong | 岁丰 | 沈陵 | 郑泽宇 " -core,github.com/moby/buildkit/frontend/dockerfile/instructions,Apache-2.0,"Aaron L. Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arnaud Bailly | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Bin Liu | Brandon Mitchell | Brian Goff | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. 
Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Dennis Chen | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik Sipsma | Fernando Miguel | Fiona Klute | Foysal Iqbal | Fred Cox | Frieder Bluemle | Gabriel | Gabriel Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Guilhem C | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Ian Campbell | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jean-Pierre Huynh | Jeffrey Huang | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kang, Matthew | Kees Cook | Kevin Burke | Kir Kolyshkin | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marcus Comstedt | Mark Gordon | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Misty Stanley-Jones | Miyachi Katsuya | Morgan Bauer | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nick Miyake | Nick Santos | Nikhil Pandeti | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Sam Whited | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Steve Lohr | Sven Dowideit | Takuya Noguchi | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. 
Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Ziv Tsarfati | a-palchikov | coryb | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | genglu.gl | ggjulio | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | pieterdd | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | zhangwenlong | 岁丰 | 沈陵 | 郑泽宇 " -core,github.com/moby/buildkit/frontend/dockerfile/parser,Apache-2.0,"Aaron L. Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arnaud Bailly | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Bin Liu | Brandon Mitchell | Brian Goff | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Dennis Chen | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik Sipsma | Fernando Miguel | Fiona Klute | Foysal Iqbal | Fred Cox | Frieder Bluemle | Gabriel | Gabriel Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Guilhem C | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Ian Campbell | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jean-Pierre Huynh | Jeffrey Huang | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kang, Matthew | Kees Cook | Kevin Burke | Kir Kolyshkin | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marcus Comstedt | Mark Gordon | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Misty Stanley-Jones | Miyachi Katsuya | Morgan Bauer | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nick Miyake | Nick Santos | Nikhil Pandeti | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne 
<79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Sam Whited | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Steve Lohr | Sven Dowideit | Takuya Noguchi | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Ziv Tsarfati | a-palchikov | coryb | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | genglu.gl | ggjulio | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | pieterdd | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | zhangwenlong | 岁丰 | 沈陵 | 郑泽宇 " -core,github.com/moby/buildkit/frontend/dockerfile/shell,Apache-2.0,"Aaron L. Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arnaud Bailly | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Bin Liu | Brandon Mitchell | Brian Goff | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. 
Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Dennis Chen | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik Sipsma | Fernando Miguel | Fiona Klute | Foysal Iqbal | Fred Cox | Frieder Bluemle | Gabriel | Gabriel Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Guilhem C | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Ian Campbell | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jean-Pierre Huynh | Jeffrey Huang | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kang, Matthew | Kees Cook | Kevin Burke | Kir Kolyshkin | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marcus Comstedt | Mark Gordon | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Misty Stanley-Jones | Miyachi Katsuya | Morgan Bauer | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nick Miyake | Nick Santos | Nikhil Pandeti | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Sam Whited | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Steve Lohr | Sven Dowideit | Takuya Noguchi | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. 
Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Ziv Tsarfati | a-palchikov | coryb | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | genglu.gl | ggjulio | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | pieterdd | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | zhangwenlong | 岁丰 | 沈陵 | 郑泽宇 " -core,github.com/moby/buildkit/util/stack,Apache-2.0,"Aaron L. Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arnaud Bailly | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Bin Liu | Brandon Mitchell | Brian Goff | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Dennis Chen | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik Sipsma | Fernando Miguel | Fiona Klute | Foysal Iqbal | Fred Cox | Frieder Bluemle | Gabriel | Gabriel Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Guilhem C | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Ian Campbell | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jean-Pierre Huynh | Jeffrey Huang | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kang, Matthew | Kees Cook | Kevin Burke | Kir Kolyshkin | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marcus Comstedt | Mark Gordon | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Misty Stanley-Jones | Miyachi Katsuya | Morgan Bauer | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nick Miyake | Nick Santos | Nikhil Pandeti | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne 
<79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Sam Whited | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Steve Lohr | Sven Dowideit | Takuya Noguchi | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Ziv Tsarfati | a-palchikov | coryb | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | genglu.gl | ggjulio | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | pieterdd | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | zhangwenlong | 岁丰 | 沈陵 | 郑泽宇 " -core,github.com/moby/buildkit/util/suggest,Apache-2.0,"Aaron L. Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arnaud Bailly | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Bin Liu | Brandon Mitchell | Brian Goff | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. 
Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Dennis Chen | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik Sipsma | Fernando Miguel | Fiona Klute | Foysal Iqbal | Fred Cox | Frieder Bluemle | Gabriel | Gabriel Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Guilhem C | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Ian Campbell | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jean-Pierre Huynh | Jeffrey Huang | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kang, Matthew | Kees Cook | Kevin Burke | Kir Kolyshkin | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marcus Comstedt | Mark Gordon | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Misty Stanley-Jones | Miyachi Katsuya | Morgan Bauer | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nick Miyake | Nick Santos | Nikhil Pandeti | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Sam Whited | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Steve Lohr | Sven Dowideit | Takuya Noguchi | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. 
Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Ziv Tsarfati | a-palchikov | coryb | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | genglu.gl | ggjulio | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | pieterdd | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | zhangwenlong | 岁丰 | 沈陵 | 郑泽宇 " +core,github.com/moby/buildkit/frontend/dockerfile/command,Apache-2.0,"Aaron L. Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Adrien Delorme | Ahmon Dancy | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alano Terblanche <18033717+Benehiko@users.noreply.github.com> | Aleksa Sarai | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Nandaa | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arkadiusz Drabczyk | Arnaldo Garcia Rincon | Arnaud Bailly | Artem Khramov | Austin Vazquez | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Billy Owire | Bin Liu | Bjorn Neergaard | Brandon Mitchell | Brennan Kinney <5098581+polarathene@users.noreply.github.com> | Brian Goff | Bunyamin Dokmetas <19335284+ztzxt@users.noreply.github.com> | Burt Holzman | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | Craig Andrews | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Debosmit Ray | Dennis Chen | Dennis Haney | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik McKelvey | Erik Sipsma | Felix Fontein | Fernando Miguel | Fiona Klute | Foysal Iqbal | Frank Villaro-Dixon | Fred Cox | Frieder Bluemle | Félix Mattrat | Gabriel-Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Grégoire Payen de La Garanderie | Guilhem Charles | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Höhl, Lukas | Ian Campbell | Ian King'ori | Ignas Mikalajūnas | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jakub Ciolek | James Carnegie | Jean-Pierre Huynh | Jeffrey Huang | Jesper Noordsij | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan A. 
Sternberg | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kai Takac | Kang, Matthew | Kazuyoshi Kato | Kees Cook | Kevin Burke | Kir Kolyshkin | Kirill A. Korinsky | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Leandro Santiago | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marat Radchenko | Marcus Comstedt | Mark Gordon | Mark Yen | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Milas Bowman | Misty Stanley-Jones | Mitsuru Kariya | Miyachi Katsuya | Morgan Bauer | Moritz ""WanzenBug"" Wanzenböck | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nguyễn Đức Chiến | Nick Miyake | Nick Santos | Nikhil Pandeti | Nobi | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Petteri Räty | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Reshen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Salim B | Sam Whited | Sascha Hemleb | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shaun Thompson | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Stephen Day | Steve Lohr | Sven Dowideit | Swagat Bora | Takuya Noguchi | Talon Bowler | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Tristan Stenner | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Zhizhen He | Ziv Tsarfati | a-palchikov | adamperlin | coryb | cuiyourong | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | fanjiyun.fjy | frankyang | genglu.gl | ggjulio | guangwu | guoguangwu | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liulanzheng | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | njucjc | omahs <73983677+omahs@users.noreply.github.com> | pieterdd | racequite | retornam | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | x893675 | yumemio <59369226+yumemio@users.noreply.github.com> | yzewei | zhangwenlong | 岁丰 | 沈陵 | 蝦米 | 郑泽宇 " +core,github.com/moby/buildkit/frontend/dockerfile/instructions,Apache-2.0,"Aaron L. 
Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Adrien Delorme | Ahmon Dancy | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alano Terblanche <18033717+Benehiko@users.noreply.github.com> | Aleksa Sarai | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Nandaa | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arkadiusz Drabczyk | Arnaldo Garcia Rincon | Arnaud Bailly | Artem Khramov | Austin Vazquez | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Billy Owire | Bin Liu | Bjorn Neergaard | Brandon Mitchell | Brennan Kinney <5098581+polarathene@users.noreply.github.com> | Brian Goff | Bunyamin Dokmetas <19335284+ztzxt@users.noreply.github.com> | Burt Holzman | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | Craig Andrews | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Debosmit Ray | Dennis Chen | Dennis Haney | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik McKelvey | Erik Sipsma | Felix Fontein | Fernando Miguel | Fiona Klute | Foysal Iqbal | Frank Villaro-Dixon | Fred Cox | Frieder Bluemle | Félix Mattrat | Gabriel-Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Grégoire Payen de La Garanderie | Guilhem Charles | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Höhl, Lukas | Ian Campbell | Ian King'ori | Ignas Mikalajūnas | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jakub Ciolek | James Carnegie | Jean-Pierre Huynh | Jeffrey Huang | Jesper Noordsij | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan A. Sternberg | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kai Takac | Kang, Matthew | Kazuyoshi Kato | Kees Cook | Kevin Burke | Kir Kolyshkin | Kirill A. 
Korinsky | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Leandro Santiago | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marat Radchenko | Marcus Comstedt | Mark Gordon | Mark Yen | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Milas Bowman | Misty Stanley-Jones | Mitsuru Kariya | Miyachi Katsuya | Morgan Bauer | Moritz ""WanzenBug"" Wanzenböck | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nguyễn Đức Chiến | Nick Miyake | Nick Santos | Nikhil Pandeti | Nobi | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Petteri Räty | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Reshen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Salim B | Sam Whited | Sascha Hemleb | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shaun Thompson | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Stephen Day | Steve Lohr | Sven Dowideit | Swagat Bora | Takuya Noguchi | Talon Bowler | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Tristan Stenner | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Zhizhen He | Ziv Tsarfati | a-palchikov | adamperlin | coryb | cuiyourong | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | fanjiyun.fjy | frankyang | genglu.gl | ggjulio | guangwu | guoguangwu | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liulanzheng | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | njucjc | omahs <73983677+omahs@users.noreply.github.com> | pieterdd | racequite | retornam | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | x893675 | yumemio <59369226+yumemio@users.noreply.github.com> | yzewei | zhangwenlong | 岁丰 | 沈陵 | 蝦米 | 郑泽宇 " +core,github.com/moby/buildkit/frontend/dockerfile/linter,Apache-2.0,"Aaron L. 
Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Adrien Delorme | Ahmon Dancy | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alano Terblanche <18033717+Benehiko@users.noreply.github.com> | Aleksa Sarai | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Nandaa | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arkadiusz Drabczyk | Arnaldo Garcia Rincon | Arnaud Bailly | Artem Khramov | Austin Vazquez | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Billy Owire | Bin Liu | Bjorn Neergaard | Brandon Mitchell | Brennan Kinney <5098581+polarathene@users.noreply.github.com> | Brian Goff | Bunyamin Dokmetas <19335284+ztzxt@users.noreply.github.com> | Burt Holzman | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | Craig Andrews | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Debosmit Ray | Dennis Chen | Dennis Haney | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik McKelvey | Erik Sipsma | Felix Fontein | Fernando Miguel | Fiona Klute | Foysal Iqbal | Frank Villaro-Dixon | Fred Cox | Frieder Bluemle | Félix Mattrat | Gabriel-Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Grégoire Payen de La Garanderie | Guilhem Charles | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Höhl, Lukas | Ian Campbell | Ian King'ori | Ignas Mikalajūnas | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jakub Ciolek | James Carnegie | Jean-Pierre Huynh | Jeffrey Huang | Jesper Noordsij | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan A. Sternberg | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kai Takac | Kang, Matthew | Kazuyoshi Kato | Kees Cook | Kevin Burke | Kir Kolyshkin | Kirill A. 
Korinsky | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Leandro Santiago | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marat Radchenko | Marcus Comstedt | Mark Gordon | Mark Yen | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Milas Bowman | Misty Stanley-Jones | Mitsuru Kariya | Miyachi Katsuya | Morgan Bauer | Moritz ""WanzenBug"" Wanzenböck | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nguyễn Đức Chiến | Nick Miyake | Nick Santos | Nikhil Pandeti | Nobi | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Petteri Räty | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Reshen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Salim B | Sam Whited | Sascha Hemleb | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shaun Thompson | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Stephen Day | Steve Lohr | Sven Dowideit | Swagat Bora | Takuya Noguchi | Talon Bowler | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Tristan Stenner | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Zhizhen He | Ziv Tsarfati | a-palchikov | adamperlin | coryb | cuiyourong | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | fanjiyun.fjy | frankyang | genglu.gl | ggjulio | guangwu | guoguangwu | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liulanzheng | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | njucjc | omahs <73983677+omahs@users.noreply.github.com> | pieterdd | racequite | retornam | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | x893675 | yumemio <59369226+yumemio@users.noreply.github.com> | yzewei | zhangwenlong | 岁丰 | 沈陵 | 蝦米 | 郑泽宇 " +core,github.com/moby/buildkit/frontend/dockerfile/parser,Apache-2.0,"Aaron L. 
Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Adrien Delorme | Ahmon Dancy | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alano Terblanche <18033717+Benehiko@users.noreply.github.com> | Aleksa Sarai | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Nandaa | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arkadiusz Drabczyk | Arnaldo Garcia Rincon | Arnaud Bailly | Artem Khramov | Austin Vazquez | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Billy Owire | Bin Liu | Bjorn Neergaard | Brandon Mitchell | Brennan Kinney <5098581+polarathene@users.noreply.github.com> | Brian Goff | Bunyamin Dokmetas <19335284+ztzxt@users.noreply.github.com> | Burt Holzman | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | Craig Andrews | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Debosmit Ray | Dennis Chen | Dennis Haney | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik McKelvey | Erik Sipsma | Felix Fontein | Fernando Miguel | Fiona Klute | Foysal Iqbal | Frank Villaro-Dixon | Fred Cox | Frieder Bluemle | Félix Mattrat | Gabriel-Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Grégoire Payen de La Garanderie | Guilhem Charles | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Höhl, Lukas | Ian Campbell | Ian King'ori | Ignas Mikalajūnas | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jakub Ciolek | James Carnegie | Jean-Pierre Huynh | Jeffrey Huang | Jesper Noordsij | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan A. Sternberg | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kai Takac | Kang, Matthew | Kazuyoshi Kato | Kees Cook | Kevin Burke | Kir Kolyshkin | Kirill A. 
Korinsky | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Leandro Santiago | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marat Radchenko | Marcus Comstedt | Mark Gordon | Mark Yen | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Milas Bowman | Misty Stanley-Jones | Mitsuru Kariya | Miyachi Katsuya | Morgan Bauer | Moritz ""WanzenBug"" Wanzenböck | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nguyễn Đức Chiến | Nick Miyake | Nick Santos | Nikhil Pandeti | Nobi | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Petteri Räty | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Reshen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Salim B | Sam Whited | Sascha Hemleb | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shaun Thompson | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Stephen Day | Steve Lohr | Sven Dowideit | Swagat Bora | Takuya Noguchi | Talon Bowler | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Tristan Stenner | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Zhizhen He | Ziv Tsarfati | a-palchikov | adamperlin | coryb | cuiyourong | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | fanjiyun.fjy | frankyang | genglu.gl | ggjulio | guangwu | guoguangwu | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liulanzheng | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | njucjc | omahs <73983677+omahs@users.noreply.github.com> | pieterdd | racequite | retornam | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | x893675 | yumemio <59369226+yumemio@users.noreply.github.com> | yzewei | zhangwenlong | 岁丰 | 沈陵 | 蝦米 | 郑泽宇 " +core,github.com/moby/buildkit/frontend/dockerfile/shell,Apache-2.0,"Aaron L. 
Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Adrien Delorme | Ahmon Dancy | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alano Terblanche <18033717+Benehiko@users.noreply.github.com> | Aleksa Sarai | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Nandaa | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arkadiusz Drabczyk | Arnaldo Garcia Rincon | Arnaud Bailly | Artem Khramov | Austin Vazquez | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Billy Owire | Bin Liu | Bjorn Neergaard | Brandon Mitchell | Brennan Kinney <5098581+polarathene@users.noreply.github.com> | Brian Goff | Bunyamin Dokmetas <19335284+ztzxt@users.noreply.github.com> | Burt Holzman | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | Craig Andrews | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Debosmit Ray | Dennis Chen | Dennis Haney | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik McKelvey | Erik Sipsma | Felix Fontein | Fernando Miguel | Fiona Klute | Foysal Iqbal | Frank Villaro-Dixon | Fred Cox | Frieder Bluemle | Félix Mattrat | Gabriel-Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Grégoire Payen de La Garanderie | Guilhem Charles | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Höhl, Lukas | Ian Campbell | Ian King'ori | Ignas Mikalajūnas | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jakub Ciolek | James Carnegie | Jean-Pierre Huynh | Jeffrey Huang | Jesper Noordsij | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan A. Sternberg | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kai Takac | Kang, Matthew | Kazuyoshi Kato | Kees Cook | Kevin Burke | Kir Kolyshkin | Kirill A. 
Korinsky | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Leandro Santiago | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marat Radchenko | Marcus Comstedt | Mark Gordon | Mark Yen | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Milas Bowman | Misty Stanley-Jones | Mitsuru Kariya | Miyachi Katsuya | Morgan Bauer | Moritz ""WanzenBug"" Wanzenböck | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nguyễn Đức Chiến | Nick Miyake | Nick Santos | Nikhil Pandeti | Nobi | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Petteri Räty | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Reshen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Salim B | Sam Whited | Sascha Hemleb | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shaun Thompson | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Stephen Day | Steve Lohr | Sven Dowideit | Swagat Bora | Takuya Noguchi | Talon Bowler | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Tristan Stenner | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Zhizhen He | Ziv Tsarfati | a-palchikov | adamperlin | coryb | cuiyourong | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | fanjiyun.fjy | frankyang | genglu.gl | ggjulio | guangwu | guoguangwu | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liulanzheng | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | njucjc | omahs <73983677+omahs@users.noreply.github.com> | pieterdd | racequite | retornam | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | x893675 | yumemio <59369226+yumemio@users.noreply.github.com> | yzewei | zhangwenlong | 岁丰 | 沈陵 | 蝦米 | 郑泽宇 " +core,github.com/moby/buildkit/util/stack,Apache-2.0,"Aaron L. 
Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Adrien Delorme | Ahmon Dancy | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alano Terblanche <18033717+Benehiko@users.noreply.github.com> | Aleksa Sarai | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Nandaa | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arkadiusz Drabczyk | Arnaldo Garcia Rincon | Arnaud Bailly | Artem Khramov | Austin Vazquez | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Billy Owire | Bin Liu | Bjorn Neergaard | Brandon Mitchell | Brennan Kinney <5098581+polarathene@users.noreply.github.com> | Brian Goff | Bunyamin Dokmetas <19335284+ztzxt@users.noreply.github.com> | Burt Holzman | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | Craig Andrews | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Debosmit Ray | Dennis Chen | Dennis Haney | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik McKelvey | Erik Sipsma | Felix Fontein | Fernando Miguel | Fiona Klute | Foysal Iqbal | Frank Villaro-Dixon | Fred Cox | Frieder Bluemle | Félix Mattrat | Gabriel-Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Grégoire Payen de La Garanderie | Guilhem Charles | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Höhl, Lukas | Ian Campbell | Ian King'ori | Ignas Mikalajūnas | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jakub Ciolek | James Carnegie | Jean-Pierre Huynh | Jeffrey Huang | Jesper Noordsij | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan A. Sternberg | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kai Takac | Kang, Matthew | Kazuyoshi Kato | Kees Cook | Kevin Burke | Kir Kolyshkin | Kirill A. 
Korinsky | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Leandro Santiago | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marat Radchenko | Marcus Comstedt | Mark Gordon | Mark Yen | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Milas Bowman | Misty Stanley-Jones | Mitsuru Kariya | Miyachi Katsuya | Morgan Bauer | Moritz ""WanzenBug"" Wanzenböck | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nguyễn Đức Chiến | Nick Miyake | Nick Santos | Nikhil Pandeti | Nobi | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Petteri Räty | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Reshen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Salim B | Sam Whited | Sascha Hemleb | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shaun Thompson | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Stephen Day | Steve Lohr | Sven Dowideit | Swagat Bora | Takuya Noguchi | Talon Bowler | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Tristan Stenner | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Zhizhen He | Ziv Tsarfati | a-palchikov | adamperlin | coryb | cuiyourong | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | fanjiyun.fjy | frankyang | genglu.gl | ggjulio | guangwu | guoguangwu | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liulanzheng | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | njucjc | omahs <73983677+omahs@users.noreply.github.com> | pieterdd | racequite | retornam | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | x893675 | yumemio <59369226+yumemio@users.noreply.github.com> | yzewei | zhangwenlong | 岁丰 | 沈陵 | 蝦米 | 郑泽宇 " +core,github.com/moby/buildkit/util/suggest,Apache-2.0,"Aaron L. 
Xu | Aaron Lehmann | Aaron Lehmann | Abdur Rehman | Addam Hardy | Adrian Plata | Adrien Delorme | Ahmon Dancy | Aidan Hobson Sayers | Akihiro Suda | Alan Fregtman <941331+darkvertex@users.noreply.github.com> | Alano Terblanche <18033717+Benehiko@users.noreply.github.com> | Aleksa Sarai | Alex Couture-Beil | Alex Mayer | Alex Suraci | Alexander Morozov | Alexis Murzeau | Alice Frosi | Allen Sun | Amen Belayneh | Anca Iordache | Anda Xu | Anders F Björklund | Andrea Bolognani | Andrea Luzzardi | Andrew Chang | Andrey Smirnov | Andy Alt | Andy Caldwell | Ankush Agarwal | Anthony Nandaa | Anthony Sottile | Anurag Goel | Anusha Ragunathan | Arkadiusz Drabczyk | Arnaldo Garcia Rincon | Arnaud Bailly | Artem Khramov | Austin Vazquez | Avi Deitcher | Bastiaan Bakker | Ben Longo | Bertrand Paquet | Billy Owire | Bin Liu | Bjorn Neergaard | Brandon Mitchell | Brennan Kinney <5098581+polarathene@users.noreply.github.com> | Brian Goff | Bunyamin Dokmetas <19335284+ztzxt@users.noreply.github.com> | Burt Holzman | Ce Gao | Chaerim Yeo | Changwei Ge | Chanhun Jeong | ChaosGramer | Charles Chan | Charles Korn | Charles Law | Chenbin | Chris Goller | Chris McKinnel | Christian Höltje | Christian Weichel | Ciro S. Costa | Claudiu Belu | Colin Chartier | Corey Larson | Cory Bennett | Cory Snider | Craig Andrews | CrazyMax | Csaba Apagyi | Dan Duvall | Daniel Cassidy | Daniel Nephin | Darren Shepherd | Dave Chen | Dave Henderson | Dave Tucker | David Calavera | David Dooling | David Gageot | David Karlsson | Davis Schirmer | Debosmit Ray | Dennis Chen | Dennis Haney | Derek McGowan | Dharmit Shah | Ding Fei | Doug Davis | Edgar Lee | Eli Uriegas | Elias Faxö | Eng Zer Jun | Eric Engestrom | Erik McKelvey | Erik Sipsma | Felix Fontein | Fernando Miguel | Fiona Klute | Foysal Iqbal | Frank Villaro-Dixon | Fred Cox | Frieder Bluemle | Félix Mattrat | Gabriel-Adrian Samfira | Gaetan de Villele | Gahl Saraf | George | Govind Rai | Grant Reaber | Grégoire Payen de La Garanderie | Guilhem Charles | Hans van den Bogert | Hao Hu | Hector S | Helen Xie | Himanshu Pandey | Hiromu Nakamura | HowJMay | Hugo Santos | Höhl, Lukas | Ian Campbell | Ian King'ori | Ignas Mikalajūnas | Ilya Dmitrichenko | Iskander (Alex) Sharipov | Jacob Gillespie | Jacob MacElroy | Jakub Ciolek | James Carnegie | Jean-Pierre Huynh | Jeffrey Huang | Jesper Noordsij | Jesse Rittner | Jessica Frazelle | Jitender Kumar | John Howard | John Maguire | John Mulhausen | John Tims | Jon Zeolla | Jonathan A. Sternberg | Jonathan Azoff | Jonathan Giannuzzi | Jonathan Stoppani | Jonny Stoten | JordanGoasdoue | Julian Goede | Justas Brazauskas | Justin Chadwell | Justin Cormack | Justin Garrison | Jörg Franke <359489+NewJorg@users.noreply.github.com> | Kai Takac | Kang, Matthew | Kazuyoshi Kato | Kees Cook | Kevin Burke | Kir Kolyshkin | Kirill A. 
Korinsky | Kohei Tokunaga | Koichi Shiraishi | Kris-Mikael Krister | Kunal Kushwaha | Kyle | Lajos Papp | Leandro Santiago | Levi Harrison | Lu Jingxiao | Luca Visentin | Maciej Kalisz | Madhav Puri | Manu Gupta | Marat Radchenko | Marcus Comstedt | Mark Gordon | Mark Yen | Marko Kohtala | Mary Anthony | Matias Insaurralde | Matt Kang | Matt Rickard | Maxime Lagresle | Michael Crosby | Michael Friis | Michael Irwin | Miguel Ángel Jimeno | Mihai Borobocea | Mike Brown | Mikhail Vasin | Milas Bowman | Misty Stanley-Jones | Mitsuru Kariya | Miyachi Katsuya | Morgan Bauer | Moritz ""WanzenBug"" Wanzenböck | Morlay | Nao YONASHIRO | Natasha Jarus | Nathan Sullivan | Nguyễn Đức Chiến | Nick Miyake | Nick Santos | Nikhil Pandeti | Nobi | Noel Georgi <18496730+frezbo@users.noreply.github.com> | Oliver Bristow | Omer Duchovne <79370724+od-cyera@users.noreply.github.com> | Omer Mizrahi | Ondrej Fabry | Otto Kekäläinen | Pablo Chico de Guzman | Patrick Hemmer | Patrick Lang | Patrick Van Stee | Paul ""TBBle"" Hampson | Paweł Gronowski | Peter Dave Hello | Petr Fedchenkov | Petteri Räty | Phil Estes | Pierre Fenoll | Pranav Pandit | Pratik Raj | Prayag Verma | Qiang Huang | Remy Suen | Reshen | Ri Xu | Rob Taylor | Robert Estelle | Rubens Figueiredo | Salim B | Sam Whited | Sascha Hemleb | Sascha Schwarze | Sean P. Kane | Sebastiaan van Stijn | Seiya Miyata | Serhat Gülçiçek | Sertac Ozercan | Shaun Thompson | Shev Yan | Shijiang Wei | Shingo Omura | Shiwei Zhang | Siebe Schaap | Silvin Lubecki <31478878+silvin-lubecki@users.noreply.github.com> | Simon Ferquel | Slava Semushin | Solomon Hykes | Stefan Scherer | Stefan Weil | StefanSchoof | Stepan Blyshchak | Stephen Day | Steve Lohr | Sven Dowideit | Swagat Bora | Takuya Noguchi | Talon Bowler | Thomas Leonard | Thomas Riccardi | Thomas Shaw | Tianon Gravi | Tibor Vass | Tiffany Jernigan | Tim Waugh | Tim Wraight | Tino Rusch | Tobias Klauser | Tomas Tomecek | Tomasz Kopczynski | Tomohiro Kusumoto | Tristan Stenner | Troels Liebe Bentsen | Tõnis Tiigi | Valentin Lorentz | Vasek - Tom C | Victor Vieux | Victoria Bialas | Vincent Demeester | Vlad A. Ionescu | Vladislav Ivanov | Wang Yumu <37442693@qq.com> | Wei Fu | Wei Zhang | Xiaofan Zhang | Ximo Guanter | Yamazaki Masashi | Yan Song | Yong Tang | Yuichiro Kaneko | Yurii Rashkovskii | Zach Badgett | Zhizhen He | Ziv Tsarfati | a-palchikov | adamperlin | coryb | cuiyourong | dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> | dito | eyherabh | f0 | fanjiyun.fjy | frankyang | genglu.gl | ggjulio | guangwu | guoguangwu | jgeiger | jlecordier | joey | jroenf | kevinmeredith | l00397676 | lalyos | liulanzheng | liwenqi | lixiaobing10051267 | lomot | masibw | mikelinjie <294893458@qq.com> | msg | njucjc | omahs <73983677+omahs@users.noreply.github.com> | pieterdd | racequite | retornam | squeegels <1674195+squeegels@users.noreply.github.com> | sunchunming | wingkwong | x893675 | yumemio <59369226+yumemio@users.noreply.github.com> | yzewei | zhangwenlong | 岁丰 | 沈陵 | 蝦米 | 郑泽宇 " core,github.com/moby/docker-image-spec/specs-go/v1,Apache-2.0,"Copyright 2012-2017 Docker, Inc." core,github.com/moby/locker,Apache-2.0,"Copyright 2013-2018 Docker, Inc" core,github.com/moby/spdystream,Apache-2.0,"Copyright 2013-2021 Docker, inc. 
Released under the [Apache 2.0 license](LICENSE) | Copyright 2014-2021 Docker Inc" @@ -1523,6 +1534,7 @@ core,github.com/netsampler/goflow2/pb,BSD-3-Clause,"Copyright (c) 2021, NetSampl core,github.com/netsampler/goflow2/producer,BSD-3-Clause,"Copyright (c) 2021, NetSampler" core,github.com/netsampler/goflow2/transport,BSD-3-Clause,"Copyright (c) 2021, NetSampler" core,github.com/netsampler/goflow2/utils,BSD-3-Clause,"Copyright (c) 2021, NetSampler" +core,github.com/nozzle/throttler,Apache-2.0,Copyright 2018 Derek Perkins core,github.com/oklog/ulid,Apache-2.0,- Peter Bourgon (@peterbourgon) | - Tomás Senart (@tsenart) core,github.com/olekukonko/tablewriter,MIT,Copyright (C) 2014 by Oleku Konko core,github.com/oliveagle/jsonpath,MIT,Copyright (c) 2015 oliver @@ -1675,12 +1687,14 @@ core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadata core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/docker,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/k8snode,Apache-2.0,Copyright The OpenTelemetry Authors +core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/kubeadm,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/openshift,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders/system,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr,Apache-2.0,Copyright The OpenTelemetry Authors +core,github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog/config,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl,Apache-2.0,Copyright The OpenTelemetry Authors @@ -1796,6 +1810,8 @@ core,github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourc core,github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/heroku/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/k8snode,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/k8snode/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors +core,github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/kubeadm,Apache-2.0,Copyright The 
OpenTelemetry Authors +core,github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/kubeadm/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/openshift,Apache-2.0,Copyright The OpenTelemetry Authors core,github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor/internal/openshift/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors @@ -1874,6 +1890,11 @@ core,github.com/openshift/client-go/quota/clientset/versioned/typed/quota/v1,Apa core,github.com/opentracing/opentracing-go,Apache-2.0,Copyright 2016 The OpenTracing Authors core,github.com/opentracing/opentracing-go/ext,Apache-2.0,Copyright 2016 The OpenTracing Authors core,github.com/opentracing/opentracing-go/log,Apache-2.0,Copyright 2016 The OpenTracing Authors +core,github.com/openvex/discovery/pkg/discovery,Apache-2.0,Copyright 2023 The OpenVEX Authors +core,github.com/openvex/discovery/pkg/discovery/options,Apache-2.0,Copyright 2023 The OpenVEX Authors +core,github.com/openvex/discovery/pkg/oci,Apache-2.0,Copyright 2023 The OpenVEX Authors +core,github.com/openvex/discovery/pkg/probers/oci,Apache-2.0,Copyright 2023 The OpenVEX Authors +core,github.com/openvex/go-vex/pkg/attestation,Apache-2.0,Copyright 2023 The OpenVEX Authors core,github.com/openvex/go-vex/pkg/csaf,Apache-2.0,Copyright 2023 The OpenVEX Authors core,github.com/openvex/go-vex/pkg/vex,Apache-2.0,Copyright 2023 The OpenVEX Authors core,github.com/openzipkin/zipkin-go/model,Apache-2.0,Copyright 2017 The OpenZipkin Authors @@ -2018,10 +2039,11 @@ core,github.com/samber/lo/internal/constraints,MIT,Copyright (c) 2022 Samuel Ber core,github.com/samber/lo/internal/rand,MIT,Copyright (c) 2022 Samuel Berthe | Copyright © 2022 [Samuel Berthe](https://github.com/samber) core,github.com/samuel/go-zookeeper/zk,BSD-3-Clause,"Copyright (c) 2013, Samuel Stauffer " core,github.com/santhosh-tekuri/jsonschema/v5,Apache-2.0,Copyright (c) 2017-2024 Santhosh Kumar Tekuri -core,github.com/saracen/walker,MIT,Copyright (c) 2019 Arran Walker core,github.com/sassoftware/go-rpmutils,Apache-2.0,Copyright (c) SAS Institute Inc. core,github.com/sassoftware/go-rpmutils/cpio,Apache-2.0,Copyright (c) SAS Institute Inc. core,github.com/sassoftware/go-rpmutils/fileutil,Apache-2.0,Copyright (c) SAS Institute Inc. +core,github.com/sassoftware/relic/lib/pkcs7,Apache-2.0,Copyright (c) SAS Institute Inc. +core,github.com/sassoftware/relic/lib/x509tools,Apache-2.0,Copyright (c) SAS Institute Inc. 
core,github.com/scaleway/scaleway-sdk-go/api/baremetal/v1,Apache-2.0,Copyright 2019 Scaleway core,github.com/scaleway/scaleway-sdk-go/api/block/v1alpha1,Apache-2.0,Copyright 2019 Scaleway core,github.com/scaleway/scaleway-sdk-go/api/instance/v1,Apache-2.0,Copyright 2019 Scaleway @@ -2038,6 +2060,7 @@ core,github.com/scaleway/scaleway-sdk-go/scw,Apache-2.0,Copyright 2019 Scaleway core,github.com/scaleway/scaleway-sdk-go/validation,Apache-2.0,Copyright 2019 Scaleway core,github.com/secure-systems-lab/go-securesystemslib/cjson,MIT,Copyright (c) 2021 NYU Secure Systems Lab core,github.com/secure-systems-lab/go-securesystemslib/dsse,MIT,Copyright (c) 2021 NYU Secure Systems Lab +core,github.com/secure-systems-lab/go-securesystemslib/encrypted,MIT,Copyright (c) 2021 NYU Secure Systems Lab core,github.com/secure-systems-lab/go-securesystemslib/signerverifier,MIT,Copyright (c) 2021 NYU Secure Systems Lab core,github.com/sergi/go-diff/diffmatchpatch,MIT,Copyright (c) 2012-2016 The go-diff Authors. All rights reserved | Danny Yoo | James Kolb | Jonathan Amsterdam | Markus Zimmermann | Matt Kovars | Osman Masood | Robert Carlsen | Rory Flynn | Sergi Mansilla | Shatrugna Sadhu | Shawn Smith | Stas Maksimov | Tor Arvid Lund | Zac Bergquist | Örjan Persson core,github.com/shibumi/go-pathspec,Apache-2.0,Copyright (c) 2012 The Go Authors. All rights reserved. @@ -2054,12 +2077,58 @@ core,github.com/shirou/w32,BSD-3-Clause,Copyright (c) 2010-2012 The w32 Authors. core,github.com/shopspring/decimal,MIT,"Copyright (c) 2013 Oguz Bilgic | Copyright (c) 2015 Spring, Inc" core,github.com/signalfx/sapm-proto/client,Apache-2.0,"Copyright 2019 Splunk, Inc." core,github.com/signalfx/sapm-proto/gen,Apache-2.0,"Copyright 2019 Splunk, Inc." +core,github.com/sigstore/cosign/v2/internal/pkg/cosign,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/internal/pkg/cosign/payload/size,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/internal/pkg/now,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/internal/pkg/oci/remote,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/internal/ui,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/blob,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/cosign,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/cosign/bundle,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/cosign/env,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/cosign/fulcioverifier/ctutil,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/oci,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/oci/empty,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/oci/internal/signature,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/oci/layout,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/oci/remote,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/oci/signed,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/oci/static,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/cosign/v2/pkg/types,Apache-2.0,Copyright 2021 The Sigstore Authors 
+core,github.com/sigstore/rekor/pkg/client,Apache-2.0,Copyright 2021 The Sigstore Authors core,github.com/sigstore/rekor/pkg/generated/client,Apache-2.0,Copyright 2021 The Sigstore Authors core,github.com/sigstore/rekor/pkg/generated/client/entries,Apache-2.0,Copyright 2021 The Sigstore Authors core,github.com/sigstore/rekor/pkg/generated/client/index,Apache-2.0,Copyright 2021 The Sigstore Authors core,github.com/sigstore/rekor/pkg/generated/client/pubkey,Apache-2.0,Copyright 2021 The Sigstore Authors core,github.com/sigstore/rekor/pkg/generated/client/tlog,Apache-2.0,Copyright 2021 The Sigstore Authors core,github.com/sigstore/rekor/pkg/generated/models,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/log,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/pki,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/pki/identity,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/pki/minisign,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/pki/pgp,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/pki/pkcs7,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/pki/ssh,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/pki/tuf,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/pki/x509,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types/dsse,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types/dsse/v0.0.1,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types/hashedrekord,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types/intoto,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types/intoto/v0.0.1,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types/intoto/v0.0.2,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types/rekord,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/types/rekord/v0.0.1,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/rekor/pkg/util,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/sigstore/pkg/cryptoutils,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/sigstore/pkg/signature,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/sigstore/pkg/signature/dsse,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/sigstore/pkg/signature/options,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/sigstore/pkg/signature/payload,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/sigstore/pkg/tuf,Apache-2.0,Copyright 2021 The Sigstore Authors +core,github.com/sigstore/timestamp-authority/pkg/verification,Apache-2.0,Copyright 2022 The Sigstore Authors core,github.com/sijms/go-ora/v2,MIT,Copyright (c) 2020 Samy Sultan core,github.com/sijms/go-ora/v2/advanced_nego,MIT,Copyright (c) 2020 Samy Sultan core,github.com/sijms/go-ora/v2/advanced_nego/ntlmssp,MIT,Copyright (c) 2020 Samy 
Sultan @@ -2147,7 +2216,6 @@ core,github.com/tetratelabs/wazero/api,Apache-2.0,Copyright 2020-2023 wazero aut core,github.com/tetratelabs/wazero/experimental,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/experimental/sys,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1,Apache-2.0,Copyright 2020-2023 wazero authors -core,github.com/tetratelabs/wazero/internal/close,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/descriptor,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/engine/interpreter,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/engine/wazevo,Apache-2.0,Copyright 2020-2023 wazero authors @@ -2158,6 +2226,7 @@ core,github.com/tetratelabs/wazero/internal/engine/wazevo/backend/regalloc,Apach core,github.com/tetratelabs/wazero/internal/engine/wazevo/frontend,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/engine/wazevo/ssa,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/engine/wazevo/wazevoapi,Apache-2.0,Copyright 2020-2023 wazero authors +core,github.com/tetratelabs/wazero/internal/expctxkeys,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/filecache,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/fsapi,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/ieee754,Apache-2.0,Copyright 2020-2023 wazero authors @@ -2176,8 +2245,20 @@ core,github.com/tetratelabs/wazero/internal/wasm,Apache-2.0,Copyright 2020-2023 core,github.com/tetratelabs/wazero/internal/wasm/binary,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/wasmdebug,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/internal/wasmruntime,Apache-2.0,Copyright 2020-2023 wazero authors -core,github.com/tetratelabs/wazero/internal/wazeroir,Apache-2.0,Copyright 2020-2023 wazero authors core,github.com/tetratelabs/wazero/sys,Apache-2.0,Copyright 2020-2023 wazero authors +core,github.com/theupdateframework/go-tuf,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/client,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/client/leveldbstore,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/data,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/internal/fsutil,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/internal/roles,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/internal/sets,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/internal/signer,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/pkg/keys,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. 
All rights reserved" +core,github.com/theupdateframework/go-tuf/pkg/targets,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/sign,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/util,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" +core,github.com/theupdateframework/go-tuf/verify,BSD-3-Clause,"Copyright (c) 2014-2020 Prime Directive, Inc. All rights reserved" core,github.com/tidwall/gjson,MIT,Copyright (c) 2016 Josh Baker core,github.com/tidwall/match,MIT,Copyright (c) 2016 Josh Baker core,github.com/tidwall/pretty,MIT,Copyright (c) 2017 Josh Baker @@ -2187,9 +2268,15 @@ core,github.com/tinylib/msgp/gen,MIT,Copyright (c) 2009 The Go Authors (license core,github.com/tinylib/msgp/msgp,MIT,Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated | Copyright (c) 2014 Philip Hofer core,github.com/tinylib/msgp/parse,MIT,Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated | Copyright (c) 2014 Philip Hofer core,github.com/tinylib/msgp/printer,MIT,Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated | Copyright (c) 2014 Philip Hofer +core,github.com/titanous/rocacheck,MIT,"Copyright (c) 2017, CRoCS, EnigmaBridge Ltd | Copyright (c) 2017, Jonathan Rudenberg" core,github.com/tklauser/go-sysconf,BSD-3-Clause,"Copyright (c) 2018-2022, Tobias Klauser" core,github.com/tklauser/numcpus,Apache-2.0,Copyright 2018 Tobias Klauser core,github.com/tmthrgd/go-hex,BSD-3-Clause,"Copyright (c) 2005-2016, Wojciech Muła | Copyright (c) 2012 The Go Authors. All rights reserved | Copyright (c) 2016, Tom Thorogood" +core,github.com/tonistiigi/go-csvvalue,MIT,Copyright 2024 Tõnis Tiigi +core,github.com/transparency-dev/merkle,Apache-2.0,Copyright 2017 Google LLC. All Rights Reserved. +core,github.com/transparency-dev/merkle/compact,Apache-2.0,Copyright 2019 Google LLC. All Rights Reserved. +core,github.com/transparency-dev/merkle/proof,Apache-2.0,Copyright 2017 Google LLC. All Rights Reserved. | Copyright 2022 Google LLC. All Rights Reserved. +core,github.com/transparency-dev/merkle/rfc6962,Apache-2.0,Copyright 2016 Google LLC. All Rights Reserved. core,github.com/twitchtv/twirp,Apache-2.0,"Copyright 2018 Twitch Interactive, Inc. All Rights Reserved" core,github.com/twitchtv/twirp/ctxsetters,Apache-2.0,"Copyright 2018 Twitch Interactive, Inc. All Rights Reserved" core,github.com/twitchtv/twirp/internal/contextkeys,Apache-2.0,"Copyright 2018 Twitch Interactive, Inc. 
All Rights Reserved" @@ -2335,6 +2422,8 @@ core,go.opencensus.io/trace,Apache-2.0,"Copyright 2018, OpenCensus Authors" core,go.opencensus.io/trace/internal,Apache-2.0,"Copyright 2018, OpenCensus Authors" core,go.opencensus.io/trace/propagation,Apache-2.0,"Copyright 2018, OpenCensus Authors" core,go.opencensus.io/trace/tracestate,Apache-2.0,"Copyright 2018, OpenCensus Authors" +core,go.opentelemetry.io/auto/sdk,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/auto/sdk/internal/telemetry,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/client,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/component,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/component/componentstatus,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2343,12 +2432,12 @@ core,go.opentelemetry.io/collector/config/configauth,Apache-2.0,Copyright The Op core,go.opentelemetry.io/collector/config/configcompression,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/config/configgrpc,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/config/confighttp,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/config/confighttp/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/config/confignet,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/config/configopaque,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/config/configretry,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/config/configtelemetry,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/config/configtls,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/config/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/confmap,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/confmap/internal/envvar,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/confmap/internal/mapstructure,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2359,16 +2448,16 @@ core,go.opentelemetry.io/collector/confmap/provider/httpsprovider,Apache-2.0,Cop core,go.opentelemetry.io/collector/confmap/provider/internal/configurablehttpprovider,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/confmap/provider/yamlprovider,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/connector,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/connector/connectorprofiles,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/connector/connectortest,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/connector/internal,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/connector/xconnector,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/consumer,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/consumer/consumererror,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles,Apache-2.0,Copyright The OpenTelemetry Authors 
core,go.opentelemetry.io/collector/consumer/consumererror/internal,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/consumer/consumerprofiles,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/consumer/consumererror/xconsumererror,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/consumer/consumertest,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/consumer/internal,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/consumer/xconsumer,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/debugexporter,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/debugexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2376,26 +2465,27 @@ core,go.opentelemetry.io/collector/exporter/debugexporter/internal/normal,Apache core,go.opentelemetry.io/collector/exporter/debugexporter/internal/otlptext,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exporterbatcher,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exporterhelper,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exporterhelper/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exporterhelper/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/exporter/exporterprofiles,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exporterqueue,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/exportertest,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/internal/experr,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/internal/queue,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/exporter/internal/requesttest,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/nopexporter,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/nopexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/otlpexporter,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/otlpexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/otlphttpexporter,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/exporter/otlphttpexporter/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/exporter/xexporter,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/extension,Apache-2.0,Copyright The OpenTelemetry Authors 
core,go.opentelemetry.io/collector/extension/auth,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/extension/experimental/storage,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/extension/extensioncapabilities,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/extension/extensiontest,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/extension/xextension/storage,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/extension/zpagesextension,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/extension/zpagesextension/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/featuregate,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2435,7 +2525,7 @@ core,go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp,Apache-2.0,Copyright core,go.opentelemetry.io/collector/pdata/testdata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pipeline,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/pipeline/internal/globalsignal,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/pipeline/pipelineprofiles,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/pipeline/xpipeline,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/batchprocessor,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/batchprocessor/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2444,11 +2534,10 @@ core,go.opentelemetry.io/collector/processor/memorylimiterprocessor,Apache-2.0,C core,go.opentelemetry.io/collector/processor/memorylimiterprocessor/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/processorhelper,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/processorhelper/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/processor/processorprofiles,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/processor/processortest,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/processor/xprocessor,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/receiver,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/receiver/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/receiver/nopreceiver,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/receiver/nopreceiver/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/receiver/otlpreceiver,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2459,14 +2548,14 @@ core,go.opentelemetry.io/collector/receiver/otlpreceiver/internal/metrics,Apache 
core,go.opentelemetry.io/collector/receiver/otlpreceiver/internal/profiles,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/receiver/otlpreceiver/internal/trace,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/receiver/receiverhelper,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/receiver/receiverhelper/internal,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/receiver/receiverhelper/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/receiver/receiverprofiles,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/receiver/receivertest,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/receiver/scrapererror,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/receiver/scraperhelper,Apache-2.0,Copyright The OpenTelemetry Authors -core,go.opentelemetry.io/collector/receiver/scraperhelper/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/receiver/xreceiver,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/scraper,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/scraper/scrapererror,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/scraper/scraperhelper,Apache-2.0,Copyright The OpenTelemetry Authors +core,go.opentelemetry.io/collector/scraper/scraperhelper/internal/metadata,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/semconv/v1.16.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/semconv/v1.17.0,Apache-2.0,Copyright The OpenTelemetry Authors core,go.opentelemetry.io/collector/semconv/v1.18.0,Apache-2.0,Copyright The OpenTelemetry Authors @@ -2630,11 +2719,18 @@ core,golang.org/x/crypto/chacha20poly1305,BSD-3-Clause,Copyright (c) 2009 The Go core,golang.org/x/crypto/cryptobyte,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/cryptobyte/asn1,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/curve25519,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/crypto/ed25519,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/hkdf,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/internal/alias,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/internal/poly1305,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/nacl/secretbox,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/ocsp,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/crypto/openpgp,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/crypto/openpgp/armor,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/crypto/openpgp/elgamal,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/crypto/openpgp/errors,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/crypto/openpgp/packet,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/crypto/openpgp/s2k,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/pbkdf2,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/pkcs12,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/pkcs12/internal/rc2,BSD-3-Clause,Copyright 2009 The Go Authors @@ -2645,6 +2741,7 @@ 
core,golang.org/x/crypto/ssh,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/ssh/agent,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/ssh/internal/bcrypt_pbkdf,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/crypto/ssh/knownhosts,BSD-3-Clause,Copyright 2009 The Go Authors +core,golang.org/x/crypto/ssh/terminal,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/exp/constraints,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/exp/maps,BSD-3-Clause,Copyright 2009 The Go Authors core,golang.org/x/exp/mmap,BSD-3-Clause,Copyright 2009 The Go Authors @@ -2798,6 +2895,8 @@ core,google.golang.org/grpc/balancer,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/balancer/base,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/balancer/grpclb/state,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/balancer/pickfirst,Apache-2.0,Copyright 2014 gRPC authors. +core,google.golang.org/grpc/balancer/pickfirst/internal,Apache-2.0,Copyright 2014 gRPC authors. +core,google.golang.org/grpc/balancer/pickfirst/pickfirstleaf,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/balancer/roundrobin,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/binarylog/grpc_binarylog_v1,Apache-2.0,Copyright 2014 gRPC authors. core,google.golang.org/grpc/channelz,Apache-2.0,Copyright 2014 gRPC authors. @@ -2876,6 +2975,7 @@ core,google.golang.org/protobuf/internal/impl,BSD-3-Clause,Copyright (c) 2018 Th core,google.golang.org/protobuf/internal/msgfmt,BSD-3-Clause,Copyright (c) 2018 The Go Authors. All rights reserved core,google.golang.org/protobuf/internal/order,BSD-3-Clause,Copyright (c) 2018 The Go Authors. All rights reserved core,google.golang.org/protobuf/internal/pragma,BSD-3-Clause,Copyright (c) 2018 The Go Authors. All rights reserved +core,google.golang.org/protobuf/internal/protolazy,BSD-3-Clause,Copyright (c) 2018 The Go Authors. All rights reserved core,google.golang.org/protobuf/internal/set,BSD-3-Clause,Copyright (c) 2018 The Go Authors. All rights reserved core,google.golang.org/protobuf/internal/strs,BSD-3-Clause,Copyright (c) 2018 The Go Authors. All rights reserved core,google.golang.org/protobuf/internal/version,BSD-3-Clause,Copyright (c) 2018 The Go Authors. All rights reserved @@ -2959,6 +3059,9 @@ core,gopkg.in/DataDog/dd-trace-go.v1/profiler/internal/pprofutils,Apache-2.0,"Co core,gopkg.in/Knetic/govaluate.v3,MIT,"Copyright (c) 2014-2016 George Lester | abrander (panic-finding testing tool) | benpaxton (fix for missing type checks during literal elide process) | bgaifullin (lifting restriction on complex/struct types) | dpaolella (exposure of variables used in an expression) | iasci (ternary operator) | oxtoacart (parameter structures, deferred parameter retrieval) | prashantv (optimization of bools) | vjeantet (regex support) | wmiller848 (bitwise operators) | xfennec (fix for dates being parsed in the current Location)" core,gopkg.in/cheggaaa/pb.v1,BSD-3-Clause,"Copyright (c) 2012-2015, Sergey Cherepanov" core,gopkg.in/evanphx/json-patch.v4,BSD-3-Clause,"Copyright (c) 2014, Evan Phoenix" +core,gopkg.in/go-jose/go-jose.v2,Apache-2.0,Copyright 2014 Square Inc. +core,gopkg.in/go-jose/go-jose.v2/cipher,Apache-2.0,Copyright 2014 Square Inc. +core,gopkg.in/go-jose/go-jose.v2/json,BSD-3-Clause,Copyright (c) 2012 The Go Authors. All rights reserved | Copyright 2014 Square Inc. core,gopkg.in/inf.v0,BSD-3-Clause,Copyright (c) 2012 Péter Surányi. 
Portions Copyright (c) 2009 The Go Authors. All rights reserved. core,gopkg.in/ini.v1,Apache-2.0,Copyright 2014 Unknwon core,gopkg.in/natefinch/lumberjack.v2,MIT,Copyright (c) 2014 Nate Finch @@ -3892,9 +3995,12 @@ core,k8s.io/kube-state-metrics/v2/pkg/builder,Apache-2.0,Copyright 2014 The Kube core,k8s.io/kube-state-metrics/v2/pkg/builder/types,Apache-2.0,Copyright 2014 The Kubernetes Authors. core,k8s.io/kube-state-metrics/v2/pkg/constant,Apache-2.0,Copyright 2014 The Kubernetes Authors. core,k8s.io/kube-state-metrics/v2/pkg/customresource,Apache-2.0,Copyright 2014 The Kubernetes Authors. +core,k8s.io/kube-state-metrics/v2/pkg/customresourcestate,Apache-2.0,Copyright 2014 The Kubernetes Authors. +core,k8s.io/kube-state-metrics/v2/pkg/discovery,Apache-2.0,Copyright 2014 The Kubernetes Authors. core,k8s.io/kube-state-metrics/v2/pkg/metric,Apache-2.0,Copyright 2014 The Kubernetes Authors. core,k8s.io/kube-state-metrics/v2/pkg/metric_generator,Apache-2.0,Copyright 2014 The Kubernetes Authors. core,k8s.io/kube-state-metrics/v2/pkg/metrics_store,Apache-2.0,Copyright 2014 The Kubernetes Authors. +core,k8s.io/kube-state-metrics/v2/pkg/metricshandler,Apache-2.0,Copyright 2014 The Kubernetes Authors. core,k8s.io/kube-state-metrics/v2/pkg/options,Apache-2.0,Copyright 2014 The Kubernetes Authors. core,k8s.io/kube-state-metrics/v2/pkg/sharding,Apache-2.0,Copyright 2014 The Kubernetes Authors. core,k8s.io/kube-state-metrics/v2/pkg/util,Apache-2.0,Copyright 2014 The Kubernetes Authors. @@ -3960,6 +4066,8 @@ core,modernc.org/sqlite,BSD-3-Clause,Alexander Menzhinsky | Alexey Palazhchenko | Angus Dippenaar | Artyom Pervukhin | Copyright (c) 2017 The Sqlite Authors. All rights reserved | Dan Kortschak | Dan Peterson | David Skinner | David Walton | Davsk Ltd Co | Elle Mouton | FerretDB Inc. | FlyingOnion <731677080@qq.com> | Gleb Sakhnov | Jaap Aarts | Jan Mercl <0xjnml@gmail.com> | Josh Bleecher Snyder | Josh Klein | Kim | Logan Snow | Mario Salgado | Mark Summerfield | Matthew Gabeler-Lee | Michael Hoffmann | Michael Rykov | Morgan Bazalgette | Prathyush PV | Romain Le Disez | Ross Light | Saed SayedAhmed | Sean McGivern | Steffen Butzer | Toni Spets | W. Michael Petullo | Yaacov Akiba Slama core,modernc.org/strutil,BSD-3-Clause,CZ.NIC z.s.p.o. | Copyright (c) 2014 The strutil Authors. All rights reserved | Jan Mercl <0xjnml@gmail.com> core,modernc.org/token,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved +core,rsc.io/binaryregexp,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved +core,rsc.io/binaryregexp/syntax,BSD-3-Clause,Copyright (c) 2009 The Go Authors. All rights reserved core,sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client,Apache-2.0,Copyright 2017 The Kubernetes Authors. core,sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics,Apache-2.0,Copyright 2017 The Kubernetes Authors. core,sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics,Apache-2.0,Copyright 2017 The Kubernetes Authors. 
diff --git a/cmd/agent/common/loader.go b/cmd/agent/common/loader.go index 890d7f7d422ee..67050699bdef9 100644 --- a/cmd/agent/common/loader.go +++ b/cmd/agent/common/loader.go @@ -16,7 +16,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/sbom/scanner" "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // GetWorkloadmetaInit provides the InitHelper for workloadmeta so it can be injected as a Param @@ -25,7 +25,7 @@ func GetWorkloadmetaInit() workloadmeta.InitHelper { return func(ctx context.Context, wm workloadmeta.Component, cfg config.Component) error { // SBOM scanner needs to be called here as initialization is required prior to the // catalog getting instantiated and initialized. - sbomScanner, err := scanner.CreateGlobalScanner(cfg, optional.NewOption(wm)) + sbomScanner, err := scanner.CreateGlobalScanner(cfg, option.New(wm)) if err != nil { return fmt.Errorf("failed to create SBOM scanner: %s", err) } else if sbomScanner != nil { diff --git a/cmd/agent/dist/conf.d/gpu.d/conf.yaml.example b/cmd/agent/dist/conf.d/gpu.d/conf.yaml.example new file mode 100644 index 0000000000000..18bcc8ed94127 --- /dev/null +++ b/cmd/agent/dist/conf.d/gpu.d/conf.yaml.example @@ -0,0 +1,20 @@ +init_config: + +instances: + + - + + ## @param nvml_library_path - string - optional - default: "" + ## Configure an alternative path for the NVML NVIDIA library. Necessary + ## if the library is in a location where the agent cannot automatically find it. + # + # nvml_library_path: "" + + ## @param tags - list of strings following the pattern: "key:value" - optional + ## List of tags to attach to every metric, event, and service check emitted by this integration. 
+ ## + ## Learn more about tagging: https://docs.datadoghq.com/tagging/ + # + # tags: + # - <KEY_1>:<VALUE_1> + # - <KEY_2>:<VALUE_2> diff --git a/cmd/agent/subcommands/diagnose/command.go b/cmd/agent/subcommands/diagnose/command.go index bccd76a31820a..f0ae880b9858b 100644 --- a/cmd/agent/subcommands/diagnose/command.go +++ b/cmd/agent/subcommands/diagnose/command.go @@ -29,14 +29,14 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" haagentfx "github.com/DataDog/datadog-agent/comp/haagent/fx" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" + logscompressorfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompressorfx "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" "github.com/DataDog/datadog-agent/pkg/api/util" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" - + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/fatih/color" "github.com/spf13/cobra" ) @@ -102,12 +102,13 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { AgentType: workloadmeta.NodeAgent, InitHelper: common.GetWorkloadmetaInit(), }), - fx.Supply(optional.NewNoneOption[collector.Component]()), + fx.Supply(option.None[collector.Component]()), dualTaggerfx.Module(common.DualTaggerParams()), autodiscoveryimpl.Module(), - compressionfx.Module(), diagnosesendermanagerimpl.Module(), haagentfx.Module(), + logscompressorfx.Module(), + metricscompressorfx.Module(), ) }, } @@ -301,7 +302,7 @@ This command print the security-agent metadata payload.
This payload is used by func cmdDiagnose(cliParams *cliParams, senderManager diagnosesendermanager.Component, - wmeta optional.Option[workloadmeta.Component], + wmeta option.Option[workloadmeta.Component], ac autodiscovery.Component, secretResolver secrets.Component, _ log.Component, diff --git a/cmd/agent/subcommands/flare/command.go b/cmd/agent/subcommands/flare/command.go index a6da5391ae5d5..e3a20c4789284 100644 --- a/cmd/agent/subcommands/flare/command.go +++ b/cmd/agent/subcommands/flare/command.go @@ -51,7 +51,8 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/inventoryhost/inventoryhostimpl" "github.com/DataDog/datadog-agent/comp/metadata/inventoryotel/inventoryotelimpl" "github.com/DataDog/datadog-agent/comp/metadata/resources/resourcesimpl" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" + logscompressorfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompressorfx "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/config/settings" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" @@ -59,7 +60,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/input" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // cliParams are the command-line arguments for this subcommand @@ -130,8 +131,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { }), localTaggerfx.Module(tagger.Params{}), autodiscoveryimpl.Module(), - fx.Supply(optional.NewNoneOption[collector.Component]()), - compressionfx.Module(), + fx.Supply(option.None[collector.Component]()), diagnosesendermanagerimpl.Module(), // We need inventoryagent to fill the status page generated by the flare. 
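The recurring optional -> option changes throughout this patch are a package rename with shorter constructors rather than a behaviour change. A minimal sketch of the before/after shape, assuming Option[T] keeps the Get() (T, bool) accessor used elsewhere in this diff:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/option"
)

func main() {
	some := option.New("workloadmeta") // was: optional.NewOption("workloadmeta")
	none := option.None[string]()      // was: optional.NewNoneOption[string]()

	if v, ok := some.Get(); ok {
		fmt.Println("set:", v)
	}
	if _, ok := none.Get(); !ok {
		fmt.Println("unset")
	}
}
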
inventoryagentimpl.Module(), @@ -147,6 +147,8 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { }), core.Bundle(), haagentfx.Module(), + logscompressorfx.Module(), + metricscompressorfx.Module(), ) }, } @@ -175,18 +177,10 @@ func readProfileData(seconds int) (flare.ProfileData, error) { type pprofGetter func(path string) ([]byte, error) - tcpGet := func(portConfig string, onHTTPS bool) pprofGetter { - endpoint := url.URL{ - Scheme: "http", - Host: net.JoinHostPort("127.0.0.1", strconv.Itoa(pkgconfigsetup.Datadog().GetInt(portConfig))), - Path: "/debug/pprof", - } - if onHTTPS { - endpoint.Scheme = "https" - } - + tcpGet := func(portConfig string) pprofGetter { + pprofURL := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", pkgconfigsetup.Datadog().GetInt(portConfig)) return func(path string) ([]byte, error) { - return util.DoGet(c, endpoint.String()+path, util.LeaveConnectionOpen) + return util.DoGet(c, pprofURL+path, util.LeaveConnectionOpen) } } @@ -236,15 +230,15 @@ func readProfileData(seconds int) (flare.ProfileData, error) { } agentCollectors := map[string]agentProfileCollector{ - "core": serviceProfileCollector(tcpGet("expvar_port", false), seconds), - "security-agent": serviceProfileCollector(tcpGet("security_agent.expvar_port", false), seconds), + "core": serviceProfileCollector(tcpGet("expvar_port"), seconds), + "security-agent": serviceProfileCollector(tcpGet("security_agent.expvar_port"), seconds), } if pkgconfigsetup.Datadog().GetBool("process_config.enabled") || pkgconfigsetup.Datadog().GetBool("process_config.container_collection.enabled") || pkgconfigsetup.Datadog().GetBool("process_config.process_collection.enabled") { - agentCollectors["process"] = serviceProfileCollector(tcpGet("process_config.expvar_port", false), seconds) + agentCollectors["process"] = serviceProfileCollector(tcpGet("process_config.expvar_port"), seconds) } if pkgconfigsetup.Datadog().GetBool("apm_config.enabled") { @@ -257,7 +251,7 @@ func readProfileData(seconds int) (flare.ProfileData, error) { traceCpusec = 4 } - agentCollectors["trace"] = serviceProfileCollector(tcpGet("apm_config.debug.port", true), traceCpusec) + agentCollectors["trace"] = serviceProfileCollector(tcpGet("apm_config.debug.port"), traceCpusec) } if pkgconfigsetup.SystemProbe().GetBool("system_probe_config.enabled") { @@ -304,7 +298,7 @@ func makeFlare(flareComp flare.Component, config config.Component, _ sysprobeconfig.Component, cliParams *cliParams, - _ optional.Option[workloadmeta.Component], + _ option.Option[workloadmeta.Component], _ tagger.Component) error { var ( profile flare.ProfileData diff --git a/cmd/agent/subcommands/flare/command_test.go b/cmd/agent/subcommands/flare/command_test.go index 4c96cae079aa9..a27304e95b133 100644 --- a/cmd/agent/subcommands/flare/command_test.go +++ b/cmd/agent/subcommands/flare/command_test.go @@ -29,7 +29,6 @@ type commandTestSuite struct { suite.Suite sysprobeSocketPath string tcpServer *httptest.Server - tcpTLSServer *httptest.Server unixServer *httptest.Server systemProbeServer *httptest.Server } @@ -43,17 +42,13 @@ func (c *commandTestSuite) SetupSuite() { // This should be called by each test that requires them. 
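The profile fetcher above drops its HTTPS branch: every getter now targets plain HTTP on 127.0.0.1 and the caller appends the pprof path. A small stand-alone sketch of that pattern; plain net/http stands in for the agent's util.DoGet helper, and the port value and "/heap" path are illustrative:

package main

import (
	"fmt"
	"io"
	"net/http"
)

// pprofGetter builds one base URL per port and appends pprof paths per request,
// mirroring the simplified tcpGet closure above.
func pprofGetter(port int) func(path string) ([]byte, error) {
	base := fmt.Sprintf("http://127.0.0.1:%d/debug/pprof", port)
	return func(path string) ([]byte, error) {
		resp, err := http.Get(base + path)
		if err != nil {
			return nil, err
		}
		defer resp.Body.Close()
		return io.ReadAll(resp.Body)
	}
}

func main() {
	get := pprofGetter(5000) // stands in for the configured expvar_port
	if _, err := get("/heap"); err != nil {
		fmt.Println("fetch failed:", err)
	}
}
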
func (c *commandTestSuite) startTestServers() { t := c.T() - c.tcpServer, c.tcpTLSServer, c.unixServer, c.systemProbeServer = c.getPprofTestServer() + c.tcpServer, c.unixServer, c.systemProbeServer = c.getPprofTestServer() t.Cleanup(func() { if c.tcpServer != nil { c.tcpServer.Close() c.tcpServer = nil } - if c.tcpTLSServer != nil { - c.tcpTLSServer.Close() - c.tcpTLSServer = nil - } if c.unixServer != nil { c.unixServer.Close() c.unixServer = nil @@ -87,13 +82,12 @@ func newMockHandler() http.HandlerFunc { }) } -func (c *commandTestSuite) getPprofTestServer() (tcpServer *httptest.Server, tcpTLSServer *httptest.Server, unixServer *httptest.Server, sysProbeServer *httptest.Server) { +func (c *commandTestSuite) getPprofTestServer() (tcpServer *httptest.Server, unixServer *httptest.Server, sysProbeServer *httptest.Server) { var err error t := c.T() handler := newMockHandler() tcpServer = httptest.NewServer(handler) - tcpTLSServer = httptest.NewTLSServer(handler) if runtime.GOOS == "linux" { unixServer = httptest.NewUnstartedServer(handler) unixServer.Listener, err = net.Listen("unix", c.sysprobeSocketPath) @@ -107,7 +101,7 @@ func (c *commandTestSuite) getPprofTestServer() (tcpServer *httptest.Server, tcp sysProbeServer.Start() } - return tcpServer, tcpTLSServer, unixServer, sysProbeServer + return tcpServer, unixServer, sysProbeServer } func TestCommandTestSuite(t *testing.T) { @@ -122,14 +116,10 @@ func (c *commandTestSuite) TestReadProfileData() { require.NoError(t, err) port := u.Port() - u, err = url.Parse(c.tcpTLSServer.URL) - require.NoError(t, err) - httpsPort := u.Port() - mockConfig := configmock.New(t) mockConfig.SetWithoutSource("expvar_port", port) mockConfig.SetWithoutSource("apm_config.enabled", true) - mockConfig.SetWithoutSource("apm_config.debug.port", httpsPort) + mockConfig.SetWithoutSource("apm_config.debug.port", port) mockConfig.SetWithoutSource("apm_config.receiver_timeout", "10") mockConfig.SetWithoutSource("process_config.expvar_port", port) mockConfig.SetWithoutSource("security_agent.expvar_port", port) diff --git a/cmd/agent/subcommands/flare/command_windows_test.go b/cmd/agent/subcommands/flare/command_windows_test.go index 930860a424b90..ebedde3ed8161 100644 --- a/cmd/agent/subcommands/flare/command_windows_test.go +++ b/cmd/agent/subcommands/flare/command_windows_test.go @@ -32,7 +32,8 @@ func sysprobeSocketPath(_ *testing.T) string { func NewSystemProbeTestServer(handler http.Handler) (*httptest.Server, error) { server := httptest.NewUnstartedServer(handler) - conn, err := sysprobeserver.NewListener(systemProbeTestPipeName) + // The test named pipe allows the current user. 
+ conn, err := sysprobeserver.NewListenerForCurrentUser(systemProbeTestPipeName) if err != nil { return nil, err } diff --git a/cmd/agent/subcommands/jmx/command.go b/cmd/agent/subcommands/jmx/command.go index 22b595198fc43..be114f3345cba 100644 --- a/cmd/agent/subcommands/jmx/command.go +++ b/cmd/agent/subcommands/jmx/command.go @@ -54,7 +54,8 @@ import ( integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "github.com/DataDog/datadog-agent/comp/remote-config/rcservicemrf" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/cli/standalone" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" @@ -62,7 +63,7 @@ import ( proccontainers "github.com/DataDog/datadog-agent/pkg/process/util/containers" "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type cliParams struct { @@ -129,7 +130,6 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { fx.Supply(cliParams), fx.Supply(params), core.Bundle(), - compressionfx.Module(), diagnosesendermanagerimpl.Module(), fx.Supply(func(diagnoseSenderManager diagnosesendermanager.Component) (sender.SenderManager, error) { return diagnoseSenderManager.LazyGetSenderManager() @@ -147,11 +147,11 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { // TODO(components): this is a temporary hack as the StartServer() method of the API package was previously called with nil arguments // This highlights the fact that the API Server created by JMX (through ExecJmx... function) should be different from the ones created // in others commands such as run. 
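Across these commands the single serializer compression module is replaced by two narrower ones, wired separately for logs and metrics. A toy fx sketch of that wiring; the two struct types below are stand-ins for the real components, not the agent's API:

package main

import (
	"fmt"

	"go.uber.org/fx"
)

// Stand-ins for the two new compressor components.
type logsCompressor struct{}
type metricsCompressor struct{}

func main() {
	app := fx.New(
		// roughly what logscompression.Module() / metricscompression.Module() contribute
		fx.Provide(func() *logsCompressor { return &logsCompressor{} }),
		fx.Provide(func() *metricsCompressor { return &metricsCompressor{} }),
		fx.Invoke(func(_ *logsCompressor, _ *metricsCompressor) {
			fmt.Println("logs and metrics compressors are now injected independently")
		}),
	)
	if err := app.Err(); err != nil {
		fmt.Println("wiring error:", err)
	}
}
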
- fx.Supply(optional.NewNoneOption[rcservice.Component]()), - fx.Supply(optional.NewNoneOption[rcservicemrf.Component]()), - fx.Supply(optional.NewNoneOption[collector.Component]()), - fx.Supply(optional.NewNoneOption[logsAgent.Component]()), - fx.Supply(optional.NewNoneOption[integrations.Component]()), + fx.Supply(option.None[rcservice.Component]()), + fx.Supply(option.None[rcservicemrf.Component]()), + fx.Supply(option.None[collector.Component]()), + fx.Supply(option.None[logsAgent.Component]()), + fx.Supply(option.None[integrations.Component]()), fx.Provide(func() dogstatsdServer.Component { return nil }), fx.Provide(func() pidmap.Component { return nil }), fx.Provide(func() replay.Component { return nil }), @@ -168,6 +168,8 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { }), fx.Provide(func() remoteagentregistry.Component { return nil }), haagentfx.Module(), + logscompression.Module(), + metricscompression.Module(), ) } @@ -303,9 +305,9 @@ func runJmxCommandConsole(config config.Component, diagnoseSendermanager diagnosesendermanager.Component, secretResolver secrets.Component, agentAPI internalAPI.Component, - collector optional.Option[collector.Component], + collector option.Option[collector.Component], jmxLogger jmxlogger.Component, - logReceiver optional.Option[integrations.Component], + logReceiver option.Option[integrations.Component], tagger tagger.Component) error { // This prevents log-spam from "comp/core/workloadmeta/collectors/internal/remote/process_collector/process_collector.go" // It appears that this collector creates some contention in AD. diff --git a/cmd/agent/subcommands/run/command.go b/cmd/agent/subcommands/run/command.go index 76775693251e4..f27345b6668a0 100644 --- a/cmd/agent/subcommands/run/command.go +++ b/cmd/agent/subcommands/run/command.go @@ -32,7 +32,6 @@ import ( agenttelemetry "github.com/DataDog/datadog-agent/comp/core/agenttelemetry/def" agenttelemetryfx "github.com/DataDog/datadog-agent/comp/core/agenttelemetry/fx" haagentfx "github.com/DataDog/datadog-agent/comp/haagent/fx" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" // checks implemented as components @@ -124,6 +123,7 @@ import ( "github.com/DataDog/datadog-agent/comp/remote-config/rcservice/rcserviceimpl" "github.com/DataDog/datadog-agent/comp/remote-config/rcservicemrf/rcservicemrfimpl" "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl" + metricscompressorfx "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" "github.com/DataDog/datadog-agent/comp/snmptraps" snmptrapsServer "github.com/DataDog/datadog-agent/comp/snmptraps/server" traceagentStatusImpl "github.com/DataDog/datadog-agent/comp/trace/status/statusimpl" @@ -156,7 +156,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/installinfo" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" // runtime init routines @@ -231,7 +231,7 @@ func run(log log.Component, _ runner.Component, demultiplexer demultiplexer.Component, sharedSerializer serializer.MetricSerializer, - logsAgent optional.Option[logsAgent.Component], + logsAgent option.Option[logsAgent.Component], _ statsd.Component, processAgent processAgent.Component, otelcollector otelcollector.Component, @@ -241,7 +241,7 @@ 
func run(log log.Component, _ inventoryotel.Component, _ secrets.Component, invChecks inventorychecks.Component, - logReceiver optional.Option[integrations.Component], + logReceiver option.Option[integrations.Component], _ netflowServer.Component, _ snmptrapsServer.Component, _ langDetectionCl.Component, @@ -258,7 +258,7 @@ func run(log log.Component, _ healthprobe.Component, _ autoexit.Component, settings settings.Component, - _ optional.Option[gui.Component], + _ option.Option[gui.Component], _ agenttelemetry.Component, ) error { defer func() { @@ -380,15 +380,14 @@ func getSharedFxOption() fx.Option { authtokenimpl.Module(), apiimpl.Module(), commonendpoints.Module(), - compressionfx.Module(), demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams(demultiplexerimpl.WithDogstatsdNoAggregationPipelineConfig())), demultiplexerendpointfx.Module(), dogstatsd.Bundle(dogstatsdServer.Params{Serverless: false}), - fx.Provide(func(logsagent optional.Option[logsAgent.Component]) optional.Option[logsagentpipeline.Component] { + fx.Provide(func(logsagent option.Option[logsAgent.Component]) option.Option[logsagentpipeline.Component] { if la, ok := logsagent.Get(); ok { - return optional.NewOption[logsagentpipeline.Component](la) + return option.New[logsagentpipeline.Component](la) } - return optional.NewNoneOption[logsagentpipeline.Component]() + return option.None[logsagentpipeline.Component]() }), otelcol.Bundle(), rctelemetryreporterimpl.Module(), @@ -432,8 +431,8 @@ func getSharedFxOption() fx.Option { fx.Provide(func(demuxInstance demultiplexer.Component) serializer.MetricSerializer { return demuxInstance.Serializer() }), - fx.Provide(func(ms serializer.MetricSerializer) optional.Option[serializer.MetricSerializer] { - return optional.NewOption[serializer.MetricSerializer](ms) + fx.Provide(func(ms serializer.MetricSerializer) option.Option[serializer.MetricSerializer] { + return option.New[serializer.MetricSerializer](ms) }), ndmtmp.Bundle(), netflow.Bundle(), @@ -474,6 +473,7 @@ func getSharedFxOption() fx.Option { networkpath.Bundle(), remoteagentregistryfx.Module(), haagentfx.Module(), + metricscompressorfx.Module(), ) } @@ -488,7 +488,7 @@ func startAgent( tagger tagger.Component, ac autodiscovery.Component, rcclient rcclient.Component, - _ optional.Option[logsAgent.Component], + _ option.Option[logsAgent.Component], _ processAgent.Component, _ defaultforwarder.Component, _ serializer.MetricSerializer, @@ -496,7 +496,7 @@ func startAgent( demultiplexer demultiplexer.Component, _ internalAPI.Component, invChecks inventorychecks.Component, - logReceiver optional.Option[integrations.Component], + logReceiver option.Option[integrations.Component], _ status.Component, collector collector.Component, cfg config.Component, @@ -584,7 +584,7 @@ func startAgent( // Set up check collector commonchecks.RegisterChecks(wmeta, tagger, cfg, telemetry) - ac.AddScheduler("check", pkgcollector.InitCheckScheduler(optional.NewOption(collector), demultiplexer, logReceiver, tagger), true) + ac.AddScheduler("check", pkgcollector.InitCheckScheduler(option.New(collector), demultiplexer, logReceiver, tagger), true) demultiplexer.AddAgentStartupTelemetry(version.AgentVersion) diff --git a/cmd/agent/subcommands/run/command_windows.go b/cmd/agent/subcommands/run/command_windows.go index 8504227a81433..4bc8c673b8a35 100644 --- a/cmd/agent/subcommands/run/command_windows.go +++ b/cmd/agent/subcommands/run/command_windows.go @@ -75,7 +75,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/serializer" 
"github.com/DataDog/datadog-agent/pkg/util/defaultpaths" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/winutil" // runtime init routines ) @@ -106,7 +106,7 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error ac autodiscovery.Component, rcclient rcclient.Component, forwarder defaultforwarder.Component, - logsAgent optional.Option[logsAgent.Component], + logsAgent option.Option[logsAgent.Component], processAgent processAgent.Component, _ runner.Component, sharedSerializer serializer.MetricSerializer, @@ -118,7 +118,7 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error _ inventoryotel.Component, _ secrets.Component, invChecks inventorychecks.Component, - logsReceiver optional.Option[integrations.Component], + logsReceiver option.Option[integrations.Component], _ netflowServer.Component, _ trapserver.Component, agentAPI internalAPI.Component, @@ -130,7 +130,7 @@ func StartAgentWithDefaults(ctxChan <-chan context.Context) (<-chan error, error _ expvarserver.Component, jmxlogger jmxlogger.Component, settings settings.Component, - _ optional.Option[gui.Component], + _ option.Option[gui.Component], _ agenttelemetry.Component, ) error { defer StopAgentWithDefaults() diff --git a/cmd/agent/subcommands/secret/command.go b/cmd/agent/subcommands/secret/command.go index d8b2a7048114f..b57fa2a6a9ddb 100644 --- a/cmd/agent/subcommands/secret/command.go +++ b/cmd/agent/subcommands/secret/command.go @@ -84,32 +84,53 @@ func secretRefresh(config config.Component, _ log.Component) error { } fmt.Println(string(res)) } + + { + fmt.Println("Security Agent refresh:") + res, err := securityAgentSecretRefresh(config) + if err != nil { + // the security agent might not be running + // so we handle the error in a non-fatal way + fmt.Println(err.Error()) + } else { + fmt.Println(string(res)) + } + } + return nil } -func traceAgentSecretRefresh(conf config.Component) ([]byte, error) { +func commonSubAgentSecretRefresh(conf config.Component, agentName, portConfigName string) ([]byte, error) { err := apiutil.SetAuthToken(conf) if err != nil { return nil, err } - port := conf.GetInt("apm_config.debug.port") + port := conf.GetInt(portConfigName) if port <= 0 { - return nil, fmt.Errorf("invalid apm_config.debug.port -- %d", port) + return nil, fmt.Errorf("invalid %s -- %d", portConfigName, port) } c := apiutil.GetClient(false) c.Timeout = conf.GetDuration("server_timeout") * time.Second - url := fmt.Sprintf("https://127.0.0.1:%d/secret/refresh", port) + url := fmt.Sprintf("http://127.0.0.1:%d/secret/refresh", port) res, err := apiutil.DoGet(c, url, apiutil.CloseConnection) if err != nil { - return nil, fmt.Errorf("could not contact trace-agent: %s", err) + return nil, fmt.Errorf("could not contact %s: %s", agentName, err) } return res, nil } +func traceAgentSecretRefresh(conf config.Component) ([]byte, error) { + return commonSubAgentSecretRefresh(conf, "trace-agent", "apm_config.debug.port") +} + +func securityAgentSecretRefresh(conf config.Component) ([]byte, error) { + return commonSubAgentSecretRefresh(conf, "security-agent", "security_agent.cmd_port") +} + func callIPCEndpoint(config config.Component, endpointURL string) ([]byte, error) { endpoint, err := apiutil.NewIPCEndpoint(config, endpointURL) if err != nil { diff --git a/cmd/agent/subcommands/snmp/command.go b/cmd/agent/subcommands/snmp/command.go 
index 4dba953403985..d1b431feb3b7d 100644 --- a/cmd/agent/subcommands/snmp/command.go +++ b/cmd/agent/subcommands/snmp/command.go @@ -9,10 +9,6 @@ package snmp import ( "errors" "fmt" - "net" - "os" - "strconv" - "github.com/DataDog/datadog-agent/cmd/agent/command" "github.com/DataDog/datadog-agent/comp/aggregator" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer/demultiplexerimpl" @@ -27,14 +23,19 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver/eventplatformreceiverimpl" "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" haagentfx "github.com/DataDog/datadog-agent/comp/haagent/fx" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" snmpscan "github.com/DataDog/datadog-agent/comp/snmpscan/def" snmpscanfx "github.com/DataDog/datadog-agent/comp/snmpscan/fx" + "github.com/DataDog/datadog-agent/pkg/networkdevice/metadata" "github.com/DataDog/datadog-agent/pkg/snmp/snmpparse" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/spf13/cobra" "go.uber.org/fx" + "net" + "os" + "strconv" + "time" ) const ( @@ -98,10 +99,11 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithFeatures(defaultforwarder.CoreFeatures))), orchestratorimpl.Module(orchestratorimpl.NewDefaultParams()), eventplatformimpl.Module(eventplatformimpl.NewDefaultParams()), - compressionfx.Module(), nooptagger.Module(), eventplatformreceiverimpl.Module(), haagentfx.Module(), + metricscompression.Module(), + logscompression.Module(), ) if err != nil { var ue configErr @@ -162,10 +164,11 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithFeatures(defaultforwarder.CoreFeatures))), eventplatformimpl.Module(eventplatformimpl.NewDefaultParams()), eventplatformreceiverimpl.Module(), - compressionfx.Module(), nooptagger.Module(), snmpscanfx.Module(), haagentfx.Module(), + metricscompression.Module(), + logscompression.Module(), ) if err != nil { var ue configErr @@ -302,16 +305,64 @@ func scanDevice(connParams *snmpparse.SNMPConfig, args argsType, snmpScanner snm // newSNMP only returns config errors, so any problem is a usage error return configErr{err} } - if err := snmp.Connect(); err != nil { + namespace := conf.GetString("network_devices.namespace") + deviceID := namespace + ":" + connParams.IPAddress + // Since the snmp connection can take a while, start by sending an in progress status for the start of the scan + // before connecting to the agent + inProgressStatusPayload := metadata.NetworkDevicesMetadata{ + DeviceScanStatus: &metadata.ScanStatusMetadata{ + DeviceID: deviceID, + ScanStatus: metadata.ScanStatusInProgress, + }, + CollectTimestamp: time.Now().Unix(), + Namespace: namespace, + } + if err = snmpScanner.SendPayload(inProgressStatusPayload); err != nil { + return fmt.Errorf("unable to send in progress status: %v", err) + } + if err = snmp.Connect(); err != nil { + // Send an error status if we can't connect to the agent + errorStatusPayload := metadata.NetworkDevicesMetadata{ + DeviceScanStatus: &metadata.ScanStatusMetadata{ + DeviceID: deviceID, + ScanStatus: metadata.ScanStatusError, + }, + CollectTimestamp: time.Now().Unix(), + Namespace: namespace, + } + 
if err = snmpScanner.SendPayload(errorStatusPayload); err != nil { + return fmt.Errorf("unable to send error status: %v", err) + } return fmt.Errorf("unable to connect to SNMP agent on %s:%d: %w", snmp.LocalAddr, snmp.Port, err) } - - namespace := conf.GetString("network_devices.namespace") - - err = snmpScanner.RunDeviceScan(snmp, namespace, connParams.IPAddress) + err = snmpScanner.RunDeviceScan(snmp, namespace, deviceID) if err != nil { + // Send an error status if we can't scan the device + errorStatusPayload := metadata.NetworkDevicesMetadata{ + DeviceScanStatus: &metadata.ScanStatusMetadata{ + DeviceID: deviceID, + ScanStatus: metadata.ScanStatusError, + }, + CollectTimestamp: time.Now().Unix(), + Namespace: namespace, + } + if err = snmpScanner.SendPayload(errorStatusPayload); err != nil { + return fmt.Errorf("unable to send error status: %v", err) + } return fmt.Errorf("unable to perform device scan: %v", err) } + // Send a completed status if the scan was successful + completedStatusPayload := metadata.NetworkDevicesMetadata{ + DeviceScanStatus: &metadata.ScanStatusMetadata{ + DeviceID: deviceID, + ScanStatus: metadata.ScanStatusCompleted, + }, + CollectTimestamp: time.Now().Unix(), + Namespace: namespace, + } + if err = snmpScanner.SendPayload(completedStatusPayload); err != nil { + return fmt.Errorf("unable to send completed status: %v", err) + } return nil } diff --git a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go index b16485b36f9e4..d565da2063fc1 100644 --- a/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go +++ b/cmd/cluster-agent-cloudfoundry/subcommands/run/command.go @@ -51,7 +51,8 @@ import ( orchestratorForwarderImpl "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" haagentfx "github.com/DataDog/datadog-agent/comp/haagent/fx" integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" "github.com/DataDog/datadog-agent/pkg/clusteragent" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" @@ -64,7 +65,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/hostname" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" "go.uber.org/fx" @@ -86,7 +87,6 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { }), core.Bundle(), forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithResolvers())), - compressionfx.Module(), demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams()), orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDisabledParams()), eventplatformimpl.Module(eventplatformimpl.NewDisabledParams()), @@ -99,11 +99,11 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { }), // TODO(components): check what this must be for cluster-agent-cloudfoundry localTaggerfx.Module(tagger.Params{}), collectorimpl.Module(), - fx.Provide(func() optional.Option[serializer.MetricSerializer] { - return optional.NewNoneOption[serializer.MetricSerializer]() + 
fx.Provide(func() option.Option[serializer.MetricSerializer] { + return option.None[serializer.MetricSerializer]() }), - fx.Provide(func() optional.Option[integrations.Component] { - return optional.NewNoneOption[integrations.Component]() + fx.Provide(func() option.Option[integrations.Component] { + return option.None[integrations.Component]() }), // The cluster-agent-cloudfoundry agent do not have a status command // so there is no need to initialize the status component @@ -128,6 +128,8 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { proccontainers.InitSharedContainerProvider(wmeta, tagger) }), haagentfx.Module(), + logscompressionfx.Module(), + metricscompressionfx.Module(), ) }, } @@ -147,7 +149,7 @@ func run( statusComponent status.Component, _ healthprobe.Component, settings settings.Component, - logReceiver optional.Option[integrations.Component], + logReceiver option.Option[integrations.Component], ) error { mainCtx, mainCtxCancel := context.WithCancel(context.Background()) defer mainCtxCancel() // Calling cancel twice is safe @@ -185,7 +187,7 @@ func run( common.LoadComponents(secretResolver, wmeta, ac, pkgconfigsetup.Datadog().GetString("confd_path")) // Set up check collector - ac.AddScheduler("check", pkgcollector.InitCheckScheduler(optional.NewOption(collector), demultiplexer, logReceiver, taggerComp), true) + ac.AddScheduler("check", pkgcollector.InitCheckScheduler(option.New(collector), demultiplexer, logReceiver, taggerComp), true) // start the autoconfig, this will immediately run any configured check ac.LoadAndRun(mainCtx) diff --git a/cmd/cluster-agent/api/agent/agent.go b/cmd/cluster-agent/api/agent/agent.go index 53e0f4afa50ce..946098af9161b 100644 --- a/cmd/cluster-agent/api/agent/agent.go +++ b/cmd/cluster-agent/api/agent/agent.go @@ -22,7 +22,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/flare" + clusterAgentFlare "github.com/DataDog/datadog-agent/pkg/flare/clusteragent" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -129,7 +129,7 @@ func makeFlare(w http.ResponseWriter, r *http.Request, statusComponent status.Co log.Infof("Making a flare") w.Header().Set("Content-Type", "application/json") - var profile flare.ProfileData + var profile clusterAgentFlare.ProfileData if r.Body != http.NoBody { body, err := io.ReadAll(r.Body) @@ -148,7 +148,7 @@ func makeFlare(w http.ResponseWriter, r *http.Request, statusComponent status.Co if logFile == "" { logFile = defaultpaths.DCALogFile } - filePath, err := flare.CreateDCAArchive(false, defaultpaths.GetDistPath(), logFile, profile, statusComponent) + filePath, err := clusterAgentFlare.CreateDCAArchive(false, defaultpaths.GetDistPath(), logFile, profile, statusComponent) if err != nil || filePath == "" { if err != nil { log.Errorf("The flare failed to be created: %s", err) diff --git a/cmd/cluster-agent/api/server.go b/cmd/cluster-agent/api/server.go index c4f5c0534633a..202fdf011ce08 100644 --- a/cmd/cluster-agent/api/server.go +++ b/cmd/cluster-agent/api/server.go @@ -25,6 +25,7 @@ import ( "time" languagedetection "github.com/DataDog/datadog-agent/cmd/cluster-agent/api/v1/languagedetection" + "github.com/DataDog/datadog-agent/cmd/cluster-agent/api/v2/series" 
"github.com/gorilla/mux" grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" @@ -69,6 +70,10 @@ func StartServer(ctx context.Context, w workloadmeta.Component, taggerComp tagge // API V1 Language Detection APIs languagedetection.InstallLanguageDetectionEndpoints(ctx, apiRouter, w, cfg) + // API V2 Series APIs + v2ApiRouter := router.PathPrefix("/api/v2").Subrouter() + series.InstallNodeMetricsEndpoints(ctx, v2ApiRouter, cfg) + // Validate token for every request router.Use(validateToken) @@ -136,7 +141,7 @@ func StartServer(ctx context.Context, w workloadmeta.Component, taggerComp tagge // event size should be small enough to fit within the grpc max message size maxEventSize := maxMessageSize / 2 pb.RegisterAgentSecureServer(grpcSrv, &serverSecure{ - taggerServer: taggerserver.NewServer(taggerComp, maxEventSize), + taggerServer: taggerserver.NewServer(taggerComp, maxEventSize, cfg.GetInt("remote_tagger.max_concurrent_sync")), }) timeout := pkgconfigsetup.Datadog().GetDuration("cluster_agent.server.idle_timeout_seconds") * time.Second @@ -192,6 +197,7 @@ func isExternalPath(path string) bool { return strings.HasPrefix(path, "/api/v1/metadata/") && len(strings.Split(path, "/")) == 7 || // support for agents < 6.5.0 path == "/version" || path == "/api/v1/languagedetection" || + path == "/api/v2/series" || strings.HasPrefix(path, "/api/v1/annotations/node/") && len(strings.Split(path, "/")) == 6 || strings.HasPrefix(path, "/api/v1/cf/apps") && len(strings.Split(path, "/")) == 5 || strings.HasPrefix(path, "/api/v1/cf/apps/") && len(strings.Split(path, "/")) == 6 || diff --git a/cmd/cluster-agent/api/v2/series/doc.go b/cmd/cluster-agent/api/v2/series/doc.go new file mode 100644 index 0000000000000..da20852316eab --- /dev/null +++ b/cmd/cluster-agent/api/v2/series/doc.go @@ -0,0 +1,9 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +/* +Package series implements API handler for metric series submitted by node agent +*/ +package series diff --git a/cmd/cluster-agent/api/v2/series/job.go b/cmd/cluster-agent/api/v2/series/job.go new file mode 100644 index 0000000000000..c77c095005ca9 --- /dev/null +++ b/cmd/cluster-agent/api/v2/series/job.go @@ -0,0 +1,130 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build kubeapiserver + +package series + +import ( + "context" + "sync" + "time" + + "github.com/DataDog/agent-payload/v5/gogen" + loadstore "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/workload/loadstore" + "github.com/DataDog/datadog-agent/pkg/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/log" + "golang.org/x/time/rate" + "k8s.io/client-go/util/workqueue" +) + +const ( + subsystem = "autoscaling_workload" + payloadProcessQPS = 1000 + payloadProcessRateBurst = 50 +) + +var ( + commonOpts = telemetry.Options{NoDoubleUnderscoreSep: true} + + telemetryWorkloadEntities = telemetry.NewGaugeWithOpts( + subsystem, + "store_load_entities", + []string{"namespace", "deployment", "loadname"}, + "Number of entities in the store", + commonOpts, + ) + + telemetryWorkloadJobQueueLength = telemetry.NewCounterWithOpts( + subsystem, + "store_job_queue_length", + []string{"status"}, + "Length of the job queue", + commonOpts, + ) +) + +// jobQueue is a wrapper around workqueue.DelayingInterface to make it thread-safe. +type jobQueue struct { + taskQueue workqueue.TypedRateLimitingInterface[*gogen.MetricPayload] + isStarted bool + store loadstore.Store + m sync.Mutex +} + +// newJobQueue creates a new jobQueue with no delay for adding items +func newJobQueue(ctx context.Context) *jobQueue { + q := jobQueue{ + taskQueue: workqueue.NewTypedRateLimitingQueue(workqueue.NewTypedMaxOfRateLimiter( + &workqueue.TypedBucketRateLimiter[*gogen.MetricPayload]{ + Limiter: rate.NewLimiter(rate.Limit(payloadProcessQPS), payloadProcessRateBurst), + }, + )), + store: loadstore.GetWorkloadMetricStore(ctx), + isStarted: false, + } + go q.start(ctx) + return &q +} + +func (jq *jobQueue) start(ctx context.Context) { + jq.m.Lock() + if jq.isStarted { + return + } + jq.isStarted = true + jq.m.Unlock() + defer jq.taskQueue.ShutDown() + jq.reportTelemetry(ctx) + for { + select { + case <-ctx.Done(): + log.Infof("Stopping series payload job queue") + return + default: + jq.processNextWorkItem() + } + } +} + +func (jq *jobQueue) processNextWorkItem() bool { + metricPayload, shutdown := jq.taskQueue.Get() + if shutdown { + return false + } + defer jq.taskQueue.Done(metricPayload) + telemetryWorkloadJobQueueLength.Inc("processed") + loadstore.ProcessLoadPayload(metricPayload, jq.store) + return true +} + +func (jq *jobQueue) addJob(payload *gogen.MetricPayload) { + jq.taskQueue.Add(payload) + telemetryWorkloadJobQueueLength.Inc("queued") +} + +func (jq *jobQueue) reportTelemetry(ctx context.Context) { + go func() { + infoTicker := time.NewTicker(60 * time.Second) + for { + select { + case <-ctx.Done(): + return + case <-infoTicker.C: + if jq.store == nil { + continue + } + info := jq.store.GetStoreInfo() + statsResults := info.StatsResults + for _, statsResult := range statsResults { + telemetryWorkloadEntities.Set(float64(statsResult.Count), + statsResult.Namespace, + statsResult.PodOwner, + statsResult.MetricName) + } + } + } + }() +} diff --git a/cmd/cluster-agent/api/v2/series/series.go b/cmd/cluster-agent/api/v2/series/series.go new file mode 100644 index 0000000000000..8add93a85c265 --- /dev/null +++ b/cmd/cluster-agent/api/v2/series/series.go @@ -0,0 +1,98 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build kubeapiserver + +package series + +import ( + "compress/gzip" + "compress/zlib" + "context" + "io" + "net/http" + + "github.com/DataDog/agent-payload/v5/gogen" + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/DataDog/datadog-agent/pkg/clusteragent/api" + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/zstd" + "github.com/gorilla/mux" +) + +const ( + encodingGzip = "gzip" + encodingDeflate = "deflate" + encodingZstd = "zstd" + loadMetricsHandlerName = "load-metrics-handler" +) + +// InstallNodeMetricsEndpoints register handler for node metrics collection +func InstallNodeMetricsEndpoints(ctx context.Context, r *mux.Router, cfg config.Component) { + leaderHander := newSeriesHandler(ctx) + handler := api.WithLeaderProxyHandler( + loadMetricsHandlerName, + func(w http.ResponseWriter, r *http.Request) bool { // preHandler + if !cfg.GetBool("autoscaling.failover.enabled") { + http.Error(w, "Autoscaling workload failover store is disabled on the cluster agent", http.StatusServiceUnavailable) + return false + } + if r.Body == nil { + http.Error(w, "Request body is empty", http.StatusBadRequest) + return false + } + return true + }, + leaderHander.handle, + ) + r.HandleFunc("/series", api.WithTelemetryWrapper(loadMetricsHandlerName, handler)).Methods("POST") +} + +// Handler handles the series request and store the metrics to loadstore +type seriesHandler struct { + jobQueue *jobQueue +} + +func newSeriesHandler(ctx context.Context) *seriesHandler { + handler := seriesHandler{ + jobQueue: newJobQueue(ctx), + } + return &handler +} + +func (h *seriesHandler) handle(w http.ResponseWriter, r *http.Request) { + log.Tracef("Received series request from %s", r.RemoteAddr) + var err error + var rc io.ReadCloser + switch r.Header.Get("Content-Encoding") { + case encodingGzip: + rc, err = gzip.NewReader(r.Body) + case encodingDeflate: + rc, err = zlib.NewReader(r.Body) + case encodingZstd: + rc = zstd.NewReader(r.Body) + default: + rc = r.Body + } + defer rc.Close() + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + payload, err := io.ReadAll(rc) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + + metricPayload := &gogen.MetricPayload{} + if err := metricPayload.Unmarshal(payload); err != nil { + w.WriteHeader(http.StatusBadRequest) + return + } + h.jobQueue.addJob(metricPayload) + w.WriteHeader(http.StatusOK) +} diff --git a/cmd/cluster-agent/api/v2/series/series_nocompile.go b/cmd/cluster-agent/api/v2/series/series_nocompile.go new file mode 100644 index 0000000000000..14191d26c5281 --- /dev/null +++ b/cmd/cluster-agent/api/v2/series/series_nocompile.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. 
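For context on the series handler above, a hedged client-side sketch of a POST to the new endpoint: protobuf-marshal a MetricPayload, optionally compress it, and set Content-Encoding to one of the accepted values. Only the path, encodings, and payload type come from this patch; the host, port, and the omitted token auth are placeholders:

package main

import (
	"bytes"
	"fmt"
	"net/http"

	"github.com/DataDog/agent-payload/v5/gogen"
	"github.com/DataDog/zstd"
)

func main() {
	payload := &gogen.MetricPayload{} // illustrative empty payload
	raw, err := payload.Marshal()
	if err != nil {
		panic(err)
	}
	compressed, err := zstd.Compress(nil, raw)
	if err != nil {
		panic(err)
	}

	// Host and port are placeholders; real callers go through the cluster agent's
	// authenticated API.
	req, err := http.NewRequest(http.MethodPost,
		"https://127.0.0.1:5005/api/v2/series", bytes.NewReader(compressed))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Encoding", "zstd") // gzip and deflate are also accepted
	fmt.Println("prepared:", req.Method, req.URL.Path)
}
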
+ +//go:build !kubeapiserver + +package series + +import ( + "context" + + "github.com/DataDog/datadog-agent/comp/core/config" + "github.com/gorilla/mux" +) + +// InstallNodeMetricsEndpoints installs node metrics collection endpoints +func InstallNodeMetricsEndpoints(_ context.Context, _ *mux.Router, _ config.Component) { +} diff --git a/cmd/cluster-agent/custommetrics/server.go b/cmd/cluster-agent/custommetrics/server.go index f27e310bb84ce..95a6b6424a18f 100644 --- a/cmd/cluster-agent/custommetrics/server.go +++ b/cmd/cluster-agent/custommetrics/server.go @@ -24,7 +24,7 @@ import ( as "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) var cmd *DatadogMetricsAdapter @@ -44,7 +44,7 @@ const ( ) // RunServer creates and start a k8s custom metrics API server -func RunServer(ctx context.Context, apiCl *as.APIClient, datadogCl optional.Option[datadogclient.Component]) error { +func RunServer(ctx context.Context, apiCl *as.APIClient, datadogCl option.Option[datadogclient.Component]) error { defer clearServerResources() if apiCl == nil { return fmt.Errorf("unable to run server with nil APIClient") @@ -82,7 +82,7 @@ func RunServer(ctx context.Context, apiCl *as.APIClient, datadogCl optional.Opti return server.GenericAPIServer.PrepareRun().RunWithContext(ctx) } -func (a *DatadogMetricsAdapter) makeProviderOrDie(ctx context.Context, apiCl *as.APIClient, datadogCl optional.Option[datadogclient.Component]) (provider.ExternalMetricsProvider, error) { +func (a *DatadogMetricsAdapter) makeProviderOrDie(ctx context.Context, apiCl *as.APIClient, datadogCl option.Option[datadogclient.Component]) (provider.ExternalMetricsProvider, error) { client, err := a.DynamicClient() if err != nil { log.Infof("Unable to construct dynamic client: %v", err) diff --git a/cmd/cluster-agent/subcommands/diagnose/command.go b/cmd/cluster-agent/subcommands/diagnose/command.go index 5e2521b60f8c3..a871858e4edcb 100644 --- a/cmd/cluster-agent/subcommands/diagnose/command.go +++ b/cmd/cluster-agent/subcommands/diagnose/command.go @@ -18,7 +18,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" "github.com/DataDog/datadog-agent/pkg/diagnose" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -38,7 +37,6 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { LogParams: log.ForOneShot(command.LoggerName, "off", true), // no need to show regular logs }), core.Bundle(), - compressionfx.Module(), ) }, } diff --git a/cmd/cluster-agent/subcommands/start/command.go b/cmd/cluster-agent/subcommands/start/command.go index 80fe4a3b6d383..938e11c3d4d4f 100644 --- a/cmd/cluster-agent/subcommands/start/command.go +++ b/cmd/cluster-agent/subcommands/start/command.go @@ -19,13 +19,6 @@ import ( "syscall" "time" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" - "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" - "github.com/DataDog/datadog-agent/pkg/serializer" - "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" - 
"github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/controllers" - "github.com/DataDog/datadog-agent/pkg/util/optional" - "github.com/DataDog/datadog-agent/cmd/agent/common" admissioncmd "github.com/DataDog/datadog-agent/cmd/cluster-agent/admission" "github.com/DataDog/datadog-agent/cmd/cluster-agent/api" @@ -66,6 +59,9 @@ import ( rccomp "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "github.com/DataDog/datadog-agent/comp/remote-config/rcservice/rcserviceimpl" "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" "github.com/DataDog/datadog-agent/pkg/clusteragent" admissionpkg "github.com/DataDog/datadog-agent/pkg/clusteragent/admission" admissionpatch "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/patch" @@ -78,17 +74,22 @@ import ( rcclient "github.com/DataDog/datadog-agent/pkg/config/remote/client" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" + "github.com/DataDog/datadog-agent/pkg/serializer" hostnameStatus "github.com/DataDog/datadog-agent/pkg/status/clusteragent/hostname" endpointsStatus "github.com/DataDog/datadog-agent/pkg/status/endpoints" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/coredump" + "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" apicommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" + "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/controllers" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" "github.com/gorilla/mux" @@ -138,7 +139,6 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { }), core.Bundle(), forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithResolvers(), defaultforwarder.WithDisableAPIKeyChecking())), - compressionfx.Module(), demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams()), orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewDefaultParams()), eventplatformimpl.Module(eventplatformimpl.NewDisabledParams()), @@ -165,13 +165,13 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { fx.Provide(func(config config.Component) status.HeaderInformationProvider { return status.NewHeaderInformationProvider(hostnameStatus.NewProvider(config)) }), - fx.Provide(func() optional.Option[integrations.Component] { - return optional.NewNoneOption[integrations.Component]() + fx.Provide(func() option.Option[integrations.Component] { + return option.None[integrations.Component]() }), statusimpl.Module(), collectorimpl.Module(), - fx.Provide(func() optional.Option[serializer.MetricSerializer] { - return optional.NewNoneOption[serializer.MetricSerializer]() + fx.Provide(func() 
option.Option[serializer.MetricSerializer] { + return option.None[serializer.MetricSerializer]() }), autodiscoveryimpl.Module(), rcserviceimpl.Module(), @@ -205,6 +205,8 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { proccontainers.InitSharedContainerProvider(wmeta, tagger) }), haagentfx.Module(), + logscompressionfx.Module(), + metricscompressionfx.Module(), ) }, } @@ -219,14 +221,15 @@ func start(log log.Component, demultiplexer demultiplexer.Component, wmeta workloadmeta.Component, ac autodiscovery.Component, - dc optional.Option[datadogclient.Component], + dc option.Option[datadogclient.Component], secretResolver secrets.Component, statusComponent status.Component, collector collector.Component, - rcService optional.Option[rccomp.Component], - logReceiver optional.Option[integrations.Component], + rcService option.Option[rccomp.Component], + logReceiver option.Option[integrations.Component], _ healthprobe.Component, settings settings.Component, + compression logscompression.Component, datadogConfig config.Component, ) error { stopCh := make(chan struct{}) @@ -267,8 +270,10 @@ func start(log log.Component, } }() - // Setup the leader forwarder for language detection and cluster checks - if config.GetBool("cluster_checks.enabled") || (config.GetBool("language_detection.enabled") && config.GetBool("language_detection.reporting.enabled")) { + // Setup the leader forwarder for autoscaling failover store, language detection and cluster checks + if config.GetBool("cluster_checks.enabled") || + (config.GetBool("language_detection.enabled") && config.GetBool("language_detection.reporting.enabled")) || + config.GetBool("autoscaling.failover.enabled") { apidca.NewGlobalLeaderForwarder( config.GetInt("cluster_agent.cmd_port"), config.GetInt("cluster_agent.max_connections"), @@ -382,7 +387,7 @@ func start(log log.Component, // Set up check collector registerChecks(wmeta, taggerComp, config) - ac.AddScheduler("check", pkgcollector.InitCheckScheduler(optional.NewOption(collector), demultiplexer, logReceiver, taggerComp), true) + ac.AddScheduler("check", pkgcollector.InitCheckScheduler(option.New(collector), demultiplexer, logReceiver, taggerComp), true) // start the autoconfig, this will immediately run any configured check ac.LoadAndRun(mainCtx) @@ -440,7 +445,7 @@ func start(log log.Component, go func() { defer wg.Done() - if err := runCompliance(mainCtx, demultiplexer, wmeta, apiCl, le.IsLeader); err != nil { + if err := runCompliance(mainCtx, demultiplexer, wmeta, apiCl, compression, le.IsLeader); err != nil { pkglog.Errorf("Error while running compliance agent: %v", err) } }() diff --git a/cmd/cluster-agent/subcommands/start/compliance.go b/cmd/cluster-agent/subcommands/start/compliance.go index 780e390331cee..74e4a1f78b97b 100644 --- a/cmd/cluster-agent/subcommands/start/compliance.go +++ b/cmd/cluster-agent/subcommands/start/compliance.go @@ -16,6 +16,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/compliance" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" @@ -31,9 +32,9 @@ const ( intakeTrackType = "compliance" ) -func runCompliance(ctx context.Context, senderManager sender.SenderManager, wmeta workloadmeta.Component, apiCl *apiserver.APIClient, isLeader func() bool) 
error { +func runCompliance(ctx context.Context, senderManager sender.SenderManager, wmeta workloadmeta.Component, apiCl *apiserver.APIClient, compression logscompression.Component, isLeader func() bool) error { stopper := startstop.NewSerialStopper() - if err := startCompliance(senderManager, wmeta, stopper, apiCl, isLeader); err != nil { + if err := startCompliance(senderManager, wmeta, stopper, apiCl, isLeader, compression); err != nil { return err } @@ -72,7 +73,7 @@ func newLogContextCompliance() (*config.Endpoints, *client.DestinationsContext, return newLogContext(logsConfigComplianceKeys, "cspm-intake.") } -func startCompliance(senderManager sender.SenderManager, wmeta workloadmeta.Component, stopper startstop.Stopper, apiCl *apiserver.APIClient, isLeader func() bool) error { +func startCompliance(senderManager sender.SenderManager, wmeta workloadmeta.Component, stopper startstop.Stopper, apiCl *apiserver.APIClient, isLeader func() bool, compression logscompression.Component) error { endpoints, ctx, err := newLogContextCompliance() if err != nil { log.Error(err) @@ -87,7 +88,7 @@ func startCompliance(senderManager sender.SenderManager, wmeta workloadmeta.Comp return err } - reporter := compliance.NewLogReporter(hname, "compliance-agent", "compliance", endpoints, ctx) + reporter := compliance.NewLogReporter(hname, "compliance-agent", "compliance", endpoints, ctx, compression) statsdClient, err := simpleTelemetrySenderFromSenderManager(senderManager) if err != nil { return err diff --git a/cmd/cluster-agent/subcommands/telemetry/command.go b/cmd/cluster-agent/subcommands/telemetry/command.go index eaeffbbabcb94..23f826762e5ac 100644 --- a/cmd/cluster-agent/subcommands/telemetry/command.go +++ b/cmd/cluster-agent/subcommands/telemetry/command.go @@ -11,9 +11,10 @@ package telemetry import ( "fmt" - "github.com/DataDog/datadog-agent/cmd/cluster-agent/command" - "github.com/DataDog/datadog-agent/pkg/flare" "github.com/spf13/cobra" + + "github.com/DataDog/datadog-agent/cmd/cluster-agent/command" + clusterAgentFlare "github.com/DataDog/datadog-agent/pkg/flare/clusteragent" ) // Commands returns a slice of subcommands for the 'cluster-agent' command. 
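The compliance change above threads an already-built logs compressor into the reporter instead of letting the reporter construct its own. A toy illustration of that dependency-injection shape; the types here are stand-ins, not the agent's compliance or compression APIs:

package main

import "fmt"

// Compressor stands in for a compression component provided by the fx graph.
type Compressor interface{ ContentEncoding() string }

type zstdCompressor struct{}

func (zstdCompressor) ContentEncoding() string { return "zstd" }

// LogReporter mirrors the new shape: the compressor arrives as a parameter.
type LogReporter struct{ compressor Compressor }

func NewLogReporter(c Compressor) *LogReporter { return &LogReporter{compressor: c} }

func main() {
	reporter := NewLogReporter(zstdCompressor{}) // injected by fx in the real agent
	fmt.Println("reporter compresses with", reporter.compressor.ContentEncoding())
}
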
@@ -25,7 +26,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { Short: "Print the telemetry metrics exposed by the cluster agent", Long: ``, RunE: func(cmd *cobra.Command, args []string) error { - payload, err := flare.QueryDCAMetrics() + payload, err := clusterAgentFlare.QueryDCAMetrics() if err != nil { return err } diff --git a/cmd/dogstatsd/subcommands/start/command.go b/cmd/dogstatsd/subcommands/start/command.go index b6e6a63361ae8..8071cbd3cfe0d 100644 --- a/cmd/dogstatsd/subcommands/start/command.go +++ b/cmd/dogstatsd/subcommands/start/command.go @@ -54,14 +54,15 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/resources/resourcesimpl" "github.com/DataDog/datadog-agent/comp/metadata/runner" metadatarunnerimpl "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/coredump" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -125,8 +126,8 @@ func RunDogstatsdFct(cliParams *CLIParams, defaultConfPath string, defaultLogFil defaultConfPath, configOptions..., )), - fx.Provide(func(comp secrets.Component) optional.Option[secrets.Component] { - return optional.NewOption[secrets.Component](comp) + fx.Provide(func(comp secrets.Component) option.Option[secrets.Component] { + return option.New[secrets.Component](comp) }), fx.Supply(secrets.NewEnabledParams()), telemetryimpl.Module(), @@ -141,7 +142,8 @@ func RunDogstatsdFct(cliParams *CLIParams, defaultConfPath string, defaultLogFil AgentType: workloadmeta.NodeAgent, InitHelper: common.GetWorkloadmetaInit(), }), - compressionfx.Module(), + metricscompressionfx.Module(), + logscompressionfx.Module(), demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams( demultiplexerimpl.WithContinueOnMissingHostname(), demultiplexerimpl.WithDogstatsdNoAggregationPipelineConfig(), diff --git a/cmd/installer/subcommands/daemon/api.go b/cmd/installer/subcommands/daemon/api.go index 2b1eeb174a67f..53bcb49eaf54c 100644 --- a/cmd/installer/subcommands/daemon/api.go +++ b/cmd/installer/subcommands/daemon/api.go @@ -54,6 +54,17 @@ func apiCommands(global *command.GlobalParams) []*cobra.Command { }) }, } + removeCmd := &cobra.Command{ + Use: "remove package", + Short: "Removes a package", + Args: cobra.ExactArgs(1), + RunE: func(_ *cobra.Command, args []string) error { + return experimentFxWrapper(remove, &cliParams{ + GlobalParams: *global, + pkg: args[0], + }) + }, + } startExperimentCmd := &cobra.Command{ Use: "start-experiment package version", Aliases: []string{"start"}, @@ -128,7 +139,7 @@ func apiCommands(global *command.GlobalParams) []*cobra.Command { }) }, } - return []*cobra.Command{setCatalogCmd, startExperimentCmd, stopExperimentCmd, promoteExperimentCmd, installCmd, startConfigExperimentCmd, stopConfigExperimentCmd, promoteConfigExperimentCmd} + return []*cobra.Command{setCatalogCmd, startExperimentCmd, stopExperimentCmd, 
promoteExperimentCmd, installCmd, removeCmd, startConfigExperimentCmd, stopConfigExperimentCmd, promoteConfigExperimentCmd} } func experimentFxWrapper(f interface{}, params *cliParams) error { @@ -211,7 +222,16 @@ func promoteConfig(params *cliParams, client localapiclient.Component) error { func install(params *cliParams, client localapiclient.Component) error { err := client.Install(params.pkg, params.version) if err != nil { - fmt.Println("Error bootstrapping package:", err) + fmt.Println("Error installing package:", err) + return err + } + return nil +} + +func remove(params *cliParams, client localapiclient.Component) error { + err := client.Remove(params.pkg) + if err != nil { + fmt.Println("Error removing package:", err) return err } return nil diff --git a/cmd/installer/subcommands/daemon/run_nix.go b/cmd/installer/subcommands/daemon/run_nix.go index f20aa840c2aa7..24410f2a1e6c4 100644 --- a/cmd/installer/subcommands/daemon/run_nix.go +++ b/cmd/installer/subcommands/daemon/run_nix.go @@ -8,11 +8,13 @@ package daemon import ( + "errors" "os" "os/signal" "syscall" "github.com/DataDog/datadog-agent/cmd/installer/command" + "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/pid" "github.com/DataDog/datadog-agent/pkg/util/log" "go.uber.org/fx" @@ -22,6 +24,9 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) +// ErrNotEnabled represents the case in which datadog-installer is not enabled +var ErrNotEnabled = errors.New("datadog-installer not enabled") + func runFxWrapper(global *command.GlobalParams) error { return fxutil.OneShot( run, @@ -29,11 +34,26 @@ func runFxWrapper(global *command.GlobalParams) error { ) } -func run(shutdowner fx.Shutdowner, _ pid.Component, _ localapi.Component, _ telemetry.Component) error { +func run(shutdowner fx.Shutdowner, cfg config.Component, _ pid.Component, _ localapi.Component, _ telemetry.Component) error { + if err := gracefullyExitIfDisabled(cfg, shutdowner); err != nil { + log.Infof("Datadog installer is not enabled, exiting") + return nil + } handleSignals(shutdowner) return nil } +func gracefullyExitIfDisabled(cfg config.Component, shutdowner fx.Shutdowner) error { + if !cfg.GetBool("remote_updates") { + // Note: when not using systemd we may run into an issue where we need to + // sleep for a while here, like the system probe does + // See https://github.com/DataDog/datadog-agent/blob/b5c6a93dff27a8fdae37fc9bf23b3604a9f87591/cmd/system-probe/subcommands/run/command.go#L128 + _ = shutdowner.Shutdown() + return ErrNotEnabled + } + return nil +} + func handleSignals(shutdowner fx.Shutdowner) { sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGPIPE) diff --git a/cmd/installer/subcommands/daemon/status.tmpl b/cmd/installer/subcommands/daemon/status.tmpl index 02481dea5c50d..72a1a811b1dff 100644 --- a/cmd/installer/subcommands/daemon/status.tmpl +++ b/cmd/installer/subcommands/daemon/status.tmpl @@ -1,18 +1,31 @@ Datadog Installer v{{ htmlSafe .Version }} -{{ range $name, $package := .Packages }} +{{ range $name, $packageState := .Packages }} +{{ $version := $packageState.Version }} +{{ $config := $packageState.Config }} {{ boldText $name }} - State: {{ if $package.Experiment -}}{{ yellowText "Upgrading" }}{{- else if $package.Stable -}}{{ greenText "OK" }}{{- else -}}config only{{- end }} + State: {{ if $version.Experiment -}}{{ yellowText "Upgrading" }}{{- else if $version.Stable -}}{{ greenText "OK" }}{{- else -}}config only{{- end }} 
Installed versions: - {{- if $package.Stable }} - {{ greenText "●" }} stable: v{{ htmlSafe $package.Stable }} + {{- if $version.Stable }} + {{ greenText "●" }} stable: v{{ htmlSafe $version.Stable }} {{- else }} ● stable: none {{- end }} - {{- if $package.Experiment }} - {{ yellowText "●" }} experiment: v{{ htmlSafe $package.Experiment }} + {{- if $version.Experiment }} + {{ yellowText "●" }} experiment: v{{ htmlSafe $version.Experiment }} {{- else }} ● experiment: none {{- end }} + {{- if $config.Stable }} + {{ greenText "●" }} stable config: {{ htmlSafe $config.Stable }} + {{- else }} + ● stable config: none + {{- end }} + {{- if $config.Experiment }} + {{ yellowText "●" }} experiment config: {{ htmlSafe $config.Experiment }} + {{- else }} + ● experiment config: none + {{- end }} + {{- if eq $name "datadog-apm-inject" }} {{ template "datadog-apm-inject" $.ApmInjectionStatus }} @@ -23,9 +36,8 @@ Datadog Installer v{{ htmlSafe .Version }} Remote configuration client state: StableVersion: {{ $remoteConfig.StableVersion }} ExperimentVersion: {{ $remoteConfig.ExperimentVersion }} - StableConfigVersion: {{ if $remoteConfig.StableConfigState }}{{ $remoteConfig.StableConfigState.Version }}{{ else }}{{ "" }}{{ end }} - ExperimentConfigVersion: {{ if $remoteConfig.ExperimentConfigState }}{{ $remoteConfig.ExperimentConfigState.Version }}{{ else }}{{ "" }}{{ end }} - RemoteConfigVersion: {{ if $remoteConfig.RemoteConfigState }}{{ $remoteConfig.RemoteConfigState.Version }}{{ else }}{{ "" }}{{ end }} + StableConfigVersion: {{ $remoteConfig.StableConfigVersion }} + ExperimentConfigVersion: {{ $remoteConfig.ExperimentConfigVersion }} Task: {{- if $remoteConfig.Task }} Id: {{ $remoteConfig.Task.Id }} diff --git a/cmd/installer/subcommands/installer/command.go b/cmd/installer/subcommands/installer/command.go index f394d9955d407..0642cacda8310 100644 --- a/cmd/installer/subcommands/installer/command.go +++ b/cmd/installer/subcommands/installer/command.go @@ -400,10 +400,10 @@ func promoteExperimentCommand() *cobra.Command { func installConfigExperimentCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "install-config-experiment ", + Use: "install-config-experiment ", Short: "Install a config experiment", GroupID: "installer", - Args: cobra.ExactArgs(2), + Args: cobra.ExactArgs(3), RunE: func(_ *cobra.Command, args []string) (err error) { i, err := newInstallerCmd("install_config_experiment") if err != nil { @@ -412,7 +412,7 @@ func installConfigExperimentCommand() *cobra.Command { defer func() { i.stop(err) }() i.span.SetTag("params.package", args[0]) i.span.SetTag("params.version", args[1]) - return i.InstallConfigExperiment(i.ctx, args[0], args[1]) + return i.InstallConfigExperiment(i.ctx, args[0], args[1], []byte(args[2])) }, } return cmd diff --git a/cmd/otel-agent/command/command.go b/cmd/otel-agent/command/command.go index 94e3d2c3f70e3..2a4ea3d6bac0a 100644 --- a/cmd/otel-agent/command/command.go +++ b/cmd/otel-agent/command/command.go @@ -73,7 +73,8 @@ func makeCommands(globalParams *subcommands.GlobalParams) *cobra.Command { true, // show env variable value in usage ) - if err := ef.Parse(os.Args[1:]); err != nil { + // There may be other env vars in addition to the ones in envflag.NewEnvFlag. 
Do not panic if those env vars do not have a help message (flag.ErrHelp) + if err := ef.Parse(os.Args[1:]); err != nil && err != flag.ErrHelp { panic(err) } @@ -82,7 +83,7 @@ func makeCommands(globalParams *subcommands.GlobalParams) *cobra.Command { const configFlag = "config" const coreConfigFlag = "core-config" -const syncDelayFlag = "sync-delay" +const syncDelayFlag = "sync-delay" // TODO: Change this to sync-on-init-timeout const syncTimeoutFlag = "sync-to" func flags(reg *featuregate.Registry, cfgs *subcommands.GlobalParams) *flag.FlagSet { @@ -90,8 +91,8 @@ func flags(reg *featuregate.Registry, cfgs *subcommands.GlobalParams) *flag.Flag flagSet.Var(cfgs, configFlag, "Locations to the config file(s), note that only a"+ " single location can be set per flag entry e.g. `--config=file:/path/to/first --config=file:path/to/second`.") flagSet.StringVar(&cfgs.CoreConfPath, coreConfigFlag, "", "Location to the Datadog Agent config file.") - flagSet.DurationVar(&cfgs.SyncDelay, syncDelayFlag, 0, "Delay before first config sync.") - flagSet.DurationVar(&cfgs.SyncTimeout, syncTimeoutFlag, 3*time.Second, "Timeout for sync requests.") + flagSet.DurationVar(&cfgs.SyncOnInitTimeout, syncDelayFlag, 0, "How long should config sync retry at initialization before failing.") + flagSet.DurationVar(&cfgs.SyncTimeout, syncTimeoutFlag, 3*time.Second, "Timeout for config sync requests.") flagSet.Func("set", "Set arbitrary component config property. The component has to be defined in the config file and the flag"+ @@ -106,6 +107,10 @@ func flags(reg *featuregate.Registry, cfgs *subcommands.GlobalParams) *flag.Flag return nil }) + err := featuregate.GlobalRegistry().Set("datadog.EnableOperationAndResourceNameV2", true) + if err != nil { + panic(err) + } reg.RegisterFlags(flagSet) return flagSet } diff --git a/cmd/otel-agent/config/agent_config.go b/cmd/otel-agent/config/agent_config.go index 4f2a1a5b107cc..b812ed304444c 100644 --- a/cmd/otel-agent/config/agent_config.go +++ b/cmd/otel-agent/config/agent_config.go @@ -12,6 +12,7 @@ import ( "fmt" "strings" + pkgdatadog "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog" datadogconfig "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog/config" "go.opentelemetry.io/collector/confmap" "go.opentelemetry.io/collector/confmap/provider/envprovider" @@ -79,6 +80,7 @@ func NewConfigComponent(ctx context.Context, ddCfg string, uris []string) (confi httpprovider.NewFactory(), httpsprovider.NewFactory(), }, + DefaultScheme: "env", } resolver, err := confmap.NewResolver(rs) @@ -117,7 +119,7 @@ func NewConfigComponent(ctx context.Context, ddCfg string, uris []string) (confi return nil, err } var ok bool - activeLogLevel, ok = logLevelMap[pkgconfig.GetString("log_level")] + activeLogLevel, ok = logLevelMap[strings.ToLower(pkgconfig.GetString("log_level"))] if !ok { return nil, fmt.Errorf("invalid log level (%v) set in the Datadog Agent configuration", pkgconfig.GetString("log_level")) } @@ -125,13 +127,14 @@ func NewConfigComponent(ctx context.Context, ddCfg string, uris []string) (confi // Set the right log level. The most verbose setting takes precedence. 
telemetryLogLevel := sc.Telemetry.Logs.Level - telemetryLogMapping, ok := logLevelMap[telemetryLogLevel.String()] + telemetryLogMapping, ok := logLevelMap[strings.ToLower(telemetryLogLevel.String())] if !ok { return nil, fmt.Errorf("invalid log level (%v) set in the OTel Telemetry configuration", telemetryLogLevel.String()) } if telemetryLogMapping < activeLogLevel { activeLogLevel = telemetryLogMapping } + fmt.Printf("setting log level to: %v\n", logLevelReverseMap[activeLogLevel]) pkgconfig.Set("log_level", logLevelReverseMap[activeLogLevel], pkgconfigmodel.SourceFile) // Override config read (if any) with Default values @@ -179,7 +182,10 @@ func NewConfigComponent(ctx context.Context, ddCfg string, uris []string) (confi } if pkgconfig.Get("apm_config.features") == nil { - apmConfigFeatures := []string{"enable_receive_resource_spans_v2", "enable_operation_and_resource_name_logic_v2"} + apmConfigFeatures := []string{} + if pkgdatadog.OperationAndResourceNameV2FeatureGate.IsEnabled() { + apmConfigFeatures = append(apmConfigFeatures, "enable_operation_and_resource_name_logic_v2") + } if ddc.Traces.ComputeTopLevelBySpanKind { apmConfigFeatures = append(apmConfigFeatures, "enable_otlp_compute_top_level_by_span_kind") } diff --git a/cmd/otel-agent/config/agent_config_test.go b/cmd/otel-agent/config/agent_config_test.go index 01567d48d7ffe..085ccecf33fee 100644 --- a/cmd/otel-agent/config/agent_config_test.go +++ b/cmd/otel-agent/config/agent_config_test.go @@ -8,6 +8,7 @@ package config import ( "context" "fmt" + "go.opentelemetry.io/collector/featuregate" "io/fs" "os" "testing" @@ -57,7 +58,7 @@ func (suite *ConfigTestSuite) TestAgentConfig() { assert.Equal(t, false, c.Get("apm_config.receiver_enabled")) assert.Equal(t, 10, c.Get("apm_config.trace_buffer")) assert.Equal(t, false, c.Get("otlp_config.traces.span_name_as_resource_name")) - assert.Equal(t, []string{"enable_receive_resource_spans_v2", "enable_operation_and_resource_name_logic_v2"}, c.Get("apm_config.features")) + assert.Equal(t, []string{}, c.Get("apm_config.features")) } func (suite *ConfigTestSuite) TestAgentConfigDefaults() { @@ -79,10 +80,56 @@ func (suite *ConfigTestSuite) TestAgentConfigDefaults() { assert.Equal(t, "https://trace.agent.datadoghq.com", c.Get("apm_config.apm_dd_url")) assert.Equal(t, false, c.Get("apm_config.receiver_enabled")) assert.Equal(t, false, c.Get("otlp_config.traces.span_name_as_resource_name")) - assert.Equal(t, []string{"enable_receive_resource_spans_v2", "enable_operation_and_resource_name_logic_v2", "enable_otlp_compute_top_level_by_span_kind"}, + assert.Equal(t, []string{"enable_otlp_compute_top_level_by_span_kind"}, c.Get("apm_config.features")) } +func (suite *ConfigTestSuite) TestOperationAndResourceNameV2FeatureGate() { + featuregate.GlobalRegistry().Set("datadog.EnableOperationAndResourceNameV2", true) + t := suite.T() + fileName := "testdata/config_default.yaml" + c, err := NewConfigComponent(context.Background(), "", []string{fileName}) + if err != nil { + t.Errorf("Failed to load agent config: %v", err) + } + assert.Equal(t, "DATADOG_API_KEY", c.Get("api_key")) + assert.Equal(t, "datadoghq.com", c.Get("site")) + assert.Equal(t, "https://api.datadoghq.com", c.Get("dd_url")) + assert.Equal(t, true, c.Get("logs_enabled")) + assert.Equal(t, "https://agent-http-intake.logs.datadoghq.com", c.Get("logs_config.logs_dd_url")) + assert.Equal(t, 5, c.Get("logs_config.batch_wait")) + assert.Equal(t, true, c.Get("logs_config.use_compression")) + assert.Equal(t, true, 
c.Get("logs_config.force_use_http")) + assert.Equal(t, 6, c.Get("logs_config.compression_level")) + assert.Equal(t, "https://trace.agent.datadoghq.com", c.Get("apm_config.apm_dd_url")) + assert.Equal(t, false, c.Get("apm_config.receiver_enabled")) + assert.Equal(t, false, c.Get("otlp_config.traces.span_name_as_resource_name")) + assert.Equal(t, []string{"enable_operation_and_resource_name_logic_v2", "enable_otlp_compute_top_level_by_span_kind"}, + c.Get("apm_config.features")) +} + +func (suite *ConfigTestSuite) TestAgentConfigExpandEnvVars() { + t := suite.T() + fileName := "testdata/config_default_expand_envvar.yaml" + suite.T().Setenv("DD_API_KEY", "abc") + c, err := NewConfigComponent(context.Background(), "", []string{fileName}) + if err != nil { + t.Errorf("Failed to load agent config: %v", err) + } + assert.Equal(t, "abc", c.Get("api_key")) +} + +func (suite *ConfigTestSuite) TestAgentConfigExpandEnvVars_Raw() { + t := suite.T() + fileName := "testdata/config_default_expand_envvar_raw.yaml" + suite.T().Setenv("DD_API_KEY", "abc") + c, err := NewConfigComponent(context.Background(), "", []string{fileName}) + if err != nil { + t.Errorf("Failed to load agent config: %v", err) + } + assert.Equal(t, "abc", c.Get("api_key")) +} + func (suite *ConfigTestSuite) TestAgentConfigWithDatadogYamlDefaults() { t := suite.T() fileName := "testdata/config_default.yaml" @@ -105,7 +152,7 @@ func (suite *ConfigTestSuite) TestAgentConfigWithDatadogYamlDefaults() { assert.Equal(t, "https://trace.agent.datadoghq.com", c.Get("apm_config.apm_dd_url")) assert.Equal(t, false, c.Get("apm_config.receiver_enabled")) assert.Equal(t, false, c.Get("otlp_config.traces.span_name_as_resource_name")) - assert.Equal(t, []string{"enable_receive_resource_spans_v2", "enable_operation_and_resource_name_logic_v2", "enable_otlp_compute_top_level_by_span_kind"}, c.Get("apm_config.features")) + assert.Equal(t, []string{"enable_otlp_compute_top_level_by_span_kind"}, c.Get("apm_config.features")) // log_level from datadog.yaml takes precedence -> more verbose assert.Equal(t, "debug", c.Get("log_level")) @@ -203,7 +250,29 @@ func (suite *ConfigTestSuite) TestEnvBadLogLevel() { fileName := "testdata/config_default.yaml" ddFileName := "testdata/datadog_low_log_level.yaml" _, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) - assert.Error(t, err) + assert.EqualError(t, err, "invalid log level (yabadabadooo) set in the Datadog Agent configuration") +} + +func (suite *ConfigTestSuite) TestEnvUpperCaseLogLevel() { + t := suite.T() + oldval, exists := os.LookupEnv("DD_LOG_LEVEL") + os.Unsetenv("DD_LOG_LEVEL") + defer func() { + if !exists { + os.Unsetenv("DD_LOG_LEVEL") + } else { + os.Setenv("DD_LOG_LEVEL", oldval) + } + }() + fileName := "testdata/config_default.yaml" + ddFileName := "testdata/datadog_uppercase_log_level.yaml" + c, err := NewConfigComponent(context.Background(), ddFileName, []string{fileName}) + if err != nil { + t.Errorf("Failed to load agent config: %v", err) + } + + // log_level will be mapped to lowercase by code and set accordingly + assert.Equal(t, "info", c.Get("log_level")) } func (suite *ConfigTestSuite) TestBadDDConfigFile() { diff --git a/cmd/otel-agent/config/testdata/config_default_expand_envvar.yaml b/cmd/otel-agent/config/testdata/config_default_expand_envvar.yaml new file mode 100644 index 0000000000000..e5cfa7d2fd686 --- /dev/null +++ b/cmd/otel-agent/config/testdata/config_default_expand_envvar.yaml @@ -0,0 +1,18 @@ +receivers: + otlp: + protocols: + grpc: + http: 
+exporters: + datadog: + api: + key: ${env:DD_API_KEY} +processors: + batch: + timeout: 10s +service: + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [datadog] diff --git a/cmd/otel-agent/config/testdata/config_default_expand_envvar_raw.yaml b/cmd/otel-agent/config/testdata/config_default_expand_envvar_raw.yaml new file mode 100644 index 0000000000000..a665596025b1a --- /dev/null +++ b/cmd/otel-agent/config/testdata/config_default_expand_envvar_raw.yaml @@ -0,0 +1,18 @@ +receivers: + otlp: + protocols: + grpc: + http: +exporters: + datadog: + api: + key: ${DD_API_KEY} +processors: + batch: + timeout: 10s +service: + pipelines: + traces: + receivers: [otlp] + processors: [batch] + exporters: [datadog] diff --git a/cmd/otel-agent/config/testdata/datadog_uppercase_log_level.yaml b/cmd/otel-agent/config/testdata/datadog_uppercase_log_level.yaml new file mode 100644 index 0000000000000..709850d25b5f8 --- /dev/null +++ b/cmd/otel-agent/config/testdata/datadog_uppercase_log_level.yaml @@ -0,0 +1,10 @@ +log_level: INFO + +otelcollector: + enabled: true + extension_url: "https://localhost:7777" + +agent_ipc: + port: 5009 + config_refresh_interval: 60 + diff --git a/cmd/otel-agent/dist/otel-config.yaml b/cmd/otel-agent/dist/otel-config.yaml index 58423bb8982ec..fb916d72bb659 100644 --- a/cmd/otel-agent/dist/otel-config.yaml +++ b/cmd/otel-agent/dist/otel-config.yaml @@ -37,8 +37,8 @@ exporters: span_name_as_resource_name: true hostname: "otelcol-docker" api: - key: ${DD_API_KEY} - site: ${DD_SITE} + key: ${env:DD_API_KEY} + site: ${env:DD_SITE} processors: infraattributes: batch: diff --git a/cmd/otel-agent/subcommands/run/command.go b/cmd/otel-agent/subcommands/run/command.go index d7958c6dabc59..347e38f312ab0 100644 --- a/cmd/otel-agent/subcommands/run/command.go +++ b/cmd/otel-agent/subcommands/run/command.go @@ -32,7 +32,6 @@ import ( remoteTaggerFx "github.com/DataDog/datadog-agent/comp/core/tagger/fx-remote" taggerTypes "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" @@ -48,7 +47,9 @@ import ( "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient" - compressionfxzlib "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-zlib" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" + metricscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-otel" traceagentfx "github.com/DataDog/datadog-agent/comp/trace/agent/fx" traceagentcomp "github.com/DataDog/datadog-agent/comp/trace/agent/impl" gzipfx "github.com/DataDog/datadog-agent/comp/trace/compression/fx-gzip" @@ -58,8 +59,9 @@ import ( pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/compression" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" 
+ "github.com/DataDog/datadog-agent/pkg/util/option" "go.uber.org/fx" ) @@ -112,13 +114,7 @@ func runOTelAgentCommand(ctx context.Context, params *subcommands.GlobalParams, }), logfx.Module(), fetchonlyimpl.Module(), - // TODO: don't rely on this pattern; remove this `ModuleWithParams` thing - // and instead adapt OptionalModule to allow parameter passing naturally. - // See: https://github.com/DataDog/datadog-agent/pull/28386 - configsyncimpl.ModuleWithParams(), - fx.Provide(func() configsyncimpl.Params { - return configsyncimpl.NewParams(params.SyncTimeout, params.SyncDelay, true) - }), + configsyncimpl.Module(configsyncimpl.NewParams(params.SyncTimeout, true, params.SyncOnInitTimeout)), converterfx.Module(), fx.Provide(func(cp converter.Component, _ configsync.Component) confmap.Converter { return cp @@ -166,8 +162,13 @@ func runOTelAgentCommand(ctx context.Context, params *subcommands.GlobalParams, return log.ForDaemon(params.LoggerName, "log_file", pkgconfigsetup.DefaultOTelAgentLogFile) }), logsagentpipelineimpl.Module(), - // We directly select fxzlib - compressionfxzlib.Module(), + logscompressionfx.Module(), + metricscompressionfx.Module(), + // For FX to provide the compression.Compressor interface (used by serializer.NewSerializer) + // implemented by the metricsCompression.Component + fx.Provide(func(c metricscompression.Component) compression.Compressor { + return c + }), fx.Provide(serializer.NewSerializer), // For FX to provide the serializer.MetricSerializer from the serializer.Serializer fx.Provide(func(s *serializer.Serializer) serializer.MetricSerializer { @@ -188,16 +189,10 @@ func runOTelAgentCommand(ctx context.Context, params *subcommands.GlobalParams, }), fx.Provide(newOrchestratorinterfaceimpl), fx.Options(opts...), - fx.Invoke(func(_ collectordef.Component, _ defaultforwarder.Forwarder, _ optional.Option[logsagentpipeline.Component]) { + fx.Invoke(func(_ collectordef.Component, _ defaultforwarder.Forwarder, _ option.Option[logsagentpipeline.Component]) { }), - // TODO: don't rely on this pattern; remove this `ModuleWithParams` thing - // and instead adapt OptionalModule to allow parameter passing naturally. - // See: https://github.com/DataDog/datadog-agent/pull/28386 - configsyncimpl.ModuleWithParams(), - fx.Provide(func() configsyncimpl.Params { - return configsyncimpl.NewParams(params.SyncTimeout, params.SyncDelay, true) - }), + configsyncimpl.Module(configsyncimpl.NewParams(params.SyncTimeout, true, params.SyncOnInitTimeout)), remoteTaggerFx.Module(tagger.RemoteParams{ RemoteTarget: func(c coreconfig.Component) (string, error) { return fmt.Sprintf(":%v", c.GetInt("cmd_port")), nil }, diff --git a/cmd/otel-agent/subcommands/subcommands.go b/cmd/otel-agent/subcommands/subcommands.go index 1d3dbe688724f..4abed724947ee 100644 --- a/cmd/otel-agent/subcommands/subcommands.go +++ b/cmd/otel-agent/subcommands/subcommands.go @@ -16,13 +16,13 @@ import ( // A pointer to this type is passed to SubcommandFactory's, but its contents // are not valid until Cobra calls the subcommand's Run or RunE function. type GlobalParams struct { - ConfPaths []string - Sets []string - CoreConfPath string - ConfigName string - LoggerName string - SyncDelay time.Duration - SyncTimeout time.Duration + ConfPaths []string + Sets []string + CoreConfPath string + ConfigName string + LoggerName string + SyncOnInitTimeout time.Duration + SyncTimeout time.Duration } // Set is called by Cobra when a flag is set. 
diff --git a/cmd/process-agent/command/main_common.go b/cmd/process-agent/command/main_common.go index 4d6b33d5ad831..2510bd55b6eac 100644 --- a/cmd/process-agent/command/main_common.go +++ b/cmd/process-agent/command/main_common.go @@ -19,7 +19,6 @@ import ( "github.com/DataDog/datadog-agent/comp/api/authtoken/fetchonlyimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" - "github.com/DataDog/datadog-agent/comp/core/configsync" "github.com/DataDog/datadog-agent/comp/core/configsync/configsyncimpl" logcomp "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/pid" @@ -155,7 +154,7 @@ func runApp(ctx context.Context, globalParams *GlobalParams) error { fetchonlyimpl.Module(), // Provide configsync module - configsyncimpl.Module(), + configsyncimpl.Module(configsyncimpl.NewDefaultParams()), // Provide autoexit module autoexitimpl.Module(), @@ -214,7 +213,6 @@ func runApp(ctx context.Context, globalParams *GlobalParams) error { _ expvars.Component, _ apiserver.Component, cfg config.Component, - _ configsync.Component, // TODO: This is needed by the container-provider which is not currently a component. // We should ensure the tagger is a dependency when converting to a component. _ tagger.Component, diff --git a/cmd/secrethelper/providers/k8s_secret.go b/cmd/secrethelper/providers/k8s_secret.go index 670e1adf6d305..72598a38169a7 100644 --- a/cmd/secrethelper/providers/k8s_secret.go +++ b/cmd/secrethelper/providers/k8s_secret.go @@ -6,18 +6,17 @@ package providers import ( - "context" "fmt" "strings" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" - "github.com/DataDog/datadog-agent/comp/core/secrets" ) +// KubeSecretGetter is a function that fetches a secret from k8s +type KubeSecretGetter func(string, string) (map[string][]byte, error) + // ReadKubernetesSecret reads a secrets store in k8s -func ReadKubernetesSecret(kubeClient kubernetes.Interface, path string) secrets.SecretVal { +func ReadKubernetesSecret(readSecretFromKubeClient KubeSecretGetter, path string) secrets.SecretVal { splitName := strings.Split(path, "/") if len(splitName) != 3 { @@ -26,12 +25,12 @@ func ReadKubernetesSecret(kubeClient kubernetes.Interface, path string) secrets. namespace, name, key := splitName[0], splitName[1], splitName[2] - secret, err := kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + secret, err := readSecretFromKubeClient(namespace, name) if err != nil { return secrets.SecretVal{ErrorMsg: err.Error()} } - value, ok := secret.Data[key] + value, ok := secret[key] if !ok { return secrets.SecretVal{ErrorMsg: fmt.Sprintf("key %s not found in secret %s/%s", key, namespace, name)} } diff --git a/cmd/secrethelper/providers/k8s_secret_test.go b/cmd/secrethelper/providers/k8s_secret_test.go index 5dddb83e878f0..cb8ac7959f641 100644 --- a/cmd/secrethelper/providers/k8s_secret_test.go +++ b/cmd/secrethelper/providers/k8s_secret_test.go @@ -6,6 +6,7 @@ package providers import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -74,7 +75,15 @@ func TestReadKubernetesSecret(t *testing.T) { } kubeClient := fake.NewSimpleClientset(kubeObjects...) 
- resolvedSecret := ReadKubernetesSecret(kubeClient, test.secretPath) + secretGetter := func(namespace, name string) (map[string][]byte, error) { + secret, err := kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return secret.Data, nil + } + + resolvedSecret := ReadKubernetesSecret(secretGetter, test.secretPath) if test.expectedError != "" { assert.Equal(t, test.expectedError, resolvedSecret.ErrorMsg) diff --git a/cmd/secrethelper/secret_helper.go b/cmd/secrethelper/secret_helper.go index a56201009e2b3..0806c6f9d8417 100644 --- a/cmd/secrethelper/secret_helper.go +++ b/cmd/secrethelper/secret_helper.go @@ -32,11 +32,9 @@ import ( "os" "path/filepath" "strings" - "time" "github.com/spf13/cobra" "go.uber.org/fx" - "k8s.io/client-go/kubernetes" "github.com/DataDog/datadog-agent/cmd/secrethelper/providers" "github.com/DataDog/datadog-agent/comp/core/secrets" @@ -51,9 +49,6 @@ const ( k8sSecretPrefix = "k8s_secret" ) -// NewKubeClient returns a new kubernetes.Interface -type NewKubeClient func(timeout time.Duration, qps float32, burst int) (kubernetes.Interface, error) - // cliParams are the command-line arguments for this subcommand type cliParams struct { usePrefixes bool @@ -100,17 +95,17 @@ func readCmd(cliParams *cliParams) error { dir = cliParams.args[0] } - return readSecrets(os.Stdin, os.Stdout, dir, cliParams.usePrefixes, apiserver.GetKubeClient) + return readSecrets(os.Stdin, os.Stdout, dir, cliParams.usePrefixes, apiserver.GetKubeSecret) } -func readSecrets(r io.Reader, w io.Writer, dir string, usePrefixes bool, newKubeClientFunc NewKubeClient) error { +func readSecrets(r io.Reader, w io.Writer, dir string, usePrefixes bool, kubeSecretGetter providers.KubeSecretGetter) error { inputSecrets, err := parseInputSecrets(r) if err != nil { return err } if usePrefixes { - return writeFetchedSecrets(w, readSecretsUsingPrefixes(inputSecrets, dir, newKubeClientFunc)) + return writeFetchedSecrets(w, readSecretsUsingPrefixes(inputSecrets, dir, kubeSecretGetter)) } return writeFetchedSecrets(w, readSecretsFromFile(inputSecrets, dir)) @@ -161,7 +156,7 @@ func readSecretsFromFile(secretsList []string, dir string) map[string]secrets.Se return res } -func readSecretsUsingPrefixes(secretsList []string, rootPath string, newKubeClientFunc NewKubeClient) map[string]secrets.SecretVal { +func readSecretsUsingPrefixes(secretsList []string, rootPath string, kubeSecretGetter providers.KubeSecretGetter) map[string]secrets.SecretVal { res := make(map[string]secrets.SecretVal) for _, secretID := range secretsList { @@ -175,12 +170,7 @@ func readSecretsUsingPrefixes(secretsList []string, rootPath string, newKubeClie case filePrefix: res[secretID] = providers.ReadSecretFile(id) case k8sSecretPrefix: - kubeClient, err := newKubeClientFunc(10*time.Second, 0, 0) // Default QPS and burst to Kube client defaults using 0 - if err != nil { - res[secretID] = secrets.SecretVal{Value: "", ErrorMsg: err.Error()} - } else { - res[secretID] = providers.ReadKubernetesSecret(kubeClient, id) - } + res[secretID] = providers.ReadKubernetesSecret(kubeSecretGetter, id) default: res[secretID] = secrets.SecretVal{Value: "", ErrorMsg: fmt.Sprintf("provider not supported: %s", prefix)} } diff --git a/cmd/secrethelper/secret_helper_test.go b/cmd/secrethelper/secret_helper_test.go index 3b76fa8d461cb..9130d011a8e91 100644 --- a/cmd/secrethelper/secret_helper_test.go +++ b/cmd/secrethelper/secret_helper_test.go @@ -7,29 +7,35 @@ package secrethelper import ( 
"bytes" + "context" "fmt" "path/filepath" "strings" "testing" - "time" - "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" + + "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) func TestReadSecrets(t *testing.T) { - newKubeClientFunc := func(_ time.Duration, _ float32, _ int) (kubernetes.Interface, error) { - return fake.NewSimpleClientset(&v1.Secret{ + newKubeClientFunc := func(namespace, name string) (map[string][]byte, error) { + kubeClient := fake.NewSimpleClientset(&v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "some_name", Namespace: "some_namespace", }, Data: map[string][]byte{"some_key": []byte("some_value")}, - }), nil + }) + + secret, err := kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return secret.Data, nil } tests := []struct { diff --git a/cmd/security-agent/api/agent/agent.go b/cmd/security-agent/api/agent/agent.go index 06ad276eae538..f596227074c3d 100644 --- a/cmd/security-agent/api/agent/agent.go +++ b/cmd/security-agent/api/agent/agent.go @@ -16,9 +16,11 @@ import ( "github.com/DataDog/datadog-agent/cmd/agent/common" "github.com/DataDog/datadog-agent/cmd/agent/common/signals" + "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/settings" "github.com/DataDog/datadog-agent/comp/core/status" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + apiutil "github.com/DataDog/datadog-agent/pkg/api/util" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/status/health" @@ -31,14 +33,16 @@ type Agent struct { statusComponent status.Component settings settings.Component wmeta workloadmeta.Component + secrets secrets.Component } // NewAgent returns a new Agent -func NewAgent(statusComponent status.Component, settings settings.Component, wmeta workloadmeta.Component) *Agent { +func NewAgent(statusComponent status.Component, settings settings.Component, wmeta workloadmeta.Component, secrets secrets.Component) *Agent { return &Agent{ statusComponent: statusComponent, settings: settings, wmeta: wmeta, + secrets: secrets, } } @@ -60,6 +64,7 @@ func (a *Agent) SetupHandlers(r *mux.Router) { verbose := r.URL.Query().Get("verbose") == "true" workloadList(w, verbose, a.wmeta) }).Methods("GET") + r.HandleFunc("/secret/refresh", a.refreshSecrets).Methods("GET") } func workloadList(w http.ResponseWriter, verbose bool, wmeta workloadmeta.Component) { @@ -153,3 +158,19 @@ func (a *Agent) makeFlare(w http.ResponseWriter, _ *http.Request) { } w.Write([]byte(filePath)) } + +func (a *Agent) refreshSecrets(w http.ResponseWriter, req *http.Request) { + if apiutil.Validate(w, req) != nil { + return + } + + res, err := a.secrets.Refresh() + if err != nil { + log.Errorf("error while refresing secrets: %s", err) + w.Header().Set("Content-Type", "application/json") + body, _ := json.Marshal(map[string]string{"error": err.Error()}) + http.Error(w, string(body), http.StatusInternalServerError) + return + } + w.Write([]byte(res)) +} diff --git a/cmd/security-agent/api/server.go b/cmd/security-agent/api/server.go index 307a4e4c6cd43..948ed796b994b 100644 --- a/cmd/security-agent/api/server.go +++ b/cmd/security-agent/api/server.go @@ -21,6 +21,7 @@ import ( 
"github.com/DataDog/datadog-agent/cmd/security-agent/api/agent" "github.com/DataDog/datadog-agent/comp/api/authtoken" + "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/comp/core/settings" "github.com/DataDog/datadog-agent/comp/core/status" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" @@ -38,14 +39,14 @@ type Server struct { } // NewServer creates a new Server instance -func NewServer(statusComponent status.Component, settings settings.Component, wmeta workloadmeta.Component, at authtoken.Component) (*Server, error) { +func NewServer(statusComponent status.Component, settings settings.Component, wmeta workloadmeta.Component, at authtoken.Component, secrets secrets.Component) (*Server, error) { listener, err := newListener() if err != nil { return nil, err } return &Server{ listener: listener, - agent: agent.NewAgent(statusComponent, settings, wmeta), + agent: agent.NewAgent(statusComponent, settings, wmeta, secrets), tlsConfig: at.GetTLSServerConfig(), }, nil } diff --git a/cmd/security-agent/main_windows.go b/cmd/security-agent/main_windows.go index f6a2e3aedf28f..346273ef763bf 100644 --- a/cmd/security-agent/main_windows.go +++ b/cmd/security-agent/main_windows.go @@ -29,7 +29,6 @@ import ( "github.com/DataDog/datadog-agent/comp/api/authtoken/fetchonlyimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" - "github.com/DataDog/datadog-agent/comp/core/configsync" "github.com/DataDog/datadog-agent/comp/core/configsync/configsyncimpl" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" @@ -46,12 +45,14 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd" "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" "github.com/DataDog/datadog-agent/pkg/collector/python" "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/agent" - "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -90,12 +91,12 @@ func (s *service) Run(svcctx context.Context) error { params := &cliParams{} err := fxutil.OneShot( - func(log log.Component, config config.Component, _ secrets.Component, _ statsd.Component, _ sysprobeconfig.Component, + func(log log.Component, config config.Component, secrets secrets.Component, _ statsd.Component, _ sysprobeconfig.Component, telemetry telemetry.Component, _ workloadmeta.Component, _ *cliParams, statusComponent status.Component, _ autoexit.Component, settings settings.Component, wmeta workloadmeta.Component, at authtoken.Component) error { defer start.StopAgent(log) - err := start.RunAgent(log, config, telemetry, statusComponent, settings, wmeta, at) + err := start.RunAgent(log, config, secrets, telemetry, statusComponent, settings, wmeta, at) if err != nil { if errors.Is(err, start.ErrAllComponentsDisabled) { // If all components are disabled, we should exit cleanly @@ -125,7 +126,7 @@ func (s *service) Run(svcctx context.Context) error { 
workloadmetafx.Module(workloadmeta.Params{ AgentType: workloadmeta.Remote, }), - fx.Provide(func(log log.Component, config config.Component, statsd statsd.Component, wmeta workloadmeta.Component) (status.InformationProvider, *agent.RuntimeSecurityAgent, error) { + fx.Provide(func(log log.Component, config config.Component, statsd statsd.Component, wmeta workloadmeta.Component, compression logscompression.Component) (status.InformationProvider, *agent.RuntimeSecurityAgent, error) { stopper := startstop.NewSerialStopper() statsdClient, err := statsd.CreateForHostPort(setup.GetBindHost(config), config.GetInt("dogstatsd_port")) @@ -134,12 +135,12 @@ func (s *service) Run(svcctx context.Context) error { return status.NewInformationProvider(nil), nil, err } - hostnameDetected, err := utils.GetHostnameWithContextAndFallback(context.TODO()) + hostnameDetected, err := hostnameutils.GetHostnameWithContextAndFallback(context.TODO()) if err != nil { return status.NewInformationProvider(nil), nil, err } - runtimeAgent, err := runtime.StartRuntimeSecurity(log, config, hostnameDetected, stopper, statsdClient, wmeta) + runtimeAgent, err := runtime.StartRuntimeSecurity(log, config, hostnameDetected, stopper, statsdClient, wmeta, compression) if err != nil { return status.NewInformationProvider(nil), nil, err } @@ -165,9 +166,7 @@ func (s *service) Run(svcctx context.Context) error { statusimpl.Module(), fetchonlyimpl.Module(), - configsyncimpl.Module(), - // Force the instantiation of the component - fx.Invoke(func(_ configsync.Component) {}), + configsyncimpl.Module(configsyncimpl.NewDefaultParams()), autoexitimpl.Module(), fx.Provide(func(c config.Component) settings.Params { return settings.Params{ @@ -178,6 +177,7 @@ func (s *service) Run(svcctx context.Context) error { } }), settingsimpl.Module(), + logscompressionfx.Module(), ) return err diff --git a/cmd/security-agent/subcommands/check/command.go b/cmd/security-agent/subcommands/check/command.go index 690f9c052bd7e..78cd3b1cbefef 100644 --- a/cmd/security-agent/subcommands/check/command.go +++ b/cmd/security-agent/subcommands/check/command.go @@ -31,10 +31,12 @@ import ( "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" "github.com/DataDog/datadog-agent/comp/dogstatsd" "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" "github.com/DataDog/datadog-agent/pkg/compliance" "github.com/DataDog/datadog-agent/pkg/compliance/k8sconfig" "github.com/DataDog/datadog-agent/pkg/security/common" - "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/hostname" @@ -93,6 +95,7 @@ func commandsWrapped(bundleParamsFactory func() core.BundleParams) []*cobra.Comm fx.Supply(checkArgs), fx.Supply(bundleParams), core.Bundle(), + logscompressionfx.Module(), dogstatsd.ClientBundle, ) }, @@ -109,7 +112,7 @@ func commandsWrapped(bundleParamsFactory func() core.BundleParams) []*cobra.Comm } // RunCheck runs a check -func RunCheck(log log.Component, config config.Component, _ secrets.Component, statsdComp statsd.Component, checkArgs *CliParams) error { +func RunCheck(log log.Component, config config.Component, _ secrets.Component, statsdComp 
statsd.Component, checkArgs *CliParams, compression logscompression.Component) error { hname, err := hostname.Get(context.TODO()) if err != nil { return err @@ -218,7 +221,7 @@ func RunCheck(log log.Component, config config.Component, _ secrets.Component, s } } if checkArgs.report { - if err := reportComplianceEvents(log, events); err != nil { + if err := reportComplianceEvents(log, events, compression); err != nil { log.Error(err) return err } @@ -241,8 +244,8 @@ func dumpComplianceEvents(reportFile string, events []*compliance.CheckEvent) er return nil } -func reportComplianceEvents(log log.Component, events []*compliance.CheckEvent) error { - hostnameDetected, err := utils.GetHostnameWithContextAndFallback(context.Background()) +func reportComplianceEvents(log log.Component, events []*compliance.CheckEvent, compression logscompression.Component) error { + hostnameDetected, err := hostnameutils.GetHostnameWithContextAndFallback(context.Background()) if err != nil { return log.Errorf("Error while getting hostname, exiting: %v", err) } @@ -250,7 +253,7 @@ func reportComplianceEvents(log log.Component, events []*compliance.CheckEvent) if err != nil { return fmt.Errorf("reporter: could not reate log context for compliance: %w", err) } - reporter := compliance.NewLogReporter(hostnameDetected, "compliance-agent", "compliance", endpoints, context) + reporter := compliance.NewLogReporter(hostnameDetected, "compliance-agent", "compliance", endpoints, context, compression) defer reporter.Stop() for _, event := range events { reporter.ReportEvent(event) diff --git a/cmd/security-agent/subcommands/compliance/command.go b/cmd/security-agent/subcommands/compliance/command.go index c6614a89232f7..c80614d164ad2 100644 --- a/cmd/security-agent/subcommands/compliance/command.go +++ b/cmd/security-agent/subcommands/compliance/command.go @@ -24,13 +24,15 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" + compression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" "github.com/DataDog/datadog-agent/pkg/compliance" "github.com/DataDog/datadog-agent/pkg/compliance/aptconfig" "github.com/DataDog/datadog-agent/pkg/compliance/dbconfig" "github.com/DataDog/datadog-agent/pkg/compliance/k8sconfig" complianceutils "github.com/DataDog/datadog-agent/pkg/compliance/utils" "github.com/DataDog/datadog-agent/pkg/security/common" - secutils "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -82,6 +84,7 @@ func complianceLoadCommand(globalParams *command.GlobalParams) *cobra.Command { LogParams: log.ForOneShot(command.LoggerName, "info", true), }), core.Bundle(), + logscompressionfx.Module(), ) }, } @@ -160,6 +163,7 @@ func complianceEventCommand(globalParams *command.GlobalParams) *cobra.Command { LogParams: log.ForOneShot(command.LoggerName, "info", true), }), core.Bundle(), + logscompressionfx.Module(), ) }, Hidden: true, @@ -176,8 +180,8 @@ func complianceEventCommand(globalParams *command.GlobalParams) *cobra.Command { return eventCmd } -func eventRun(log log.Component, eventArgs *eventCliParams) error { - hostnameDetected, err := secutils.GetHostnameWithContextAndFallback(context.Background()) +func eventRun(log log.Component, eventArgs *eventCliParams, 
compression compression.Component) error { + hostnameDetected, err := hostnameutils.GetHostnameWithContextAndFallback(context.Background()) if err != nil { return log.Errorf("Error while getting hostname, exiting: %v", err) } @@ -187,7 +191,7 @@ func eventRun(log log.Component, eventArgs *eventCliParams) error { return err } - reporter := compliance.NewLogReporter(hostnameDetected, eventArgs.sourceName, eventArgs.sourceType, endpoints, dstContext) + reporter := compliance.NewLogReporter(hostnameDetected, eventArgs.sourceName, eventArgs.sourceType, endpoints, dstContext, compression) defer reporter.Stop() eventData := make(map[string]interface{}) diff --git a/cmd/security-agent/subcommands/compliance/compliance.go b/cmd/security-agent/subcommands/compliance/compliance.go index f37f90613d3c4..984e1808ed7f8 100644 --- a/cmd/security-agent/subcommands/compliance/compliance.go +++ b/cmd/security-agent/subcommands/compliance/compliance.go @@ -18,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/constants" + compression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/compliance" "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/telemetry" @@ -28,7 +29,16 @@ import ( // StartCompliance runs the compliance sub-agent running compliance benchmarks // and checks. -func StartCompliance(log log.Component, config config.Component, sysprobeconfig sysprobeconfig.Component, hostname string, stopper startstop.Stopper, statsdClient ddgostatsd.ClientInterface, wmeta workloadmeta.Component) (*compliance.Agent, error) { +func StartCompliance(log log.Component, + config config.Component, + sysprobeconfig sysprobeconfig.Component, + hostname string, + stopper startstop.Stopper, + statsdClient ddgostatsd.ClientInterface, + wmeta workloadmeta.Component, + compression compression.Component, +) (*compliance.Agent, error) { + enabled := config.GetBool("compliance_config.enabled") configDir := config.GetString("compliance_config.dir") metricsEnabled := config.GetBool("compliance_config.metrics.enabled") @@ -67,7 +77,7 @@ func StartCompliance(log log.Component, config config.Component, sysprobeconfig enabledConfigurationsExporters = append(enabledConfigurationsExporters, compliance.DBExporter) } - reporter := compliance.NewLogReporter(hostname, "compliance-agent", "compliance", endpoints, context) + reporter := compliance.NewLogReporter(hostname, "compliance-agent", "compliance", endpoints, context, compression) telemetrySender := telemetry.NewSimpleTelemetrySenderFromStatsd(statsdClient) agent := compliance.NewAgent(telemetrySender, wmeta, compliance.AgentOptions{ diff --git a/cmd/security-agent/subcommands/runtime/activity_dump.go b/cmd/security-agent/subcommands/runtime/activity_dump.go index 0f15351d72e02..1a1a3908433d8 100644 --- a/cmd/security-agent/subcommands/runtime/activity_dump.go +++ b/cmd/security-agent/subcommands/runtime/activity_dump.go @@ -26,10 +26,12 @@ import ( secagent "github.com/DataDog/datadog-agent/pkg/security/agent" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/proto/api" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" activity_tree 
"github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" "github.com/DataDog/datadog-agent/pkg/security/security_profile/dump" "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/pathutils" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -56,6 +58,9 @@ func activityDumpCommands(globalParams *command.GlobalParams) []*cobra.Command { activityDumpCmd := &cobra.Command{ Use: "activity-dump", Short: "activity dump command", + PersistentPreRun: func(_ *cobra.Command, _ []string) { + model.SECLConstants() + }, } activityDumpCmd.AddCommand(generateCommands(globalParams)...) @@ -761,7 +766,7 @@ func activityDumpToWorkloadPolicy(_ log.Component, _ config.Component, _ secrets } generatedRules := dump.GenerateRules(ads, opts) - generatedRules = utils.BuildPatterns(generatedRules) + generatedRules = pathutils.BuildPatterns(generatedRules) policyDef := rules.PolicyDef{ Rules: generatedRules, diff --git a/cmd/security-agent/subcommands/runtime/command.go b/cmd/security-agent/subcommands/runtime/command.go index c0b5e2c0775ff..e95c92c8a325d 100644 --- a/cmd/security-agent/subcommands/runtime/command.go +++ b/cmd/security-agent/subcommands/runtime/command.go @@ -33,6 +33,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" secagent "github.com/DataDog/datadog-agent/pkg/security/agent" "github.com/DataDog/datadog-agent/pkg/security/common" @@ -709,7 +710,7 @@ func reloadRuntimePolicies(_ log.Component, _ config.Component, _ secrets.Compon } // StartRuntimeSecurity starts runtime security -func StartRuntimeSecurity(log log.Component, config config.Component, hostname string, stopper startstop.Stopper, statsdClient ddgostatsd.ClientInterface, wmeta workloadmeta.Component) (*secagent.RuntimeSecurityAgent, error) { +func StartRuntimeSecurity(log log.Component, config config.Component, hostname string, stopper startstop.Stopper, statsdClient ddgostatsd.ClientInterface, wmeta workloadmeta.Component, compression compression.Component) (*secagent.RuntimeSecurityAgent, error) { enabled := config.GetBool("runtime_security_config.enabled") if !enabled { log.Info("Datadog runtime security agent disabled by config") @@ -733,7 +734,7 @@ func StartRuntimeSecurity(log log.Component, config config.Component, hostname s } stopper.Add(ctx) - reporter, err := reporter.NewCWSReporter(hostname, stopper, endpoints, ctx) + reporter, err := reporter.NewCWSReporter(hostname, stopper, endpoints, ctx, compression) if err != nil { return nil, err } diff --git a/cmd/security-agent/subcommands/runtime/command_unsupported.go b/cmd/security-agent/subcommands/runtime/command_unsupported.go index 83f7a48273788..793136ca42ff1 100644 --- a/cmd/security-agent/subcommands/runtime/command_unsupported.go +++ b/cmd/security-agent/subcommands/runtime/command_unsupported.go @@ -17,6 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" secagent "github.com/DataDog/datadog-agent/pkg/security/agent" 
"github.com/DataDog/datadog-agent/pkg/util/startstop" "github.com/DataDog/datadog-go/v5/statsd" @@ -28,7 +29,7 @@ func Commands(*command.GlobalParams) []*cobra.Command { } // StartRuntimeSecurity starts runtime security -func StartRuntimeSecurity(log log.Component, config config.Component, _ string, _ startstop.Stopper, _ statsd.ClientInterface, _ workloadmeta.Component) (*secagent.RuntimeSecurityAgent, error) { +func StartRuntimeSecurity(log log.Component, config config.Component, _ string, _ startstop.Stopper, _ statsd.ClientInterface, _ workloadmeta.Component, _ compression.Component) (*secagent.RuntimeSecurityAgent, error) { enabled := config.GetBool("runtime_security_config.enabled") if !enabled { log.Info("Datadog runtime security agent disabled by config") diff --git a/cmd/security-agent/subcommands/runtime/security_profile.go b/cmd/security-agent/subcommands/runtime/security_profile.go index ff646e0da558f..93ea59e09124d 100644 --- a/cmd/security-agent/subcommands/runtime/security_profile.go +++ b/cmd/security-agent/subcommands/runtime/security_profile.go @@ -158,6 +158,9 @@ func printActivityTreeStats(prefix string, msg *api.ActivityTreeStatsMessage) { fmt.Printf("%s file_nodes_count: %v\n", prefix, msg.GetFileNodesCount()) fmt.Printf("%s dns_nodes_count: %v\n", prefix, msg.GetDNSNodesCount()) fmt.Printf("%s socket_nodes_count: %v\n", prefix, msg.GetSocketNodesCount()) + fmt.Printf("%s imds_nodes_count: %v\n", prefix, msg.GetIMDSNodesCount()) + fmt.Printf("%s syscall_nodes_count: %v\n", prefix, msg.GetSyscallNodesCount()) + fmt.Printf("%s flow_nodes_count: %v\n", prefix, msg.GetFlowNodesCount()) } func printSecurityProfileMessage(msg *api.SecurityProfileMessage) { diff --git a/cmd/security-agent/subcommands/start/command.go b/cmd/security-agent/subcommands/start/command.go index 933a44f4a0135..982375fac643a 100644 --- a/cmd/security-agent/subcommands/start/command.go +++ b/cmd/security-agent/subcommands/start/command.go @@ -33,7 +33,6 @@ import ( "github.com/DataDog/datadog-agent/comp/api/authtoken/fetchonlyimpl" "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" - "github.com/DataDog/datadog-agent/comp/core/configsync" "github.com/DataDog/datadog-agent/comp/core/configsync/configsyncimpl" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/pid" @@ -55,6 +54,8 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd" "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/collector/python" pkgCompliance "github.com/DataDog/datadog-agent/pkg/compliance" @@ -62,7 +63,7 @@ import ( commonsettings "github.com/DataDog/datadog-agent/pkg/config/settings" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/agent" - "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/coredump" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -124,13 +125,13 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { fx.Provide(func(config 
config.Component, statsd statsd.Component) (ddgostatsd.ClientInterface, error) { return statsd.CreateForHostPort(pkgconfigsetup.GetBindHost(config), config.GetInt("dogstatsd_port")) }), - fx.Provide(func(stopper startstop.Stopper, log log.Component, config config.Component, statsdClient ddgostatsd.ClientInterface, wmeta workloadmeta.Component) (status.InformationProvider, *agent.RuntimeSecurityAgent, error) { - hostnameDetected, err := utils.GetHostnameWithContextAndFallback(context.TODO()) + fx.Provide(func(stopper startstop.Stopper, log log.Component, config config.Component, statsdClient ddgostatsd.ClientInterface, wmeta workloadmeta.Component, compression logscompression.Component) (status.InformationProvider, *agent.RuntimeSecurityAgent, error) { + hostnameDetected, err := hostnameutils.GetHostnameWithContextAndFallback(context.TODO()) if err != nil { return status.NewInformationProvider(nil), nil, err } - runtimeAgent, err := runtime.StartRuntimeSecurity(log, config, hostnameDetected, stopper, statsdClient, wmeta) + runtimeAgent, err := runtime.StartRuntimeSecurity(log, config, hostnameDetected, stopper, statsdClient, wmeta, compression) if err != nil { return status.NewInformationProvider(nil), nil, err } @@ -142,14 +143,14 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { // TODO - components: Do not remove runtimeAgent ref until "github.com/DataDog/datadog-agent/pkg/security/agent" is a component so they're not GCed return status.NewInformationProvider(runtimeAgent.StatusProvider()), runtimeAgent, nil }), - fx.Provide(func(stopper startstop.Stopper, log log.Component, config config.Component, statsdClient ddgostatsd.ClientInterface, sysprobeconfig sysprobeconfig.Component, wmeta workloadmeta.Component) (status.InformationProvider, *pkgCompliance.Agent, error) { - hostnameDetected, err := utils.GetHostnameWithContextAndFallback(context.TODO()) + fx.Provide(func(stopper startstop.Stopper, log log.Component, config config.Component, statsdClient ddgostatsd.ClientInterface, sysprobeconfig sysprobeconfig.Component, wmeta workloadmeta.Component, compression logscompression.Component) (status.InformationProvider, *pkgCompliance.Agent, error) { + hostnameDetected, err := hostnameutils.GetHostnameWithContextAndFallback(context.TODO()) if err != nil { return status.NewInformationProvider(nil), nil, err } // start compliance security agent - complianceAgent, err := compliance.StartCompliance(log, config, sysprobeconfig, hostnameDetected, stopper, statsdClient, wmeta) + complianceAgent, err := compliance.StartCompliance(log, config, sysprobeconfig, hostnameDetected, stopper, statsdClient, wmeta, compression) if err != nil { return status.NewInformationProvider(nil), nil, err } @@ -173,9 +174,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { }), statusimpl.Module(), fetchonlyimpl.Module(), - configsyncimpl.Module(), - // Force the instantiation of the component - fx.Invoke(func(_ configsync.Component) {}), + configsyncimpl.Module(configsyncimpl.NewDefaultParams()), autoexitimpl.Module(), fx.Supply(pidimpl.NewParams(params.pidfilePath)), fx.Provide(func(c config.Component) settings.Params { @@ -187,6 +186,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { } }), settingsimpl.Module(), + logscompressionfx.Module(), ) }, } @@ -201,10 +201,10 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { // TODO(components): note how workloadmeta is passed anonymously, it is still required as it is used // as a global. 
This should eventually be fixed and all workloadmeta interactions should be via the // injected instance. -func start(log log.Component, config config.Component, _ secrets.Component, _ statsd.Component, _ sysprobeconfig.Component, telemetry telemetry.Component, statusComponent status.Component, _ pid.Component, _ autoexit.Component, settings settings.Component, wmeta workloadmeta.Component, at authtoken.Component) error { +func start(log log.Component, config config.Component, secrets secrets.Component, _ statsd.Component, _ sysprobeconfig.Component, telemetry telemetry.Component, statusComponent status.Component, _ pid.Component, _ autoexit.Component, settings settings.Component, wmeta workloadmeta.Component, at authtoken.Component) error { defer StopAgent(log) - err := RunAgent(log, config, telemetry, statusComponent, settings, wmeta, at) + err := RunAgent(log, config, secrets, telemetry, statusComponent, settings, wmeta, at) if errors.Is(err, ErrAllComponentsDisabled) || errors.Is(err, errNoAPIKeyConfigured) { return nil } @@ -256,7 +256,7 @@ var ErrAllComponentsDisabled = errors.New("all security-agent component are disa var errNoAPIKeyConfigured = errors.New("no API key configured") // RunAgent initialized resources and starts API server -func RunAgent(log log.Component, config config.Component, telemetry telemetry.Component, statusComponent status.Component, settings settings.Component, wmeta workloadmeta.Component, at authtoken.Component) (err error) { +func RunAgent(log log.Component, config config.Component, secrets secrets.Component, telemetry telemetry.Component, statusComponent status.Component, settings settings.Component, wmeta workloadmeta.Component, at authtoken.Component) (err error) { if err := coredump.Setup(config); err != nil { log.Warnf("Can't setup core dumps: %v, core dumps might not be available after a crash", err) } @@ -272,7 +272,7 @@ func RunAgent(log log.Component, config config.Component, telemetry telemetry.Co return ErrAllComponentsDisabled } - if !config.IsSet("api_key") { + if !config.IsConfigured("api_key") { log.Critical("No API key configured, exiting") // A sleep is necessary so that sysV doesn't think the agent has failed @@ -299,7 +299,7 @@ func RunAgent(log log.Component, config config.Component, telemetry telemetry.Co } }() - srv, err = api.NewServer(statusComponent, settings, wmeta, at) + srv, err = api.NewServer(statusComponent, settings, wmeta, at, secrets) if err != nil { return log.Errorf("Error while creating api server, exiting: %v", err) } diff --git a/cmd/serverless-init/cloudservice/cloudrun.go b/cmd/serverless-init/cloudservice/cloudrun.go index 597b414984034..9750fefd3bfbb 100644 --- a/cmd/serverless-init/cloudservice/cloudrun.go +++ b/cmd/serverless-init/cloudservice/cloudrun.go @@ -80,7 +80,7 @@ func (c *CloudRun) GetTags() map[string]string { return c.getFunctionTags(tags) } - tags["_dd.gcr.resource_name"] = "projects/" + tags["project_id"] + "/locations/" + tags["location"] + "/services/" + serviceName + tags["gcr.resource_name"] = "projects/" + tags["project_id"] + "/locations/" + tags["location"] + "/services/" + serviceName return tags } @@ -96,7 +96,7 @@ func (c *CloudRun) getFunctionTags(tags map[string]string) map[string]string { tags[c.spanNamespace+"function_signature_type"] = functionSignatureType } - tags["_dd.gcrfx.resource_name"] = "projects/" + tags["project_id"] + "/locations/" + tags["location"] + "/services/" + tags["service_name"] + "/functions/" + functionTarget + tags["gcrfx.resource_name"] = "projects/" + 
tags["project_id"] + "/locations/" + tags["location"] + "/services/" + tags["service_name"] + "/functions/" + functionTarget return tags } diff --git a/cmd/serverless-init/cloudservice/cloudrun_test.go b/cmd/serverless-init/cloudservice/cloudrun_test.go index e5733745ccb94..bcc7d934123a4 100644 --- a/cmd/serverless-init/cloudservice/cloudrun_test.go +++ b/cmd/serverless-init/cloudservice/cloudrun_test.go @@ -35,15 +35,15 @@ func TestGetCloudRunTags(t *testing.T) { tags := service.GetTags() assert.Equal(t, map[string]string{ - "container_id": "test_container", - "gcr.container_id": "test_container", - "gcr.location": "test_region", - "location": "test_region", - "project_id": "test_project", - "gcr.project_id": "test_project", - "origin": "cloudrun", - "_dd.origin": "cloudrun", - "_dd.gcr.resource_name": "projects/test_project/locations/test_region/services/", + "container_id": "test_container", + "gcr.container_id": "test_container", + "gcr.location": "test_region", + "location": "test_region", + "project_id": "test_project", + "gcr.project_id": "test_project", + "origin": "cloudrun", + "_dd.origin": "cloudrun", + "gcr.resource_name": "projects/test_project/locations/test_region/services/", }, tags) } @@ -73,19 +73,19 @@ func TestGetCloudRunTagsWithEnvironmentVariables(t *testing.T) { tags := service.GetTags() assert.Equal(t, map[string]string{ - "container_id": "test_container", - "gcr.container_id": "test_container", - "location": "test_region", - "gcr.location": "test_region", - "project_id": "test_project", - "gcr.project_id": "test_project", - "service_name": "test_service", - "gcr.service_name": "test_service", - "gcr.revision_name": "test_revision", - "revision_name": "test_revision", - "origin": "cloudrun", - "_dd.origin": "cloudrun", - "_dd.gcr.resource_name": "projects/test_project/locations/test_region/services/test_service", + "container_id": "test_container", + "gcr.container_id": "test_container", + "location": "test_region", + "gcr.location": "test_region", + "project_id": "test_project", + "gcr.project_id": "test_project", + "service_name": "test_service", + "gcr.service_name": "test_service", + "gcr.revision_name": "test_revision", + "revision_name": "test_revision", + "origin": "cloudrun", + "_dd.origin": "cloudrun", + "gcr.resource_name": "projects/test_project/locations/test_region/services/test_service", }, tags) } @@ -134,6 +134,6 @@ func TestGetCloudRunFunctionTagsWithEnvironmentVariables(t *testing.T) { "_dd.origin": "cloudrun", "gcrfx.function_target": "test_target", "gcrfx.function_signature_type": "test_signature", - "_dd.gcrfx.resource_name": "projects/test_project/locations/test_region/services/test_service/functions/test_target", + "gcrfx.resource_name": "projects/test_project/locations/test_region/services/test_service/functions/test_target", }, tags) } diff --git a/cmd/serverless-init/log/log.go b/cmd/serverless-init/log/log.go index 3065af97ce498..77e69704ff897 100644 --- a/cmd/serverless-init/log/log.go +++ b/cmd/serverless-init/log/log.go @@ -16,6 +16,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" logConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/logs/sources" serverlessLogs "github.com/DataDog/datadog-agent/pkg/serverless/logs" serverlessTag "github.com/DataDog/datadog-agent/pkg/serverless/tags" @@ -53,8 +54,8 @@ func 
CreateConfig(origin string) *Config { } // SetupLogAgent creates the log agent and sets the base tags -func SetupLogAgent(conf *Config, tags map[string]string, tagger tagger.Component) logsAgent.ServerlessLogsAgent { - logsAgent, _ := serverlessLogs.SetupLogAgent(conf.Channel, sourceName, conf.source, tagger) +func SetupLogAgent(conf *Config, tags map[string]string, tagger tagger.Component, compression logscompression.Component) logsAgent.ServerlessLogsAgent { + logsAgent, _ := serverlessLogs.SetupLogAgent(conf.Channel, sourceName, conf.source, tagger, compression) tagsArray := serverlessTag.MapToArray(tags) diff --git a/cmd/serverless-init/main.go b/cmd/serverless-init/main.go index b4e2076350ff5..2fe4a799017da 100644 --- a/cmd/serverless-init/main.go +++ b/cmd/serverless-init/main.go @@ -31,6 +31,8 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" localTaggerFx "github.com/DataDog/datadog-agent/comp/core/tagger/fx" nooptelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" @@ -54,7 +56,7 @@ import ( tracelog "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const datadogConfigPath = "datadog.yaml" @@ -80,9 +82,10 @@ func main() { workloadmetafx.Module(workloadmeta.NewParams()), fx.Supply(coreconfig.NewParams("", coreconfig.WithConfigMissingOK(true))), coreconfig.Module(), + logscompressionfx.Module(), fx.Supply(secrets.NewEnabledParams()), secretsimpl.Module(), - fx.Provide(func(secrets secrets.Component) optional.Option[secrets.Component] { return optional.NewOption(secrets) }), + fx.Provide(func(secrets secrets.Component) option.Option[secrets.Component] { return option.New(secrets) }), fx.Supply(logdef.ForOneShot(modeConf.LoggerName, "off", true)), logfx.Module(), nooptelemetry.Module(), @@ -97,8 +100,8 @@ func main() { } // removing these unused dependencies will cause silent crash due to fx framework -func run(_ secrets.Component, _ autodiscovery.Component, _ healthprobeDef.Component, tagger tagger.Component) error { - cloudService, logConfig, traceAgent, metricAgent, logsAgent := setup(modeConf, tagger) +func run(_ secrets.Component, _ autodiscovery.Component, _ healthprobeDef.Component, tagger tagger.Component, compression logscompression.Component) error { + cloudService, logConfig, traceAgent, metricAgent, logsAgent := setup(modeConf, tagger, compression) err := modeConf.Runner(logConfig) @@ -108,7 +111,7 @@ func run(_ secrets.Component, _ autodiscovery.Component, _ healthprobeDef.Compon return err } -func setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *serverlessInitLog.Config, trace.ServerlessTraceAgent, *metrics.ServerlessMetricAgent, logsAgent.ServerlessLogsAgent) { +func setup(_ mode.Conf, tagger tagger.Component, compression logscompression.Component) (cloudservice.CloudService, *serverlessInitLog.Config, trace.ServerlessTraceAgent, *metrics.ServerlessMetricAgent, logsAgent.ServerlessLogsAgent) { tracelog.SetLogger(corelogger{}) // load proxy settings @@ -139,7 +142,7 @@ func 
setup(_ mode.Conf, tagger tagger.Component) (cloudservice.CloudService, *se if err != nil { log.Debugf("Error loading config: %v\n", err) } - logsAgent := serverlessInitLog.SetupLogAgent(agentLogConfig, tags, tagger) + logsAgent := serverlessInitLog.SetupLogAgent(agentLogConfig, tags, tagger, compression) traceAgent := setupTraceAgent(tags, tagger) diff --git a/cmd/serverless-init/main_test.go b/cmd/serverless-init/main_test.go index 2b99ef0d0be46..ca3dbb076cdad 100644 --- a/cmd/serverless-init/main_test.go +++ b/cmd/serverless-init/main_test.go @@ -19,6 +19,7 @@ import ( "github.com/DataDog/datadog-agent/cmd/serverless-init/mode" "github.com/DataDog/datadog-agent/comp/core/tagger/mock" "github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" + compressionmock "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/serverless/logs" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -29,6 +30,7 @@ func TestTagsSetup(t *testing.T) { t.Skip() fakeTagger := mock.SetupFakeTagger(t) + fakeCompression := compressionmock.NewMockCompressor() configmock.New(t) @@ -41,7 +43,7 @@ func TestTagsSetup(t *testing.T) { allTags := append(ddTags, ddExtraTags...) - _, _, traceAgent, metricAgent, _ := setup(mode.Conf{}, fakeTagger) + _, _, traceAgent, metricAgent, _ := setup(mode.Conf{}, fakeTagger, fakeCompression) defer traceAgent.Stop() defer metricAgent.Stop() assert.Subset(t, metricAgent.GetExtraTags(), allTags) diff --git a/cmd/serverless-init/metric/metric.go b/cmd/serverless-init/metric/metric.go index 0cfac6823a628..ff5ff64a57575 100644 --- a/cmd/serverless-init/metric/metric.go +++ b/cmd/serverless-init/metric/metric.go @@ -12,6 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/metrics" + "github.com/DataDog/datadog-agent/pkg/util/log" ) // AddColdStartMetric adds the coldstart metric to the demultiplexer @@ -29,6 +30,10 @@ func AddShutdownMetric(metricPrefix string, tags []string, _ time.Time, demux ag } func add(name string, tags []string, timestamp time.Time, demux aggregator.Demultiplexer) { + if demux == nil { + log.Debugf("Cannot add metric %s, the metric agent is not running", name) + return + } metricTimestamp := float64(timestamp.UnixNano()) / float64(time.Second) demux.AggregateSample(metrics.MetricSample{ Name: name, diff --git a/cmd/serverless-init/metric/metric_test.go b/cmd/serverless-init/metric/metric_test.go index facd4acabca0c..4aafa0cb19481 100644 --- a/cmd/serverless-init/metric/metric_test.go +++ b/cmd/serverless-init/metric/metric_test.go @@ -17,7 +17,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -64,6 +65,17 @@ func TestAddShutdownMetric(t *testing.T) { assert.Equal(t, metric.Tags[1], "tagb:valueb") } +func TestNilDemuxDoesNotPanic(t *testing.T) { + demux := createDemultiplexer(t) + timestamp := time.Now() + // Pass nil for demux to mimic when a port is blocked and dogstatsd does not start properly. 
+ // This previously led to a panic and segmentation fault + add("metric", []string{"taga:valuea", "tagb:valueb"}, timestamp, nil) + generatedMetrics, timedMetrics := demux.WaitForSamples(100 * time.Millisecond) + assert.Equal(t, 0, len(timedMetrics)) + assert.Equal(t, 0, len(generatedMetrics)) +} + func createDemultiplexer(t *testing.T) demultiplexer.FakeSamplerMock { - return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), compressionmock.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) + return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), logscompression.MockModule(), metricscompression.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) } diff --git a/cmd/serverless/main.go b/cmd/serverless/main.go index a679ceef10181..e8ab54a8b74dc 100644 --- a/cmd/serverless/main.go +++ b/cmd/serverless/main.go @@ -19,6 +19,8 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" taggernoop "github.com/DataDog/datadog-agent/comp/core/tagger/fx-noop" logConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" "github.com/DataDog/datadog-agent/pkg/config/model" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" @@ -78,6 +80,7 @@ func main() { err := fxutil.OneShot( runAgent, taggernoop.Module(), + logscompressionfx.Module(), ) if err != nil { @@ -86,7 +89,8 @@ func main() { } } -func runAgent(tagger tagger.Component) { +func runAgent(tagger tagger.Component, compression logscompression.Component) { + startTime := time.Now() setupLambdaAgentOverrides() @@ -125,7 +129,7 @@ func runAgent(tagger tagger.Component) { go startTraceAgent(&wg, lambdaSpanChan, coldStartSpanId, serverlessDaemon, tagger, rcService) go startOtlpAgent(&wg, metricAgent, serverlessDaemon, tagger) - go startTelemetryCollection(&wg, serverlessID, logChannel, serverlessDaemon, tagger) + go startTelemetryCollection(&wg, serverlessID, logChannel, serverlessDaemon, tagger, compression) // start appsec appsecProxyProcessor := startAppSec(serverlessDaemon) @@ -298,7 +302,7 @@ func startAppSec(serverlessDaemon *daemon.Daemon) *httpsec.ProxyLifecycleProcess return appsecProxyProcessor } -func startTelemetryCollection(wg *sync.WaitGroup, serverlessID registration.ID, logChannel chan *logConfig.ChannelMessage, serverlessDaemon *daemon.Daemon, tagger tagger.Component) { +func startTelemetryCollection(wg *sync.WaitGroup, serverlessID registration.ID, logChannel chan *logConfig.ChannelMessage, serverlessDaemon *daemon.Daemon, tagger tagger.Component, compression logscompression.Component) { defer wg.Done() if os.Getenv(daemon.LocalTestEnvVar) == "true" || os.Getenv(daemon.LocalTestEnvVar) == "1" { log.Debug("Running in local test mode. 
Telemetry collection HTTP route won't be enabled") @@ -322,7 +326,7 @@ func startTelemetryCollection(wg *sync.WaitGroup, serverlessID registration.ID, if logRegistrationError != nil { log.Error("Can't subscribe to logs:", logRegistrationError) } else { - logsAgent, err := serverlessLogs.SetupLogAgent(logChannel, "AWS Logs", "lambda", tagger) + logsAgent, err := serverlessLogs.SetupLogAgent(logChannel, "AWS Logs", "lambda", tagger, compression) if err != nil { log.Errorf("Error setting up the logs agent: %s", err) } diff --git a/cmd/system-probe/api/debug/handlers_linux.go b/cmd/system-probe/api/debug/handlers_linux.go index 07ba06c49354f..54737415699de 100644 --- a/cmd/system-probe/api/debug/handlers_linux.go +++ b/cmd/system-probe/api/debug/handlers_linux.go @@ -9,14 +9,110 @@ package debug import ( + "bytes" "context" "errors" "fmt" + "io" "net/http" "os/exec" + "regexp" + "strconv" + "syscall" "time" + + "golang.org/x/sys/unix" ) +var klogRegexp = regexp.MustCompile(`<(\d+)>(.*)`) + +var klogLevels = []string{ + "emerg", + "alert", + "crit", + "err", + "warn", + "notice", + "info", + "debug", +} + +// lowest 3 bits are the log level, remaining bits are the facility +const klogFacilityShift = 3 +const klogLevelMask = (1 << klogFacilityShift) - 1 + +func klogLevelName(level int) string { + return klogLevels[level&klogLevelMask] +} + +func readAllDmesg() ([]byte, error) { + n, err := syscall.Klogctl(unix.SYSLOG_ACTION_SIZE_BUFFER, nil) + if err != nil { + return nil, fmt.Errorf("failed to query size of log buffer [%w]", err) + } + + b := make([]byte, n) + + m, err := syscall.Klogctl(unix.SYSLOG_ACTION_READ_ALL, b) + if err != nil { + return nil, fmt.Errorf("failed to read messages from log buffer [%w]", err) + } + + return b[:m], nil +} + +func parseDmesg(buffer []byte) (string, error) { + buf := bytes.NewBuffer(buffer) + var result string + + for { + line, err := buf.ReadString('\n') + if err == io.EOF { + break + } else if err != nil { + return result, err + } + + levelName := "info" + message := line + + // convert the numeric log level to a string + parts := klogRegexp.FindStringSubmatch(line) + if parts != nil { + message = parts[2] + + digits := parts[1] + level, err := strconv.Atoi(digits) + if err == nil { + levelName = klogLevelName(level) + } + } + + result += fmt.Sprintf("%-6s: %s\n", levelName, message) + } + + return result, nil +} + +// HandleLinuxDmesg writes linux dmesg into the HTTP response. +func HandleLinuxDmesg(w http.ResponseWriter, _ *http.Request) { + dmesg, err := readAllDmesg() + if err != nil { + w.WriteHeader(500) + fmt.Fprintf(w, "failed to read dmesg: %s", err) + return + } + + dmesgStr, err := parseDmesg(dmesg) + if err != nil { + w.WriteHeader(500) + fmt.Fprintf(w, "failed to parse dmesg: %s", err) + return + } + + io.WriteString(w, dmesgStr) +} + // handleCommand runs commandName with the provided arguments and writes it to the HTTP response. // If the command exits with a failure or doesn't exist in the PATH, it will still 200 but report the failure. // Any other kind of error will 500. diff --git a/cmd/system-probe/api/debug/handlers_linux_test.go b/cmd/system-probe/api/debug/handlers_linux_test.go new file mode 100644 index 0000000000000..3f2d20a761134 --- /dev/null +++ b/cmd/system-probe/api/debug/handlers_linux_test.go @@ -0,0 +1,24 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2024-present Datadog, Inc. + +//go:build linux + +// Package debug contains handlers for debug information global to all of system-probe +package debug + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +const userFacility = 8 + +func TestKlogLevelName(t *testing.T) { + require.Equal(t, "emerg", klogLevelName(0)) + require.Equal(t, "notice", klogLevelName(5)) + + require.Equal(t, "notice", klogLevelName(userFacility|5)) +} diff --git a/cmd/system-probe/api/debug/handlers_nolinux.go b/cmd/system-probe/api/debug/handlers_nolinux.go index 246f4a3a7c78a..1e8a84189e07c 100644 --- a/cmd/system-probe/api/debug/handlers_nolinux.go +++ b/cmd/system-probe/api/debug/handlers_nolinux.go @@ -13,6 +13,12 @@ import ( "net/http" ) +// HandleLinuxDmesg is not supported +func HandleLinuxDmesg(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(500) + io.WriteString(w, "HandleLinuxDmesg is not supported on this platform") +} + // HandleSelinuxSestatus is not supported func HandleSelinuxSestatus(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(500) diff --git a/cmd/system-probe/api/module/common.go b/cmd/system-probe/api/module/common.go index cec9bf608de45..8bbad735a27e8 100644 --- a/cmd/system-probe/api/module/common.go +++ b/cmd/system-probe/api/module/common.go @@ -12,6 +12,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/core/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "go.uber.org/fx" ) @@ -30,7 +31,8 @@ type Module interface { type FactoryDependencies struct { fx.In - WMeta workloadmeta.Component - Tagger tagger.Component - Telemetry telemetry.Component + WMeta workloadmeta.Component + Tagger tagger.Component + Telemetry telemetry.Component + Compression logscompression.Component } diff --git a/cmd/system-probe/api/server.go b/cmd/system-probe/api/server.go index f4d9e85522d91..f0fbe81919f30 100644 --- a/cmd/system-probe/api/server.go +++ b/cmd/system-probe/api/server.go @@ -59,6 +59,7 @@ func StartServer(cfg *sysconfigtypes.Config, telemetry telemetry.Component, wmet if runtime.GOOS == "linux" { mux.HandleFunc("/debug/ebpf_btf_loader_info", ebpf.HandleBTFLoaderInfo) + mux.HandleFunc("/debug/dmesg", debug.HandleLinuxDmesg) mux.HandleFunc("/debug/selinux_sestatus", debug.HandleSelinuxSestatus) mux.HandleFunc("/debug/selinux_semodule_list", debug.HandleSelinuxSemoduleList) } diff --git a/cmd/system-probe/api/server/listener_windows.go b/cmd/system-probe/api/server/listener_windows.go index d4e13d58c80f7..3bc2bdd862205 100644 --- a/cmd/system-probe/api/server/listener_windows.go +++ b/cmd/system-probe/api/server/listener_windows.go @@ -8,6 +8,10 @@ package server import ( "fmt" "net" + "strings" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/winutil" "github.com/Microsoft/go-winio" ) @@ -19,18 +23,76 @@ const ( namedPipeInputBufferSize = int32(4096) namedPipeOutputBufferSize = int32(4096) - // DACL for the system probe named pipe. + // DACL template for the system probe named pipe that allows a specific SID. // SE_DACL_PROTECTED (P), SE_DACL_AUTO_INHERITED (AI) - // Allow Everyone (WD) - // nolint:revive // TODO: Hardened DACL and ensure the datadogagent run-as user is allowed. 
- namedPipeSecurityDescriptor = "D:PAI(A;;FA;;;WD)" + // Allow Administrators (BA), Local System (SY) + // Allow a custom SID, NO_PROPAGATE_INHERIT_ACE (NP) + namedPipeSecurityDescriptorTemplate = "D:PAI(A;;FA;;;BA)(A;;FA;;;SY)(A;NP;FRFW;;;%s)" + + // Default DACL for the system probe named pipe. + // Allow Administrators (BA), Local System (SY) + namedPipeDefaultSecurityDescriptor = "D:PAI(A;;FA;;;BA)(A;;FA;;;SY)" + + // SID representing Everyone + everyoneSid = "S-1-1-0" ) +// setupSecurityDescriptor prepares the security descriptor for the system probe named pipe. +func setupSecurityDescriptor() (string, error) { + // Set up the DACL to allow ddagentuser. + sid, err := winutil.GetDDAgentUserSID() + if err != nil { + return "", fmt.Errorf("failed to get SID for ddagentuser: %s", err) + } + + sidString := sid.String() + + // Sanity checks + if len(sidString) == 0 { + return "", fmt.Errorf("failed to get SID string from ddagentuser") + } + + if sidString == everyoneSid { + return "", fmt.Errorf("ddagentuser as Everyone is not supported") + } + + sd, err := formatSecurityDescriptorWithSid(sidString) + if err != nil { + return "", fmt.Errorf("invalid SID from ddagentuser: %s", err) + } + + log.Debugf("named pipe DACL prepared with ddagentuser %s", sidString) + return sd, nil +} + +// formatSecurityDescriptorWithSid creates a security descriptor string for the system probe +// named pipe that allows a set of default users and the specified SID. +func formatSecurityDescriptorWithSid(sidString string) (string, error) { + // Sanity check + if !strings.HasPrefix(sidString, "S-") { + return "", fmt.Errorf("invalid SID %s", sidString) + } + return fmt.Sprintf(namedPipeSecurityDescriptorTemplate, sidString), nil +} + // NewListener sets up a named pipe listener for the system probe service. func NewListener(namedPipeName string) (net.Listener, error) { - // The DACL must allow the run-as user of datadogagent. + sd, err := setupSecurityDescriptor() + if err != nil { + log.Errorf("failed to setup security descriptor, ddagentuser is denied: %s", err) + + // The default security descriptor does not include ddagentuser. + // Queries from the DD agent will fail. + sd = namedPipeDefaultSecurityDescriptor + } + + return newListenerWithSecurityDescriptor(namedPipeName, sd) +} + +// newListenerWithSecurityDescriptor sets up a named pipe listener with a security descriptor. +func newListenerWithSecurityDescriptor(namedPipeName string, securityDescriptor string) (net.Listener, error) { config := winio.PipeConfig{ - SecurityDescriptor: namedPipeSecurityDescriptor, + SecurityDescriptor: securityDescriptor, InputBufferSize: namedPipeInputBufferSize, OutputBufferSize: namedPipeOutputBufferSize, } @@ -41,5 +103,8 @@ func NewListener(namedPipeName string) (net.Listener, error) { if err != nil { return nil, fmt.Errorf("named pipe listener %q: %s", namedPipeName, err) } + + log.Infof("named pipe %s ready", namedPipeName) + return namedPipe, nil } diff --git a/cmd/system-probe/api/server/listener_windows_testutil.go b/cmd/system-probe/api/server/listener_windows_testutil.go new file mode 100644 index 0000000000000..4700426453d9d --- /dev/null +++ b/cmd/system-probe/api/server/listener_windows_testutil.go @@ -0,0 +1,30 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build test && windows + +package server + +import ( + "net" + "os/user" +) + +// NewListenerForCurrentUser sets up a named pipe listener for tests that mock system probe. +// Do not use this for the normal system probe named pipe. +func NewListenerForCurrentUser(namedPipeName string) (net.Listener, error) { + // Prepare a security descriptor that allows the current user. + currentUser, err := user.Current() + if err != nil { + return nil, err + } + + sd, err := formatSecurityDescriptorWithSid(currentUser.Uid) + if err != nil { + return nil, err + } + + return newListenerWithSecurityDescriptor(namedPipeName, sd) +} diff --git a/cmd/system-probe/command/command.go b/cmd/system-probe/command/command.go index e9f82cdd449d0..e76e59405c407 100644 --- a/cmd/system-probe/command/command.go +++ b/cmd/system-probe/command/command.go @@ -9,6 +9,7 @@ package command import ( "fmt" "os" + "slices" "strings" "github.com/fatih/color" @@ -76,21 +77,24 @@ Runtime Security Monitoring, Universal Service Monitoring, and others.`, func SetDefaultCommandIfNonePresent(rootCmd *cobra.Command) { var subCommandNames []string for _, command := range rootCmd.Commands() { - subCommandNames = append(subCommandNames, append(command.Aliases, command.Name())...) + subCommandNames = append(subCommandNames, command.Name()) + subCommandNames = append(subCommandNames, command.Aliases...) } + helpAndCompletionCommands := []string{"help", "-h", "--help", "completion"} + args := []string{os.Args[0], "run"} if len(os.Args) > 1 { potentialCommand := os.Args[1] - if potentialCommand == "help" || potentialCommand == "-h" || potentialCommand == "completion" { + + if slices.Contains(helpAndCompletionCommands, potentialCommand) { return } - for _, command := range subCommandNames { - if command == potentialCommand { - return - } + if slices.Contains(subCommandNames, potentialCommand) { + return } + if !strings.HasPrefix(potentialCommand, "-") { // run command takes no positional arguments, so if one is passed // fallback to default cobra handling for good errors diff --git a/cmd/system-probe/config/adjust_npm.go b/cmd/system-probe/config/adjust_npm.go index e1be10ae08d79..98a5734827357 100644 --- a/cmd/system-probe/config/adjust_npm.go +++ b/cmd/system-probe/config/adjust_npm.go @@ -26,6 +26,10 @@ const ( func adjustNetwork(cfg model.Config) { ebpflessEnabled := cfg.GetBool(netNS("enable_ebpfless")) + deprecateInt(cfg, spNS("closed_connection_flush_threshold"), netNS("closed_connection_flush_threshold")) + deprecateInt(cfg, spNS("closed_channel_size"), netNS("closed_channel_size")) + applyDefault(cfg, netNS("closed_channel_size"), 500) + limitMaxInt(cfg, spNS("max_conns_per_message"), maxConnsMessageBatchSize) if cfg.GetBool(spNS("disable_tcp")) { diff --git a/cmd/system-probe/config/adjust_security.go b/cmd/system-probe/config/adjust_security.go index f5b3d27ddf80c..7a98ab8e75a3c 100644 --- a/cmd/system-probe/config/adjust_security.go +++ b/cmd/system-probe/config/adjust_security.go @@ -30,7 +30,7 @@ func adjustSecurity(cfg model.Config) { if cfg.GetBool(secNS("enabled")) { // if runtime is enabled then we enable fim as well (except if force disabled) - if runtime.GOOS != "windows" || !cfg.IsSet(secNS("fim_enabled")) { + if runtime.GOOS != "windows" || !cfg.IsConfigured(secNS("fim_enabled")) { cfg.Set(secNS("fim_enabled"), true, model.SourceAgentRuntime) } } else { diff --git a/cmd/system-probe/config/adjust_usm.go b/cmd/system-probe/config/adjust_usm.go index aa473538b5002..0cd888c87f37d 100644 --- 
a/cmd/system-probe/config/adjust_usm.go +++ b/cmd/system-probe/config/adjust_usm.go @@ -71,6 +71,15 @@ func adjustUSM(cfg model.Config) { applyDefault(cfg, smNS("max_postgres_stats_buffered"), 100000) applyDefault(cfg, smNS("max_redis_stats_buffered"), 100000) + // kernel_buffer_pages determines the number of pages allocated *per CPU* + // for buffering kernel data, whether using a perf buffer or a ring buffer. + applyDefault(cfg, smNS("kernel_buffer_pages"), 16) + + // data_channel_size defines the size of the Go channel that buffers events. + // Each event has a fixed size of approximately 4KB (sizeof(batch_data_t)). + // By setting this value to 100, the channel will buffer up to ~400KB of data in the Go heap memory. + applyDefault(cfg, smNS("data_channel_size"), 100) + validateInt(cfg, smNS("http_notification_threshold"), cfg.GetInt(smNS("max_tracked_http_connections"))/2, func(v int) error { limit := cfg.GetInt(smNS("max_tracked_http_connections")) if v >= limit { diff --git a/cmd/system-probe/config/config.go b/cmd/system-probe/config/config.go index c67bd69c18aca..e5f1b2543c454 100644 --- a/cmd/system-probe/config/config.go +++ b/cmd/system-probe/config/config.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/config/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -195,7 +195,7 @@ func SetupOptionalDatadogConfigWithDir(configDir, configFile string) error { pkgconfigsetup.Datadog().SetConfigFile(configFile) } // load the configuration - _, err := pkgconfigsetup.LoadDatadogCustom(pkgconfigsetup.Datadog(), "datadog.yaml", optional.NewNoneOption[secrets.Component](), pkgconfigsetup.SystemProbe().GetEnvVars()) + _, err := pkgconfigsetup.LoadDatadogCustom(pkgconfigsetup.Datadog(), "datadog.yaml", option.None[secrets.Component](), pkgconfigsetup.SystemProbe().GetEnvVars()) // If `!failOnMissingFile`, do not issue an error if we cannot find the default config file. 
var e viper.ConfigFileNotFoundError if err != nil && !errors.As(err, &e) { diff --git a/cmd/system-probe/modules/eventmonitor.go b/cmd/system-probe/modules/eventmonitor.go index 41707ad513dec..f0a9288f14f2a 100644 --- a/cmd/system-probe/modules/eventmonitor.go +++ b/cmd/system-probe/modules/eventmonitor.go @@ -53,7 +53,7 @@ func createEventMonitorModule(_ *sysconfigtypes.Config, deps module.FactoryDepen } if secconfig.RuntimeSecurity.IsRuntimeEnabled() { - cws, err := secmodule.NewCWSConsumer(evm, secconfig.RuntimeSecurity, deps.WMeta, secmoduleOpts) + cws, err := secmodule.NewCWSConsumer(evm, secconfig.RuntimeSecurity, deps.WMeta, secmoduleOpts, deps.Compression) if err != nil { return nil, err } diff --git a/cmd/system-probe/subcommands/run/command.go b/cmd/system-probe/subcommands/run/command.go index 659f7e2fe0f52..f5d2990b5b0ab 100644 --- a/cmd/system-probe/subcommands/run/command.go +++ b/cmd/system-probe/subcommands/run/command.go @@ -64,7 +64,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/coredump" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/profiling" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -96,7 +96,7 @@ func Commands(globalParams *command.GlobalParams) []*cobra.Command { fx.Supply(sysprobeconfigimpl.NewParams(sysprobeconfigimpl.WithSysProbeConfFilePath(globalParams.ConfFilePath), sysprobeconfigimpl.WithFleetPoliciesDirPath(globalParams.FleetPoliciesDirPath))), fx.Supply(log.ForDaemon("SYS-PROBE", "log_file", common.DefaultLogFile)), fx.Supply(rcclient.Params{AgentName: "system-probe", AgentVersion: version.AgentVersion, IsSystemProbe: true}), - fx.Supply(optional.NewNoneOption[secrets.Component]()), + fx.Supply(option.None[secrets.Component]()), compstatsd.Module(), config.Module(), telemetryimpl.Module(), @@ -267,7 +267,7 @@ func runSystemProbe(ctxChan <-chan context.Context, errChan chan error) error { fx.Supply(sysprobeconfigimpl.NewParams(sysprobeconfigimpl.WithSysProbeConfFilePath(""))), fx.Supply(log.ForDaemon("SYS-PROBE", "log_file", common.DefaultLogFile)), fx.Supply(rcclient.Params{AgentName: "system-probe", AgentVersion: version.AgentVersion, IsSystemProbe: true}), - fx.Supply(optional.NewNoneOption[secrets.Component]()), + fx.Supply(option.None[secrets.Component]()), rcclientimpl.Module(), config.Module(), telemetryimpl.Module(), diff --git a/cmd/system-probe/subcommands/runtime/command_linux.go b/cmd/system-probe/subcommands/runtime/command_linux.go index 1980ac0fbfb23..7d4c2795f53c9 100644 --- a/cmd/system-probe/subcommands/runtime/command_linux.go +++ b/cmd/system-probe/subcommands/runtime/command_linux.go @@ -21,7 +21,7 @@ import ( func Commands(globalParams *command.GlobalParams) []*cobra.Command { runtimeCmd := &cobra.Command{ Use: "runtime", - Short: "runtime Agent utility commands", + Short: "Runtime Security Agent (CWS) utility commands", } runtimeCmd.AddCommand(commonPolicyCommands(globalParams)...) 
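The hunks around here keep replacing pkg/util/optional calls with pkg/util/option ones: optional.NewNoneOption becomes option.None, optional.NewOption becomes option.New, and optional.NewOptionPtr becomes option.NewPtr. As rough orientation, here is a minimal generic sketch of the call shapes this rename implies; it is not the actual pkg/util/option implementation, and the Get accessor is an assumption added only to make the example self-contained.

```go
package main

import "fmt"

// Option holds a value of type T that may be absent.
// Sketch only; the real pkg/util/option package may differ in details.
type Option[T any] struct {
	value T
	set   bool
}

// New wraps a present value (the shape behind option.New(secrets)).
func New[T any](v T) Option[T] { return Option[T]{value: v, set: true} }

// None returns an empty Option (the shape behind option.None[secrets.Component]()).
func None[T any]() Option[T] { return Option[T]{} }

// NewPtr returns a pointer to an Option wrapping v (the shape behind option.NewPtr).
func NewPtr[T any](v T) *Option[T] {
	o := New(v)
	return &o
}

// Get is an assumed accessor, used here only to exercise the sketch.
func (o Option[T]) Get() (T, bool) { return o.value, o.set }

func main() {
	some := New("value")
	none := None[string]()

	if v, ok := some.Get(); ok {
		fmt.Println("present:", v)
	}
	if _, ok := none.Get(); !ok {
		fmt.Println("absent")
	}
	_ = NewPtr(42)
}
```

At the call sites in this diff only the import path, type name, and constructor names change; the wrapped values and the fx providers that consume them behave as before.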
diff --git a/cmd/systray/command/command.go b/cmd/systray/command/command.go index 2774a0731eea6..a7eed7024a244 100644 --- a/cmd/systray/command/command.go +++ b/cmd/systray/command/command.go @@ -29,13 +29,14 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" haagentfx "github.com/DataDog/datadog-agent/comp/haagent/fx" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent/inventoryagentimpl" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" "github.com/DataDog/datadog-agent/comp/systray/systray" "github.com/DataDog/datadog-agent/comp/systray/systray/systrayimpl" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/winutil" ) @@ -108,9 +109,10 @@ func MakeCommand() *cobra.Command { defaultpaths.StreamlogsLogFile, )), noopAutodiscover.Module(), - fx.Supply(optional.NewNoneOption[workloadmeta.Component]()), - fx.Supply(optional.NewNoneOption[collector.Component]()), - compressionfx.Module(), + fx.Supply(option.None[workloadmeta.Component]()), + fx.Supply(option.None[collector.Component]()), + logscompressionfx.Module(), + metricscompressionfx.Module(), diagnosesendermanagerimpl.Module(), nooptagger.Module(), authtokenimpl.Module(), diff --git a/cmd/trace-agent/subcommands/config/command.go b/cmd/trace-agent/subcommands/config/command.go index ae91f0b21f69d..0125b8801f3aa 100644 --- a/cmd/trace-agent/subcommands/config/command.go +++ b/cmd/trace-agent/subcommands/config/command.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/secrets" "github.com/DataDog/datadog-agent/pkg/config/fetcher" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "go.uber.org/fx" "github.com/spf13/cobra" @@ -29,7 +29,7 @@ func MakeCommand(globalParamsGetter func() *subcommands.GlobalParams) *cobra.Com RunE: func(*cobra.Command, []string) error { return fxutil.OneShot(printConfig, fx.Supply(config.NewAgentParams(globalParamsGetter().ConfPath, config.WithFleetPoliciesDirPath(globalParamsGetter().FleetPoliciesDirPath))), - fx.Supply(optional.NewNoneOption[secrets.Component]()), + fx.Supply(option.None[secrets.Component]()), config.Module(), ) }, diff --git a/cmd/trace-agent/subcommands/info/command.go b/cmd/trace-agent/subcommands/info/command.go index 8fbb52f254430..4ce8b6a3dc3f8 100644 --- a/cmd/trace-agent/subcommands/info/command.go +++ b/cmd/trace-agent/subcommands/info/command.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // MakeCommand returns the start subcommand for the 'trace-agent' command. 
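Several of these command wirings also drop the explicit fx.Invoke(func(_ configsync.Component) {}) that used to force instantiation, replacing configsyncimpl.Module() with configsyncimpl.Module(configsyncimpl.NewDefaultParams()) (see the security-agent start command earlier and the trace-agent run command just below). The fx behavior behind that pattern is that provided constructors are lazy and only run when something in the graph consumes them. A small standalone uber-go/fx sketch with a toy component, offered only as an illustration of that behavior:

```go
package main

import (
	"fmt"

	"go.uber.org/fx"
)

type component struct{}

// newComponent is a toy constructor standing in for a real module provider.
func newComponent() *component {
	fmt.Println("component constructed")
	return &component{}
}

func main() {
	app := fx.New(
		fx.Provide(newComponent),
		// Constructors registered with fx.Provide are lazy: without this Invoke
		// (or another consumer in the graph), newComponent never runs. Removing
		// a forced Invoke therefore only works if the module arranges its own
		// instantiation some other way.
		fx.Invoke(func(*component) {}),
	)
	if err := app.Err(); err != nil {
		fmt.Println("wiring error:", err)
	}
}
```

Running the sketch with the Invoke commented out shows the constructor no longer firing, which is the situation the params-based module constructor has to avoid.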
@@ -42,7 +42,7 @@ func runTraceAgentInfoFct(params *subcommands.GlobalParams, fct interface{}) err return fxutil.OneShot(fct, config.Module(), fx.Supply(coreconfig.NewAgentParams(params.ConfPath, coreconfig.WithFleetPoliciesDirPath(params.FleetPoliciesDirPath))), - fx.Supply(optional.NewNoneOption[secrets.Component]()), + fx.Supply(option.None[secrets.Component]()), fx.Supply(secrets.NewEnabledParams()), coreconfig.Module(), secretsimpl.Module(), diff --git a/cmd/trace-agent/subcommands/run/command.go b/cmd/trace-agent/subcommands/run/command.go index 4526101362de1..4df84a00f7a23 100644 --- a/cmd/trace-agent/subcommands/run/command.go +++ b/cmd/trace-agent/subcommands/run/command.go @@ -19,7 +19,6 @@ import ( "github.com/DataDog/datadog-agent/comp/agent/autoexit/autoexitimpl" "github.com/DataDog/datadog-agent/comp/api/authtoken/fetchonlyimpl" coreconfig "github.com/DataDog/datadog-agent/comp/core/config" - "github.com/DataDog/datadog-agent/comp/core/configsync" "github.com/DataDog/datadog-agent/comp/core/configsync/configsyncimpl" log "github.com/DataDog/datadog-agent/comp/core/log/def" logtracefx "github.com/DataDog/datadog-agent/comp/core/log/fx-trace" @@ -38,7 +37,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // MakeCommand returns the run subcommand for the 'trace-agent' command. @@ -79,8 +78,8 @@ func runTraceAgentProcess(ctx context.Context, cliParams *Params, defaultConfPat fx.Provide(func() context.Context { return ctx }), // fx.Supply(ctx) fails with a missing type error. fx.Supply(coreconfig.NewAgentParams(cliParams.ConfPath, coreconfig.WithFleetPoliciesDirPath(cliParams.FleetPoliciesDirPath))), secretsimpl.Module(), - fx.Provide(func(comp secrets.Component) optional.Option[secrets.Component] { - return optional.NewOption[secrets.Component](comp) + fx.Provide(func(comp secrets.Component) option.Option[secrets.Component] { + return option.New[secrets.Component](comp) }), fx.Supply(secrets.NewEnabledParams()), telemetryimpl.Module(), @@ -113,9 +112,9 @@ func runTraceAgentProcess(ctx context.Context, cliParams *Params, defaultConfPat zstdfx.Module(), trace.Bundle(), fetchonlyimpl.Module(), - configsyncimpl.Module(), + configsyncimpl.Module(configsyncimpl.NewDefaultParams()), // Force the instantiation of the components - fx.Invoke(func(_ traceagent.Component, _ configsync.Component, _ autoexit.Component) {}), + fx.Invoke(func(_ traceagent.Component, _ autoexit.Component) {}), ) if err != nil && errors.Is(err, traceagentimpl.ErrAgentDisabled) { return nil diff --git a/comp/README.md b/comp/README.md index f011bc57fe71d..3df8351f3b755 100644 --- a/comp/README.md +++ b/comp/README.md @@ -16,7 +16,7 @@ Package autoexit lets setup automatic shutdown mechanism if necessary ### [comp/agent/cloudfoundrycontainer](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/agent/cloudfoundrycontainer) -*Datadog Team*: platform-integrations +*Datadog Team*: agent-integrations Package cloudfoundrycontainer provides the cloud foundry container component. @@ -615,11 +615,17 @@ Package client implements a component to send process metadata to the Cluster-Ag Package rdnsquerier provides the reverse DNS querier component. 
-### [comp/serializer/compression](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/serializer/compression) +### [comp/serializer/logscompression](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/serializer/logscompression) -*Datadog Team*: agent-metrics-logs +*Datadog Team*: agent-processing-and-routing + +Package logscompression provides the component for logs compression + +### [comp/serializer/metricscompression](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/serializer/metricscompression) + +*Datadog Team*: agent-processing-and-routing -Package compression provides a compression implementation based on the configuration or available build tags. +Package metricscompression provides the component for metrics compression ### [comp/snmpscan](https://pkg.go.dev/github.com/DataDog/datadog-agent/comp/snmpscan) diff --git a/comp/agent/bundle_test.go b/comp/agent/bundle_test.go index 8a1820206ee81..44618341d203b 100644 --- a/comp/agent/bundle_test.go +++ b/comp/agent/bundle_test.go @@ -16,7 +16,6 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -24,7 +23,6 @@ func TestBundleDependencies(t *testing.T) { fxutil.TestBundle(t, Bundle(jmxloggerimpl.NewDefaultParams()), core.MockBundle(), - compressionmock.MockModule(), defaultforwarder.MockModule(), orchestratorimpl.MockModule(), eventplatformimpl.MockModule(), diff --git a/comp/agent/cloudfoundrycontainer/component.go b/comp/agent/cloudfoundrycontainer/component.go index 36e070dad333c..db0ff8c44ff2c 100644 --- a/comp/agent/cloudfoundrycontainer/component.go +++ b/comp/agent/cloudfoundrycontainer/component.go @@ -6,7 +6,7 @@ // Package cloudfoundrycontainer provides the cloud foundry container component. package cloudfoundrycontainer -// team: platform-integrations +// team: agent-integrations // Component is the component type. 
type Component interface{} diff --git a/comp/aggregator/bundle_test.go b/comp/aggregator/bundle_test.go index 2a38d5a0ac603..8b5b5c35929a5 100644 --- a/comp/aggregator/bundle_test.go +++ b/comp/aggregator/bundle_test.go @@ -15,18 +15,20 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" orchestratorForwarderImpl "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) func TestBundleDependencies(t *testing.T) { fxutil.TestBundle(t, Bundle(demultiplexerimpl.Params{}), core.MockBundle(), - compressionmock.MockModule(), defaultforwarder.MockModule(), orchestratorForwarderImpl.MockModule(), eventplatformimpl.MockModule(), nooptagger.Module(), haagentmock.Module(), + logscompression.MockModule(), + metricscompression.MockModule(), ) } diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go index b6b1dd7d56310..6011623caae83 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" orchestratorforwarder "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator" haagent "github.com/DataDog/datadog-agent/comp/haagent/def" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/util/fxutil" diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_fake_sampler_mock.go b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_fake_sampler_mock.go index 53e3beb5f77d6..a16b2ac7c4860 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_fake_sampler_mock.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_fake_sampler_mock.go @@ -16,7 +16,8 @@ import ( demultiplexerComp "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" "github.com/DataDog/datadog-agent/comp/core/hostname" log "github.com/DataDog/datadog-agent/comp/core/log/def" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -33,10 +34,11 @@ func FakeSamplerMockModule() fxutil.Module { type fakeSamplerMockDependencies struct { fx.In - Lc fx.Lifecycle - Log log.Component - Hostname hostname.Component - Compressor compression.Component + Lc fx.Lifecycle + Log log.Component + Hostname hostname.Component + LogsCompression logscompression.Component + MetricsCompression metricscompression.Component } type fakeSamplerMock struct { @@ -56,7 +58,7 @@ func (f *fakeSamplerMock) Stop(flush 
bool) { } func newFakeSamplerMock(deps fakeSamplerMockDependencies) demultiplexerComp.FakeSamplerMock { - demux := initTestAgentDemultiplexerWithFlushInterval(deps.Log, deps.Hostname, deps.Compressor, time.Hour) + demux := initTestAgentDemultiplexerWithFlushInterval(deps.Log, deps.Hostname, deps.LogsCompression, deps.MetricsCompression, time.Hour) mock := &fakeSamplerMock{ TestAgentDemultiplexer: demux, } diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock.go b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock.go index 942c10771a63a..8f609ac9f7d73 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/demultiplexer_mock.go @@ -16,7 +16,8 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -73,11 +74,12 @@ func newMock(deps mockDependencies) MockProvides { opts.DontStartForwarders = true aggDeps := aggregator.TestDeps{ - Log: deps.Log, - Hostname: deps.Hostname, - SharedForwarder: defaultforwarder.NoopForwarder{}, - Compressor: compressionmock.NewMockCompressor(), - HaAgent: haagentmock.NewMockHaAgent(), + Log: deps.Log, + Hostname: deps.Hostname, + SharedForwarder: defaultforwarder.NoopForwarder{}, + LogsCompression: logscompressionmock.NewMockCompressor(), + MetricsCompression: metricscompressionmock.NewMockCompressor(), + HaAgent: haagentmock.NewMockHaAgent(), } instance := &mock{AgentDemultiplexer: aggregator.InitAndStartAgentDemultiplexerForTest(aggDeps, opts, "")} diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/params.go b/comp/aggregator/demultiplexer/demultiplexerimpl/params.go index b176d9b30a0ef..9e293ab36a6e3 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/params.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/params.go @@ -8,7 +8,7 @@ package demultiplexerimpl import ( "time" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Params contains the parameters for the demultiplexer @@ -16,7 +16,7 @@ type Params struct { continueOnMissingHostname bool // This is an optional field to override the default flush interval only if it is set - flushInterval optional.Option[time.Duration] + flushInterval option.Option[time.Duration] useDogstatsdNoAggregationPipelineConfig bool } @@ -43,7 +43,7 @@ func WithContinueOnMissingHostname() Option { // WithFlushInterval sets the flushInterval field to the provided duration func WithFlushInterval(duration time.Duration) Option { return func(p *Params) { - p.flushInterval = optional.NewOption(duration) + p.flushInterval = option.New(duration) } } diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/status_test.go b/comp/aggregator/demultiplexer/demultiplexerimpl/status_test.go index 0e60ca002a79e..2470cfd351a46 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/status_test.go +++ 
b/comp/aggregator/demultiplexer/demultiplexerimpl/status_test.go @@ -21,7 +21,8 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -60,11 +61,12 @@ func TestStatusOutPut(t *testing.T) { deps := fxutil.Test[dependencies](t, fx.Options( core.MockBundle(), - compressionmock.MockModule(), defaultforwarder.MockModule(), haagentmock.Module(), orchestratorimpl.MockModule(), eventplatformimpl.MockModule(), + logscompression.MockModule(), + metricscompression.MockModule(), fx.Provide(func() tagger.Component { return mockTagger }), diff --git a/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go b/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go index 431bcdb8c06e2..8f103d19b22fe 100644 --- a/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go +++ b/comp/aggregator/demultiplexer/demultiplexerimpl/test_agent_demultiplexer.go @@ -19,14 +19,15 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/aggregator" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // TestAgentDemultiplexer is an implementation of the Demultiplexer which is sending @@ -176,7 +177,7 @@ func (a *TestAgentDemultiplexer) Reset() { } // initTestAgentDemultiplexerWithFlushInterval inits a TestAgentDemultiplexer with the given flush interval. 
-func initTestAgentDemultiplexerWithFlushInterval(log log.Component, hostname hostname.Component, compressor compression.Component, flushInterval time.Duration) *TestAgentDemultiplexer { +func initTestAgentDemultiplexerWithFlushInterval(log log.Component, hostname hostname.Component, logscompressor logscompression.Component, metricscompressor metricscompression.Component, flushInterval time.Duration) *TestAgentDemultiplexer { opts := aggregator.DefaultAgentDemultiplexerOptions() opts.FlushInterval = flushInterval opts.DontStartForwarders = true @@ -184,8 +185,9 @@ func initTestAgentDemultiplexerWithFlushInterval(log log.Component, hostname hos sharedForwarderOptions := defaultforwarder.NewOptions(pkgconfigsetup.Datadog(), log, nil) sharedForwarder := defaultforwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), log, sharedForwarderOptions) - orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) - eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostname)) - demux := aggregator.InitAndStartAgentDemultiplexer(log, sharedForwarder, &orchestratorForwarder, opts, eventPlatformForwarder, haagentmock.NewMockHaAgent(), compressor, noopimpl.NewComponent(), "hostname") + + orchestratorForwarder := option.New[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) + eventPlatformForwarder := option.NewPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostname, logscompressor)) + demux := aggregator.InitAndStartAgentDemultiplexer(log, sharedForwarder, &orchestratorForwarder, opts, eventPlatformForwarder, haagentmock.NewMockHaAgent(), metricscompressor, noopimpl.NewComponent(), "hostname") return NewTestAgentDemultiplexer(demux) } diff --git a/comp/aggregator/diagnosesendermanager/diagnosesendermanagerimpl/sendermanager.go b/comp/aggregator/diagnosesendermanager/diagnosesendermanagerimpl/sendermanager.go index 9e3c058314c79..0f290c2bb216b 100644 --- a/comp/aggregator/diagnosesendermanager/diagnosesendermanagerimpl/sendermanager.go +++ b/comp/aggregator/diagnosesendermanager/diagnosesendermanagerimpl/sendermanager.go @@ -20,11 +20,12 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" haagent "github.com/DataDog/datadog-agent/comp/haagent/def" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Module defines the fx options for this component. 
@@ -35,16 +36,17 @@ func Module() fxutil.Module { type dependencies struct { fx.In - Log log.Component - Config config.Component - Hostname hostname.Component - Compressor compression.Component - Tagger tagger.Component - HaAgent haagent.Component + Log log.Component + Config config.Component + Hostname hostname.Component + LogsCompressor logscompression.Component + MetricsCompressor metricscompression.Component + Tagger tagger.Component + HaAgent haagent.Component } type diagnoseSenderManager struct { - senderManager optional.Option[sender.SenderManager] + senderManager option.Option[sender.SenderManager] deps dependencies } @@ -73,8 +75,8 @@ func (sender *diagnoseSenderManager) LazyGetSenderManager() (sender.SenderManage config := sender.deps.Config haAgent := sender.deps.HaAgent forwarder := defaultforwarder.NewDefaultForwarder(config, log, defaultforwarder.NewOptions(config, log, nil)) - orchestratorForwarder := optional.NewOptionPtr[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) - eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(sender.deps.Hostname)) + orchestratorForwarder := option.NewPtr[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) + eventPlatformForwarder := option.NewPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(sender.deps.Hostname, sender.deps.LogsCompressor)) senderManager = aggregator.InitAndStartAgentDemultiplexer( log, forwarder, @@ -82,7 +84,7 @@ func (sender *diagnoseSenderManager) LazyGetSenderManager() (sender.SenderManage opts, eventPlatformForwarder, haAgent, - sender.deps.Compressor, + sender.deps.MetricsCompressor, sender.deps.Tagger, hostnameDetected) diff --git a/comp/api/api/apiimpl/api.go b/comp/api/api/apiimpl/api.go index a917739150f33..dcfef9b84b6da 100644 --- a/comp/api/api/apiimpl/api.go +++ b/comp/api/api/apiimpl/api.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "github.com/DataDog/datadog-agent/comp/remote-config/rcservicemrf" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Module defines the fx options for this component. 
@@ -45,14 +45,14 @@ type apiServer struct { cfg config.Component pidMap pidmap.Component secretResolver secrets.Component - rcService optional.Option[rcservice.Component] - rcServiceMRF optional.Option[rcservicemrf.Component] + rcService option.Option[rcservice.Component] + rcServiceMRF option.Option[rcservicemrf.Component] authToken authtoken.Component taggerComp tagger.Component autoConfig autodiscovery.Component - logsAgentComp optional.Option[logsAgent.Component] + logsAgentComp option.Option[logsAgent.Component] wmeta workloadmeta.Component - collector optional.Option[collector.Component] + collector option.Option[collector.Component] senderManager diagnosesendermanager.Component remoteAgentRegistry remoteagentregistry.Component cmdListener net.Listener @@ -69,15 +69,15 @@ type dependencies struct { Capture replay.Component PidMap pidmap.Component SecretResolver secrets.Component - RcService optional.Option[rcservice.Component] - RcServiceMRF optional.Option[rcservicemrf.Component] + RcService option.Option[rcservice.Component] + RcServiceMRF option.Option[rcservicemrf.Component] AuthToken authtoken.Component Tagger tagger.Component Cfg config.Component AutoConfig autodiscovery.Component - LogsAgentComp optional.Option[logsAgent.Component] + LogsAgentComp option.Option[logsAgent.Component] WorkloadMeta workloadmeta.Component - Collector optional.Option[collector.Component] + Collector option.Option[collector.Component] DiagnoseSenderManager diagnosesendermanager.Component Telemetry telemetry.Component EndpointProviders []api.EndpointProvider `group:"agent_endpoint"` diff --git a/comp/api/api/apiimpl/api_test.go b/comp/api/api/apiimpl/api_test.go index b248dc9531330..d046a711916d2 100644 --- a/comp/api/api/apiimpl/api_test.go +++ b/comp/api/api/apiimpl/api_test.go @@ -47,7 +47,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" // third-party dependencies dto "github.com/prometheus/client_model/go" @@ -74,8 +74,8 @@ func getTestAPIServer(t *testing.T, params config.MockParams) testdeps { replaymock.MockModule(), secretsimpl.MockModule(), demultiplexerimpl.MockModule(), - fx.Supply(optional.NewNoneOption[rcservice.Component]()), - fx.Supply(optional.NewNoneOption[rcservicemrf.Component]()), + fx.Supply(option.None[rcservice.Component]()), + fx.Supply(option.None[rcservicemrf.Component]()), createandfetchimpl.Module(), fx.Supply(context.Background()), taggermock.Module(), @@ -87,8 +87,8 @@ func getTestAPIServer(t *testing.T, params config.MockParams) testdeps { fx.Provide(func(mock autodiscovery.Mock) autodiscovery.Component { return mock }), - fx.Supply(optional.NewNoneOption[logsAgent.Component]()), - fx.Supply(optional.NewNoneOption[collector.Component]()), + fx.Supply(option.None[logsAgent.Component]()), + fx.Supply(option.None[collector.Component]()), pidmapimpl.Module(), // Ensure we pass a nil endpoint to test that we always filter out nil endpoints fx.Provide(func() api.AgentEndpointProvider { diff --git a/comp/api/api/apiimpl/grpc.go b/comp/api/api/apiimpl/grpc.go index 6272a7f00fe05..b139c8957d3a0 100644 --- a/comp/api/api/apiimpl/grpc.go +++ b/comp/api/api/apiimpl/grpc.go @@ -10,6 +10,10 @@ import ( "fmt" "time" + "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" + "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" + 
"github.com/DataDog/datadog-agent/comp/remote-config/rcservicemrf" + "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/status" @@ -17,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery" autodiscoverystream "github.com/DataDog/datadog-agent/comp/core/autodiscovery/stream" + "github.com/DataDog/datadog-agent/comp/core/config" remoteagentregistry "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/def" rarproto "github.com/DataDog/datadog-agent/comp/core/remoteagentregistry/proto" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" @@ -27,13 +32,11 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" dsdReplay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" dogstatsdServer "github.com/DataDog/datadog-agent/comp/dogstatsd/server" - "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" - "github.com/DataDog/datadog-agent/comp/remote-config/rcservicemrf" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type grpcServer struct { @@ -45,13 +48,14 @@ type serverSecure struct { taggerServer *taggerserver.Server taggerComp tagger.Component workloadmetaServer *workloadmetaServer.Server - configService optional.Option[rcservice.Component] - configServiceMRF optional.Option[rcservicemrf.Component] + configService option.Option[rcservice.Component] + configServiceMRF option.Option[rcservicemrf.Component] dogstatsdServer dogstatsdServer.Component capture dsdReplay.Component pidMap pidmap.Component remoteAgentRegistry remoteagentregistry.Component autodiscovery autodiscovery.Component + configComp config.Component } func (s *grpcServer) GetHostname(ctx context.Context, _ *pb.HostnameRequest) (*pb.HostnameReply, error) { @@ -213,6 +217,11 @@ func (s *serverSecure) AutodiscoveryStreamConfig(_ *emptypb.Empty, out pb.AgentS return autodiscoverystream.Config(s.autodiscovery, out) } +func (s *serverSecure) GetHostTags(ctx context.Context, _ *pb.HostTagRequest) (*pb.HostTagReply, error) { + tags := hosttags.Get(ctx, true, s.configComp) + return &pb.HostTagReply{System: tags.System, GoogleCloudPlatform: tags.GoogleCloudPlatform}, nil +} + func init() { grpclog.SetLoggerV2(grpc.NewLogger()) } diff --git a/comp/api/api/apiimpl/internal/agent/agent.go b/comp/api/api/apiimpl/internal/agent/agent.go index cfeb9250e338c..e091bbf028eb6 100644 --- a/comp/api/api/apiimpl/internal/agent/agent.go +++ b/comp/api/api/apiimpl/internal/agent/agent.go @@ -25,6 +25,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/secrets" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" httputils "github.com/DataDog/datadog-agent/pkg/util/http" + "github.com/DataDog/datadog-agent/pkg/util/option" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" @@ -33,17 +34,16 @@ import ( "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" ) // SetupHandlers adds the specific handlers for /agent endpoints func SetupHandlers( r *mux.Router, wmeta workloadmeta.Component, - logsAgent 
optional.Option[logsAgent.Component], + logsAgent option.Option[logsAgent.Component], senderManager sender.DiagnoseSenderManager, secretResolver secrets.Component, - collector optional.Option[collector.Component], + collector option.Option[collector.Component], ac autodiscovery.Component, providers []api.EndpointProvider, tagger tagger.Component, @@ -59,7 +59,7 @@ func SetupHandlers( r.HandleFunc("/{component}/status", componentStatusHandler).Methods("POST") r.HandleFunc("/{component}/configs", componentConfigHandler).Methods("GET") r.HandleFunc("/diagnose", func(w http.ResponseWriter, r *http.Request) { - diagnoseDeps := diagnose.NewSuitesDeps(senderManager, collector, secretResolver, optional.NewOption(wmeta), ac, tagger) + diagnoseDeps := diagnose.NewSuitesDeps(senderManager, collector, secretResolver, option.New(wmeta), ac, tagger) getDiagnose(w, r, diagnoseDeps) }).Methods("POST") diff --git a/comp/api/api/apiimpl/internal/agent/agent_jmx.go b/comp/api/api/apiimpl/internal/agent/agent_jmx.go index 2c9cc45954393..36d643a8f9ba8 100644 --- a/comp/api/api/apiimpl/internal/agent/agent_jmx.go +++ b/comp/api/api/apiimpl/internal/agent/agent_jmx.go @@ -22,7 +22,6 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/jmxfetch" jmxStatus "github.com/DataDog/datadog-agent/pkg/status/jmx" - "github.com/DataDog/datadog-agent/pkg/util" yaml "gopkg.in/yaml.v2" ) @@ -55,7 +54,7 @@ func getJMXConfigs(w http.ResponseWriter, r *http.Request) { } c := map[string]interface{}{} - c["init_config"] = util.GetJSONSerializableMap(rawInitConfig) + c["init_config"] = jmxfetch.GetJSONSerializableMap(rawInitConfig) instances := []integration.JSONMap{} for _, instance := range config.Instances { var rawInstanceConfig integration.JSONMap @@ -65,7 +64,7 @@ func getJMXConfigs(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), 500) return } - instances = append(instances, util.GetJSONSerializableMap(rawInstanceConfig).(integration.JSONMap)) + instances = append(instances, jmxfetch.GetJSONSerializableMap(rawInstanceConfig).(integration.JSONMap)) } c["instances"] = instances @@ -75,7 +74,7 @@ func getJMXConfigs(w http.ResponseWriter, r *http.Request) { } j["configs"] = configs j["timestamp"] = time.Now().Unix() - jsonPayload, err := json.Marshal(util.GetJSONSerializableMap(j)) + jsonPayload, err := json.Marshal(jmxfetch.GetJSONSerializableMap(j)) if err != nil { log.Errorf("unable to parse JMX configuration: %s", err) http.Error(w, err.Error(), 500) diff --git a/comp/api/api/apiimpl/internal/agent/agent_test.go b/comp/api/api/apiimpl/internal/agent/agent_test.go index 224ff5ff366fc..d33802dbc84b7 100644 --- a/comp/api/api/apiimpl/internal/agent/agent_test.go +++ b/comp/api/api/apiimpl/internal/agent/agent_test.go @@ -40,7 +40,7 @@ import ( // package dependencies "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" // third-party dependencies "github.com/gorilla/mux" @@ -51,11 +51,11 @@ type handlerdeps struct { fx.In Wmeta workloadmeta.Component - LogsAgent optional.Option[logsAgent.Component] + LogsAgent option.Option[logsAgent.Component] HostMetadata host.Component SecretResolver secrets.Component Demux demultiplexer.Component - Collector optional.Option[collector.Component] + Collector option.Option[collector.Component] Ac autodiscovery.Mock Tagger taggermock.Mock } @@ -65,14 
+65,14 @@ func getComponentDeps(t *testing.T) handlerdeps { t, fx.Supply(context.Background()), hostnameinterface.MockModule(), - fx.Provide(func() optional.Option[logsAgent.Component] { - return optional.NewNoneOption[logsAgent.Component]() + fx.Provide(func() option.Option[logsAgent.Component] { + return option.None[logsAgent.Component]() }), hostimpl.MockModule(), demultiplexerimpl.MockModule(), secretsimpl.MockModule(), - fx.Provide(func() optional.Option[collector.Component] { - return optional.NewNoneOption[collector.Component]() + fx.Provide(func() option.Option[collector.Component] { + return option.None[collector.Component]() }), taggermock.Module(), fx.Options( diff --git a/comp/api/api/apiimpl/server_cmd.go b/comp/api/api/apiimpl/server_cmd.go index 2eca0fe7abd09..dcb7aa1bcb17e 100644 --- a/comp/api/api/apiimpl/server_cmd.go +++ b/comp/api/api/apiimpl/server_cmd.go @@ -13,7 +13,7 @@ import ( gorilla "github.com/gorilla/mux" grpc_auth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -64,7 +64,7 @@ func (server *apiServer) startCMDServer( pb.RegisterAgentSecureServer(s, &serverSecure{ configService: server.rcService, configServiceMRF: server.rcServiceMRF, - taggerServer: taggerserver.NewServer(server.taggerComp, maxEventSize), + taggerServer: taggerserver.NewServer(server.taggerComp, maxEventSize, cfg.GetInt("remote_tagger.max_concurrent_sync")), taggerComp: server.taggerComp, // TODO(components): decide if workloadmetaServer should be componentized itself workloadmetaServer: workloadmetaServer.NewServer(server.wmeta), @@ -73,6 +73,7 @@ func (server *apiServer) startCMDServer( pidMap: server.pidMap, remoteAgentRegistry: server.remoteAgentRegistry, autodiscovery: server.autoConfig, + configComp: cfg, }) dopts := []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(server.authToken.GetTLSClientConfig()))} diff --git a/comp/api/api/apiimpl/tools.go b/comp/api/api/apiimpl/tools.go index a056acf6a638a..a850ea0c48a14 100644 --- a/comp/api/api/apiimpl/tools.go +++ b/comp/api/api/apiimpl/tools.go @@ -12,6 +12,5 @@ package apiimpl import ( _ "github.com/golang/protobuf/protoc-gen-go" - _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway" - _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger" + _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway" ) diff --git a/comp/api/api/def/go.mod b/comp/api/api/def/go.mod index 8a89ca045e6d6..62a39aa3a65f2 100644 --- a/comp/api/api/def/go.mod +++ b/comp/api/api/def/go.mod @@ -11,5 +11,5 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect ) diff --git a/comp/api/api/def/go.sum b/comp/api/api/def/go.sum index 7b69276ecf542..d7ce7d478b536 100644 --- a/comp/api/api/def/go.sum +++ b/comp/api/api/def/go.sum @@ -14,7 +14,7 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= 
+golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/comp/api/authtoken/component.go b/comp/api/authtoken/component.go index fbe0ef3558028..f0247bd7a679e 100644 --- a/comp/api/authtoken/component.go +++ b/comp/api/authtoken/component.go @@ -14,7 +14,7 @@ import ( "go.uber.org/fx" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // team: agent-shared-components @@ -31,7 +31,7 @@ type Component interface { // This helper allows code that needs a disabled Optional type for authtoken to get it. The helper is split from // the implementation to avoid linking with the dependencies from sysprobeconfig. func NoneModule() fxutil.Module { - return fxutil.Component(fx.Provide(func() optional.Option[Component] { - return optional.NewNoneOption[Component]() + return fxutil.Component(fx.Provide(func() option.Option[Component] { + return option.None[Component]() })) } diff --git a/comp/api/authtoken/fetchonlyimpl/authtoken.go b/comp/api/authtoken/fetchonlyimpl/authtoken.go index f353bc3706c3d..cf27082157cf0 100644 --- a/comp/api/authtoken/fetchonlyimpl/authtoken.go +++ b/comp/api/authtoken/fetchonlyimpl/authtoken.go @@ -76,7 +76,6 @@ func (at *authToken) Get() string { func (at *authToken) GetTLSClientConfig() *tls.Config { if err := at.setToken(); err != nil { at.log.Debugf("%s", err.Error()) - return nil } return util.GetTLSClientConfig() @@ -86,7 +85,6 @@ func (at *authToken) GetTLSClientConfig() *tls.Config { func (at *authToken) GetTLSServerConfig() *tls.Config { if err := at.setToken(); err != nil { at.log.Debugf("%s", err.Error()) - return nil } return util.GetTLSServerConfig() diff --git a/comp/api/authtoken/go.mod b/comp/api/authtoken/go.mod index 24081f4824780..8119028c321c4 100644 --- a/comp/api/authtoken/go.mod +++ b/comp/api/authtoken/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/comp/api/authtoken -go 1.22.0 +go 1.23.0 replace ( github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def @@ -29,7 +29,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate/ github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system @@ -42,11 +42,11 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/config v0.56.0 - github.com/DataDog/datadog-agent/comp/core/log/def v0.58.0-devel + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 github.com/DataDog/datadog-agent/comp/core/log/mock v0.58.0-devel github.com/DataDog/datadog-agent/pkg/api v0.56.0 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0 - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 
github.com/stretchr/testify v1.10.0 go.uber.org/fx v1.23.0 ) @@ -59,22 +59,22 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/log/setup v0.58.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -87,16 +87,16 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect 
github.com/spf13/pflag v1.0.5 // indirect @@ -107,8 +107,8 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/api/authtoken/go.sum b/comp/api/authtoken/go.sum index dd16364891695..77eac717c35df 100644 --- a/comp/api/authtoken/go.sum +++ b/comp/api/authtoken/go.sum @@ -72,7 +72,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -110,8 +109,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -138,8 +137,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod 
h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -156,8 +155,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -171,8 +170,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -183,8 +182,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -239,8 +238,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp 
v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -277,8 +276,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -306,8 +305,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/autoscaling/datadogclient/fx/fx.go b/comp/autoscaling/datadogclient/fx/fx.go index a42b63887f5db..51812ac5da264 100644 --- a/comp/autoscaling/datadogclient/fx/fx.go +++ b/comp/autoscaling/datadogclient/fx/fx.go @@ -10,7 +10,7 @@ import ( datadogclient "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/def" datadogclientimpl "github.com/DataDog/datadog-agent/comp/autoscaling/datadogclient/impl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "go.uber.org/fx" ) @@ -20,11 +20,11 @@ func Module() fxutil.Module { fxutil.ProvideComponentConstructor( datadogclientimpl.NewComponent, ), - fx.Provide(func(c 
datadogclient.Component) optional.Option[datadogclient.Component] { + fx.Provide(func(c datadogclient.Component) option.Option[datadogclient.Component] { if _, ok := c.(*datadogclientimpl.ImplNone); ok { - return optional.NewNoneOption[datadogclient.Component]() + return option.None[datadogclient.Component]() } - return optional.NewOption[datadogclient.Component](c) + return option.New[datadogclient.Component](c) }), ) } diff --git a/comp/checks/agentcrashdetect/agentcrashdetectimpl/agentcrashdetect.go b/comp/checks/agentcrashdetect/agentcrashdetectimpl/agentcrashdetect.go index 6098dcf98f629..013dc0177eff6 100644 --- a/comp/checks/agentcrashdetect/agentcrashdetectimpl/agentcrashdetect.go +++ b/comp/checks/agentcrashdetect/agentcrashdetectimpl/agentcrashdetect.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/crashreport" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -189,7 +189,7 @@ func newAgentCrashComponent(deps dependencies) agentcrashdetect.Component { instance.tconfig = deps.TConfig.Object() deps.Lifecycle.Append(fx.Hook{ OnStart: func(_ context.Context) error { - core.RegisterCheck(CheckName, optional.NewOption(func() check.Check { + core.RegisterCheck(CheckName, option.New(func() check.Check { checkInstance := &AgentCrashDetect{ CheckBase: core.NewCheckBase(CheckName), instance: &WinCrashConfig{}, diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/check.go b/comp/checks/windowseventlog/windowseventlogimpl/check/check.go index c8920647cb322..4dbaa18a40986 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/check.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/check.go @@ -23,7 +23,7 @@ import ( core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" agentEvent "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" evtapi "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/api" winevtapi "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/api/windows" evtsession "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/session" @@ -45,7 +45,7 @@ type Check struct { core.CheckBase config *Config - logsAgent optional.Option[logsAgent.Component] + logsAgent option.Option[logsAgent.Component] agentConfig configComponent.Component fetchEventsLoopWaiter sync.WaitGroup @@ -300,8 +300,8 @@ func (c *Check) Cancel() { } // Factory creates a new check factory -func Factory(logsAgent optional.Option[logsAgent.Component], config configComponent.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(logsAgent option.Option[logsAgent.Component], config configComponent.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return &Check{ CheckBase: core.NewCheckBase(CheckName), logsAgent: logsAgent, diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/config.go b/comp/checks/windowseventlog/windowseventlogimpl/check/config.go index 2408a384ba063..14af2aa604d4e 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/config.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/config.go @@ -11,8 +11,7 @@ import ( "fmt" 
"github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/util/optional" - + "github.com/DataDog/datadog-agent/pkg/util/option" yaml "gopkg.in/yaml.v2" ) @@ -37,27 +36,27 @@ type Config struct { } type instanceConfig struct { - DDSecurityEvents optional.Option[string] `yaml:"dd_security_events"` - ChannelPath optional.Option[string] `yaml:"path"` - Query optional.Option[string] `yaml:"query"` - Start optional.Option[string] `yaml:"start"` - Timeout optional.Option[int] `yaml:"timeout"` - PayloadSize optional.Option[int] `yaml:"payload_size"` - BookmarkFrequency optional.Option[int] `yaml:"bookmark_frequency"` - LegacyMode optional.Option[bool] `yaml:"legacy_mode"` - LegacyModeV2 optional.Option[bool] `yaml:"legacy_mode_v2"` - EventPriority optional.Option[string] `yaml:"event_priority"` - TagEventID optional.Option[bool] `yaml:"tag_event_id"` - TagSID optional.Option[bool] `yaml:"tag_sid"` - Filters optional.Option[filtersConfig] `yaml:"filters"` - IncludedMessages optional.Option[[]string] `yaml:"included_messages"` - ExcludedMessages optional.Option[[]string] `yaml:"excluded_messages"` - AuthType optional.Option[string] `yaml:"auth_type"` - Server optional.Option[string] `yaml:"server"` - User optional.Option[string] `yaml:"user"` - Domain optional.Option[string] `yaml:"domain"` - Password optional.Option[string] `yaml:"password"` - InterpretMessages optional.Option[bool] `yaml:"interpret_messages"` + DDSecurityEvents option.Option[string] `yaml:"dd_security_events"` + ChannelPath option.Option[string] `yaml:"path"` + Query option.Option[string] `yaml:"query"` + Start option.Option[string] `yaml:"start"` + Timeout option.Option[int] `yaml:"timeout"` + PayloadSize option.Option[int] `yaml:"payload_size"` + BookmarkFrequency option.Option[int] `yaml:"bookmark_frequency"` + LegacyMode option.Option[bool] `yaml:"legacy_mode"` + LegacyModeV2 option.Option[bool] `yaml:"legacy_mode_v2"` + EventPriority option.Option[string] `yaml:"event_priority"` + TagEventID option.Option[bool] `yaml:"tag_event_id"` + TagSID option.Option[bool] `yaml:"tag_sid"` + Filters option.Option[filtersConfig] `yaml:"filters"` + IncludedMessages option.Option[[]string] `yaml:"included_messages"` + ExcludedMessages option.Option[[]string] `yaml:"excluded_messages"` + AuthType option.Option[string] `yaml:"auth_type"` + Server option.Option[string] `yaml:"server"` + User option.Option[string] `yaml:"user"` + Domain option.Option[string] `yaml:"domain"` + Password option.Option[string] `yaml:"password"` + InterpretMessages option.Option[bool] `yaml:"interpret_messages"` } type filtersConfig struct { @@ -67,12 +66,12 @@ type filtersConfig struct { } type initConfig struct { - TagEventID optional.Option[bool] `yaml:"tag_event_id"` - TagSID optional.Option[bool] `yaml:"tag_sid"` - EventPriority optional.Option[string] `yaml:"event_priority"` - InterpretMessages optional.Option[bool] `yaml:"interpret_messages"` - LegacyMode optional.Option[bool] `yaml:"legacy_mode"` - LegacyModeV2 optional.Option[bool] `yaml:"legacy_mode_v2"` + TagEventID option.Option[bool] `yaml:"tag_event_id"` + TagSID option.Option[bool] `yaml:"tag_sid"` + EventPriority option.Option[string] `yaml:"event_priority"` + InterpretMessages option.Option[bool] `yaml:"interpret_messages"` + LegacyMode option.Option[bool] `yaml:"legacy_mode"` + LegacyModeV2 option.Option[bool] `yaml:"legacy_mode_v2"` } func (f *filtersConfig) Sources() []string { @@ -133,11 +132,11 @@ func (c *Config) genQuery() error { return 
nil } -func setOptionalDefault[T any](optional *optional.Option[T], def T) { +func setOptionalDefault[T any](optional *option.Option[T], def T) { optional.SetIfNone(def) } -func setOptionalDefaultWithInitConfig[T any](instance *optional.Option[T], shared optional.Option[T], def T) { +func setOptionalDefaultWithInitConfig[T any](instance *option.Option[T], shared option.Option[T], def T) { instance.SetOptionIfNone(shared) instance.SetIfNone(def) } diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/config_helpers.go b/comp/checks/windowseventlog/windowseventlogimpl/check/config_helpers.go index b2643b6053aff..05e7ad00c593f 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/config_helpers.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/config_helpers.go @@ -12,7 +12,7 @@ import ( "regexp" agentEvent "github.com/DataDog/datadog-agent/pkg/metrics/event" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" evtapi "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/api" ) @@ -28,7 +28,7 @@ func compileRegexPatterns(patterns []string) ([]*regexp.Regexp, error) { return res, nil } -func serverIsLocal(server optional.Option[string]) bool { +func serverIsLocal(server option.Option[string]) bool { val, isSet := server.Get() return !isSet || len(val) == 0 || @@ -52,7 +52,7 @@ func evtRPCFlagsFromString(flags string) (uint, error) { } } -func evtRPCFlagsFromOption(authType optional.Option[string]) (uint, error) { +func evtRPCFlagsFromOption(authType option.Option[string]) (uint, error) { val, isSet := authType.Get() if !isSet { return 0, fmt.Errorf("option is not set") @@ -60,12 +60,12 @@ func evtRPCFlagsFromOption(authType optional.Option[string]) (uint, error) { return evtRPCFlagsFromString(val) } -func isaffirmative(o optional.Option[bool]) bool { +func isaffirmative(o option.Option[bool]) bool { val, isSet := o.Get() return isSet && val } -func getEventPriorityFromOption(o optional.Option[string]) (agentEvent.Priority, error) { +func getEventPriorityFromOption(o option.Option[string]) (agentEvent.Priority, error) { val, isSet := o.Get() if !isSet { return "", fmt.Errorf("option is not set") diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/config_test.go b/comp/checks/windowseventlog/windowseventlogimpl/check/config_test.go index 43e27649884f2..4db089f7ea69c 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/config_test.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/check/config_test.go @@ -13,10 +13,10 @@ import ( "github.com/stretchr/testify/assert" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) -func assertOptionalValue[T any](t *testing.T, assertCompare assert.ComparisonAssertionFunc, o optional.Option[T], expected T) bool { +func assertOptionalValue[T any](t *testing.T, assertCompare assert.ComparisonAssertionFunc, o option.Option[T], expected T) bool { actual, isSet := o.Get() return assert.True(t, isSet, fmt.Sprintf("%v is not set", o)) && assertCompare(t, expected, actual, fmt.Sprintf("%v does not match expcted value", o)) diff --git a/comp/checks/windowseventlog/windowseventlogimpl/check/unsupported_platforms.go b/comp/checks/windowseventlog/windowseventlogimpl/check/unsupported_platforms.go index 721fecc4ff6ad..ee9588ec13f15 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/check/unsupported_platforms.go +++ 
b/comp/checks/windowseventlog/windowseventlogimpl/check/unsupported_platforms.go @@ -10,7 +10,7 @@ package evtlog import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ const ( ) // Factory creates a new check factory -func Factory(_ ...any) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(_ ...any) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/comp/checks/windowseventlog/windowseventlogimpl/windows_event_log.go b/comp/checks/windowseventlog/windowseventlogimpl/windows_event_log.go index c517a4189ad78..94c72cc2d2b56 100644 --- a/comp/checks/windowseventlog/windowseventlogimpl/windows_event_log.go +++ b/comp/checks/windowseventlog/windowseventlogimpl/windows_event_log.go @@ -8,6 +8,7 @@ package windowseventlogimpl import ( "context" + "go.uber.org/fx" "github.com/DataDog/datadog-agent/comp/checks/windowseventlog" @@ -16,7 +17,7 @@ import ( logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Module defines the fx options for this component. @@ -31,7 +32,7 @@ type dependencies struct { // Logs Agent component, used to send integration logs // It is optional because the Logs Agent can be disabled - LogsComponent optional.Option[logsAgent.Component] + LogsComponent option.Option[logsAgent.Component] Config configComponent.Component Lifecycle fx.Lifecycle diff --git a/comp/checks/winregistry/impl/winregistryimpl.go b/comp/checks/winregistry/impl/winregistryimpl.go index 62cfaba59da44..5409e145ba6cd 100644 --- a/comp/checks/winregistry/impl/winregistryimpl.go +++ b/comp/checks/winregistry/impl/winregistryimpl.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/fxutil" agentLog "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" yy "github.com/ghodss/yaml" "github.com/swaggest/jsonschema-go" "github.com/xeipuuv/gojsonschema" @@ -55,7 +55,7 @@ type dependencies struct { // Logs Agent component, used to send integration logs // It is optional because the Logs Agent can be disabled - LogsComponent optional.Option[agent.Component] + LogsComponent option.Option[agent.Component] // Datadog Agent logs component, used to log to the Agent logs Log log.Component @@ -63,9 +63,9 @@ type dependencies struct { } type registryValueCfg struct { - Name string `json:"name" yaml:"name" required:"true"` // The metric name of the registry value - DefaultValue optional.Option[float64] `json:"default_value" yaml:"default_value"` - Mappings []map[string]float64 `json:"mapping" yaml:"mapping"` + Name string `json:"name" yaml:"name" required:"true"` // The metric name of the registry value + DefaultValue option.Option[float64] `json:"default_value" yaml:"default_value"` + Mappings []map[string]float64 `json:"mapping" yaml:"mapping"` } type registryKeyCfg struct { @@ -76,12 +76,12 @@ type registryKeyCfg struct { // checkCfg is the config that is specific to each check instance type checkCfg struct { RegistryKeys map[string]registryKeyCfg `json:"registry_keys" 
yaml:"registry_keys" nullable:"false" required:"true"` - SendOnStart optional.Option[bool] `json:"send_on_start" yaml:"send_on_start"` + SendOnStart option.Option[bool] `json:"send_on_start" yaml:"send_on_start"` } // checkInitCfg is the config that is common to all check instances type checkInitCfg struct { - SendOnStart optional.Option[bool] `yaml:"send_on_start"` + SendOnStart option.Option[bool] `yaml:"send_on_start"` } // registryKey is the in-memory representation of the key to monitor @@ -110,9 +110,9 @@ type WindowsRegistryCheck struct { } func createOptionMapping[T any](reflector *jsonschema.Reflector, sourceType jsonschema.SimpleType) { - option := jsonschema.Schema{} - option.AddType(sourceType) - reflector.AddTypeMapping(optional.Option[T]{}, option) + optionValue := jsonschema.Schema{} + optionValue.AddType(sourceType) + reflector.AddTypeMapping(option.Option[T]{}, optionValue) } func createSchema() ([]byte, error) { @@ -359,7 +359,7 @@ func (c *WindowsRegistryCheck) Run() error { func newWindowsRegistryComponent(deps dependencies) winregistry.Component { deps.Lifecycle.Append(fx.Hook{ OnStart: func(_ context.Context) error { - core.RegisterCheck(checkName, optional.NewOption(func() check.Check { + core.RegisterCheck(checkName, option.New(func() check.Check { integrationLogs, _ := deps.LogsComponent.Get() return &WindowsRegistryCheck{ CheckBase: core.NewCheckBase(checkName), diff --git a/comp/collector/collector/collectorimpl/agent_check_metadata_test.go b/comp/collector/collector/collectorimpl/agent_check_metadata_test.go index f997bd0332518..479f67cb6d163 100644 --- a/comp/collector/collector/collectorimpl/agent_check_metadata_test.go +++ b/comp/collector/collector/collectorimpl/agent_check_metadata_test.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/externalhost" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) func TestExternalHostTags(t *testing.T) { @@ -38,8 +38,8 @@ func TestExternalHostTags(t *testing.T) { core.MockBundle(), demultiplexerimpl.MockModule(), haagentmock.Module(), - fx.Provide(func() optional.Option[serializer.MetricSerializer] { - return optional.NewNoneOption[serializer.MetricSerializer]() + fx.Provide(func() option.Option[serializer.MetricSerializer] { + return option.None[serializer.MetricSerializer]() }), fx.Replace(config.MockParams{ Overrides: map[string]interface{}{"check_cancel_timeout": 500 * time.Millisecond}, diff --git a/comp/collector/collector/collectorimpl/collector.go b/comp/collector/collector/collectorimpl/collector.go index 94b10a962d3ed..59f16e2be3544 100644 --- a/comp/collector/collector/collectorimpl/collector.go +++ b/comp/collector/collector/collectorimpl/collector.go @@ -38,7 +38,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/serializer" collectorStatus "github.com/DataDog/datadog-agent/pkg/status/collector" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -55,7 +55,7 @@ type dependencies struct { HaAgent haagent.Component SenderManager sender.SenderManager - MetricSerializer optional.Option[serializer.MetricSerializer] + MetricSerializer option.Option[serializer.MetricSerializer] } type collectorImpl struct { @@ -64,7 +64,7 @@ type collectorImpl struct { haAgent haagent.Component senderManager sender.SenderManager 
- metricSerializer optional.Option[serializer.MetricSerializer] + metricSerializer option.Option[serializer.MetricSerializer] checkInstances int64 // state is 'started' or 'stopped' @@ -95,8 +95,8 @@ type provides struct { func Module() fxutil.Module { return fxutil.Component( fx.Provide(newProvides), - fx.Provide(func(c collector.Component) optional.Option[collector.Component] { - return optional.NewOption[collector.Component](c) + fx.Provide(func(c collector.Component) option.Option[collector.Component] { + return option.New[collector.Component](c) }), ) } diff --git a/comp/collector/collector/collectorimpl/collector_demux_test.go b/comp/collector/collector/collectorimpl/collector_demux_test.go index ed91be6ef1ade..e6210a69cf0d8 100644 --- a/comp/collector/collector/collectorimpl/collector_demux_test.go +++ b/comp/collector/collector/collectorimpl/collector_demux_test.go @@ -22,7 +22,8 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" @@ -33,7 +34,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type CollectorDemuxTestSuite struct { @@ -82,7 +83,7 @@ func (s *SenderManagerProxy) GetDefaultSender() (sender.Sender, error) { } func (suite *CollectorDemuxTestSuite) SetupTest() { - suite.demux = fxutil.Test[demultiplexer.FakeSamplerMock](suite.T(), fx.Provide(func() log.Component { return logmock.New(suite.T()) }), compressionmock.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) + suite.demux = fxutil.Test[demultiplexer.FakeSamplerMock](suite.T(), fx.Provide(func() log.Component { return logmock.New(suite.T()) }), metricscompressionmock.MockModule(), logscompressionmock.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) suite.SenderManagerMock = NewSenderManagerMock(suite.demux) suite.c = newCollector(fxutil.Test[dependencies](suite.T(), core.MockBundle(), @@ -90,8 +91,8 @@ func (suite *CollectorDemuxTestSuite) SetupTest() { fx.Provide(func() sender.SenderManager { return suite.SenderManagerMock }), - fx.Provide(func() optional.Option[serializer.MetricSerializer] { - return optional.NewNoneOption[serializer.MetricSerializer]() + fx.Provide(func() option.Option[serializer.MetricSerializer] { + return option.None[serializer.MetricSerializer]() }), fx.Replace(config.MockParams{ Overrides: map[string]interface{}{"check_cancel_timeout": 500 * time.Millisecond}, diff --git a/comp/collector/collector/collectorimpl/collector_test.go b/comp/collector/collector/collectorimpl/collector_test.go index 44ddc7e2f357c..92ff610b1dc02 100644 --- a/comp/collector/collector/collectorimpl/collector_test.go +++ b/comp/collector/collector/collectorimpl/collector_test.go @@ -29,7 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check/stub" 
"github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // FIXTURE @@ -99,8 +99,8 @@ func (suite *CollectorTestSuite) SetupTest() { core.MockBundle(), demultiplexerimpl.MockModule(), haagentmock.Module(), - fx.Provide(func() optional.Option[serializer.MetricSerializer] { - return optional.NewNoneOption[serializer.MetricSerializer]() + fx.Provide(func() option.Option[serializer.MetricSerializer] { + return option.None[serializer.MetricSerializer]() }), fx.Replace(config.MockParams{ Overrides: map[string]interface{}{"check_cancel_timeout": 500 * time.Millisecond}, diff --git a/comp/collector/collector/component.go b/comp/collector/collector/component.go index 0e0dafbdcc3f9..2752c9d0dd5f6 100644 --- a/comp/collector/collector/component.go +++ b/comp/collector/collector/component.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "go.uber.org/fx" ) @@ -53,8 +53,8 @@ type Component interface { // the implementation to avoid linking with the implementation. func NoneModule() fxutil.Module { return fxutil.Component( - fx.Provide(func() optional.Option[Component] { - return optional.NewNoneOption[Component]() + fx.Provide(func() option.Option[Component] { + return option.None[Component]() }), ) } diff --git a/comp/core/agenttelemetry/impl/agenttelemetry.go b/comp/core/agenttelemetry/impl/agenttelemetry.go index 2b54f2b1eb5c7..0109e4445a35b 100644 --- a/comp/core/agenttelemetry/impl/agenttelemetry.go +++ b/comp/core/agenttelemetry/impl/agenttelemetry.go @@ -208,12 +208,15 @@ func (a *atel) aggregateMetricTags(mCfg *MetricConfig, mt dto.MetricType, ms []* // create a key from the tags (and drop not specified in the configuration tags) var specTags = make([]*dto.LabelPair, 0, len(origTags)) + var sb strings.Builder for _, t := range tags { if _, ok := mCfg.aggregateTagsMap[t.GetName()]; ok { specTags = append(specTags, t) - tagsKey += makeLabelPairKey(t) + sb.WriteString(makeLabelPairKey(t)) } } + tagsKey = sb.String() + if mCfg.AggregateTotal { aggregateMetric(mt, totalm, m) } @@ -255,65 +258,73 @@ func (a *atel) aggregateMetricTags(mCfg *MetricConfig, mt dto.MetricType, ms []* return maps.Values(amMap) } +// Using Prometheus terminology. Metrics name or in "Prom" MetricFamily is technically a Datadog metrics. +// dto.Metric are a metric values for each timeseries (tag/value combination). func buildKeysForMetricsPreviousValues(mt dto.MetricType, metricName string, metrics []*dto.Metric) []string { keyNames := make([]string, 0, len(metrics)) for _, m := range metrics { var keyName string tags := m.GetLabel() if len(tags) == 0 { - // start with the metric name + // For "tagless" MetricFamily, len(metrics) will be 1, with single iteration and m.GetLabel() + // will be nil. Accordingly, to form a key for that metric its name alone is sufficient. 
keyName = metricName } else { - // Sort tags to stability of the key - sortedTags := cloneLabelsSorted(tags) - var builder strings.Builder - - // start with the metric name plus the tags - builder.WriteString(metricName) - for _, tag := range sortedTags { - builder.WriteString(makeLabelPairKey(tag)) - } - keyName = builder.String() + //If the metric has tags, len(metrics) will be equal to the number of metric's timeseries. + // Each timeseries or "m" on each iteration in this code, will contain a set of unique + // tagset (as m.GetLabel()). Accordingly, each timeseries should be represented by a unique + // and stable (reproducible) key formed by tagset key names and values. + keyName = fmt.Sprintf("%s%s:", metricName, convertLabelsToKey(tags)) } if mt == dto.MetricType_HISTOGRAM { - // add bucket names to the key + // On each iteration for metrics without tags (only 1 iteration) or with tags (iteration per + // timeseries). If the metric is a HISTOGRAM, each timeseries bucket individually plus + // implicit "+Inf" bucket. For example, for 3 timeseries with 4-bucket histogram, we will + // track 15 values using 15 keys (3x(4+1)). for _, bucket := range m.Histogram.GetBucket() { keyNames = append(keyNames, fmt.Sprintf("%v:%v", keyName, bucket.GetUpperBound())) } - } else { - keyNames = append(keyNames, keyName) } + + // Add the key for Counter, Gauge metric and HISTOGRAM's +Inf bucket + keyNames = append(keyNames, keyName) } return keyNames } +// Swap current value with the previous value and deduct the previous value from the current value +func deductAndUpdatePrevValue(key string, prevPromMetricValues map[string]uint64, curValue *uint64) { + origCurValue := *curValue + if prevValue, ok := prevPromMetricValues[key]; ok { + *curValue -= prevValue + } + prevPromMetricValues[key] = origCurValue +} + func convertPromHistogramsToDatadogHistogramsValues(metrics []*dto.Metric, prevPromMetricValues map[string]uint64, keyNames []string) { if len(metrics) > 0 { bucketCount := len(metrics[0].Histogram.GetBucket()) + var prevValue uint64 + for i, m := range metrics { - // First, deduct the previous cumulative count from the current one + // 1. deduct the previous cumulative count from each explicit buckets for j, b := range m.Histogram.GetBucket() { - key := keyNames[(i*bucketCount)+j] - curValue := b.GetCumulativeCount() - - // Adjust the counter value if found - if prevValue, ok := prevPromMetricValues[key]; ok { - *b.CumulativeCount -= prevValue - } - - // Upsert the cache of previous counter values - prevPromMetricValues[key] = curValue + deductAndUpdatePrevValue(keyNames[(i*(bucketCount+1))+j], prevPromMetricValues, b.CumulativeCount) } + // 2. deduct the previous cumulative count from the implicit "+Inf" bucket + deductAndUpdatePrevValue(keyNames[((i+1)*(bucketCount+1))-1], prevPromMetricValues, m.Histogram.SampleCount) - // Then, de-cumulate next bucket value from the previous bucket values - var prevValue uint64 + // 3. "De-cumulate" next explicit bucket value from the preceding bucket value + prevValue = 0 for _, b := range m.Histogram.GetBucket() { curValue := b.GetCumulativeCount() *b.CumulativeCount -= prevValue prevValue = curValue } + // 4. "De-cumulate" implicit "+Inf" bucket value from the preceding bucket value + *m.Histogram.SampleCount -= prevValue } } } @@ -443,13 +454,21 @@ func (a *atel) reportAgentMetrics(session *senderSession, pms []*telemetry.Metri func (a *atel) loadPayloads(profiles []*Profile) (*senderSession, error) { // Gather all prom metrics. 
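
The deductAndUpdatePrevValue and convertPromHistogramsToDatadogHistogramsValues helpers above turn cumulative Prometheus bucket counts into per-interval, per-bucket counts, deriving the implicit "+Inf" bucket from the histogram's sample count. A minimal standalone sketch of that de-cumulation step, assuming plain uint64 slices rather than the dto.Metric types used in this patch (decumulate is an illustrative name, not part of the patch):

    package main

    import "fmt"

    // decumulate turns cumulative Prometheus bucket counts into per-bucket counts.
    // sampleCount covers every observation, so sampleCount minus the last cumulative
    // bucket count yields the implicit "+Inf" bucket.
    func decumulate(cumulative []uint64, sampleCount uint64) []uint64 {
        out := make([]uint64, 0, len(cumulative)+1)
        var prev uint64
        for _, c := range cumulative {
            out = append(out, c-prev)
            prev = c
        }
        return append(out, sampleCount-prev) // implicit "+Inf" bucket
    }

    func main() {
        // Cumulative counts for upper bounds {1, 2, 5, 100} and 16 samples in total.
        fmt.Println(decumulate([]uint64{5, 5, 8, 14}, 16)) // [5 0 3 6 2]
    }
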
Currently Gather() does not allow filtering by
 	// metric name, so we need to gather all metrics and filter them on our own.
-	// pms, err := a.telemetry.Gather(false)
 	pms, err := a.telComp.Gather(false)
 	if err != nil {
 		a.logComp.Errorf("failed to get filtered telemetry metrics: %v", err)
 		return nil, err
 	}
+	// Ensure that metrics from the default Prometheus registry are also collected.
+	pmsDefault, errDefault := a.telComp.Gather(true)
+	if errDefault == nil {
+		pms = append(pms, pmsDefault...)
+	} else {
+		// Not a fatal error, just log it
+		a.logComp.Errorf("failed to get default registry telemetry metrics: %v", errDefault)
+	}
+
 	session := a.sender.startSession(a.cancelCtx)
 	for _, p := range profiles {
 		a.reportAgentMetrics(session, pms, p)
diff --git a/comp/core/agenttelemetry/impl/agenttelemetry_test.go b/comp/core/agenttelemetry/impl/agenttelemetry_test.go
index f0524cae80129..ddd1edfb1c751 100644
--- a/comp/core/agenttelemetry/impl/agenttelemetry_test.go
+++ b/comp/core/agenttelemetry/impl/agenttelemetry_test.go
@@ -10,6 +10,7 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"maps"
 	"net/http"
 	"testing"

@@ -27,6 +28,7 @@ import (
 	"github.com/DataDog/datadog-agent/comp/core/telemetry"
 	"github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl"
 	"github.com/DataDog/datadog-agent/pkg/util/fxutil"
+	"github.com/DataDog/zstd"
 )

 // HTTP client mock
@@ -138,12 +140,14 @@ func makeLogMock(t *testing.T) log.Component {
 	return logmock.New(t)
 }

-func makeSenderImpl(t *testing.T, c string) sender {
+func makeSenderImpl(t *testing.T, cl client, c string) sender {
 	o := convertYamlStrToMap(t, c)
 	cfg := makeCfgMock(t, o)
 	log := makeLogMock(t)
-	client := newClientMock()
-	sndr, err := newSenderImpl(cfg, log, client)
+	if cl == nil {
+		cl = newClientMock()
+	}
+	sndr, err := newSenderImpl(cfg, log, cl)
 	assert.NoError(t, err)
 	return sndr
 }
@@ -198,6 +202,94 @@ func getCommonOverrideConfig(enabled bool, site string) map[string]any {
 	}
 }

+func (p *Payload) UnmarshalAgentMetrics(itfPayload map[string]interface{}) error {
+	var ok bool
+
+	p.RequestType = "agent-metrics"
+	p.APIVersion = itfPayload["request_type"].(string)
+
+	var metricsItfPayload map[string]interface{}
+	metricsItfPayload, ok = itfPayload["payload"].(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("payload not found")
+	}
+	var metricsItf map[string]interface{}
+	metricsItf, ok = metricsItfPayload["metrics"].(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("metrics not found")
+	}
+
+	var err error
+	var metricsPayload AgentMetricsPayload
+	metricsPayload.Metrics = make(map[string]interface{})
+	for k, v := range metricsItf {
+		if k == "agent_metadata" {
+			// Re(un)marshal the metadata
+			var metadata AgentMetadataPayload
+			var metadataBytes []byte
+			if metadataBytes, err = json.Marshal(v); err != nil {
+				return err
+			}
+			if err = json.Unmarshal(metadataBytes, &metadata); err != nil {
+				return err
+			}
+			metricsPayload.Metrics[k] = metadata
+		} else {
+			// Re(un)marshal the metric
+			var metric MetricPayload
+			var metricBytes []byte
+			if metricBytes, err = json.Marshal(v); err != nil {
+				return err
+			}
+			if err = json.Unmarshal(metricBytes, &metric); err != nil {
+				return err
+			}
+			metricsPayload.Metrics[k] = metric
+		}
+	}
+	p.Payload = metricsPayload
+	return nil
+}
+
+func (p *Payload) UnmarshalMessageBatch(itfPayload map[string]interface{}) error {
+	payloadsRaw, ok := itfPayload["payload"].([]interface{})
+	if !ok {
+		return fmt.Errorf("payload not found")
+	}
+
+	// ensure all child payloads are agent-metrics
+	var payloads
[]Payload + for _, payloadRaw := range payloadsRaw { + itfChildPayload, ok := payloadRaw.(map[string]interface{}) + if !ok { + return fmt.Errorf("invalid payload item type") + } + + requestTypeRaw, ok := itfChildPayload["request_type"] + if !ok { + return fmt.Errorf("request_type not found") + } + requestType, ok := requestTypeRaw.(string) + if !ok { + return fmt.Errorf("request_type type is invalid") + } + + if requestType != "agent-metrics" { + return fmt.Errorf("request_type should be agent-metrics") + } + + var payload Payload + if err := payload.UnmarshalAgentMetrics(itfChildPayload); err != nil { + return err + } + payloads = append(payloads, payload) + + } + p.Payload = payloads + + return nil +} + // This is a unit test function do not use it for actual code (at least yet) // since it is not 100% full implementation of the unmarshalling func (p *Payload) UnmarshalJSON(b []byte) (err error) { @@ -206,60 +298,21 @@ func (p *Payload) UnmarshalJSON(b []byte) (err error) { return err } - requestType, ok := itfPayload["request_type"] + requestTypeRaw, ok := itfPayload["request_type"] if !ok { return fmt.Errorf("request_type not found") } - if requestType.(string) == "agent-metrics" { - p.RequestType = requestType.(string) - p.APIVersion = itfPayload["request_type"].(string) - p.EventTime = int64(itfPayload["event_time"].(float64)) - p.DebugFlag = itfPayload["debug"].(bool) - - var metricsItfPayload map[string]interface{} - metricsItfPayload, ok = itfPayload["payload"].(map[string]interface{}) - if !ok { - return fmt.Errorf("payload not found") - } - var metricsItf map[string]interface{} - metricsItf, ok = metricsItfPayload["metrics"].(map[string]interface{}) - if !ok { - return fmt.Errorf("metrics not found") - } + requestType, ok := requestTypeRaw.(string) + if !ok { + return fmt.Errorf("request_type type is invalid") + } - var metricsPayload AgentMetricsPayload - metricsPayload.Metrics = make(map[string]interface{}) - for k, v := range metricsItf { - if k == "agent_metadata" { - // Re(un)marshal the meatadata - var metadata AgentMetadataPayload - var metadataBytes []byte - if metadataBytes, err = json.Marshal(v); err != nil { - return err - } - if err = json.Unmarshal(metadataBytes, &metadata); err != nil { - return err - } - metricsPayload.Metrics[k] = metadata - } else { - // Re(un)marshal the metric - var metric MetricPayload - var metricBytes []byte - if metricBytes, err = json.Marshal(v); err != nil { - return err - } - if err = json.Unmarshal(metricBytes, &metric); err != nil { - return err - } - metricsPayload.Metrics[k] = metric - } - } - p.Payload = metricsPayload - return nil + if requestType == "agent-metrics" { + return p.UnmarshalAgentMetrics(itfPayload) } - if requestType.(string) == "message-batch" { - return fmt.Errorf("message-batch request_type is not supported yet") + if requestType == "message-batch" { + return p.UnmarshalMessageBatch(itfPayload) } return fmt.Errorf("request_type should be either agent-metrics or message-batch") @@ -290,6 +343,68 @@ func getPayloadMetric(a *atel, metricName string) (*MetricPayload, bool) { return nil, false } +// If you have multiple metrics with the same name (timeseries) and filtered by a metric, use getPayloadFilteredMetricList +func getPayloadFilteredMetricList(a *atel, metricName string) ([]*MetricPayload, bool) { + payload, err := getPayload(a) + if err != nil { + return nil, false + } + + var payloads []*MetricPayload + for _, payload := range payload.Payload.([]Payload) { + metrics := 
payload.Payload.(AgentMetricsPayload).Metrics + if metricItf, ok := metrics[metricName]; ok { + metric := metricItf.(MetricPayload) + payloads = append(payloads, &metric) + } + } + + return payloads, true +} + +// If you have multiple metrics with different name (timeseries), meaning no multiple tags use getPayloadMetricMap +func getPayloadMetricMap(a *atel) map[string]*MetricPayload { + payload, err := getPayload(a) + if err != nil { + return nil + } + + payloads := make(map[string]*MetricPayload) + + if mm, ok := payload.Payload.([]Payload); ok { + for _, payload := range mm { + metrics := payload.Payload.(AgentMetricsPayload).Metrics + for metricName, metricItf := range metrics { + metric := metricItf.(MetricPayload) + payloads[metricName] = &metric + } + } + return payloads + } + + if m, ok := payload.Payload.(AgentMetricsPayload); ok { + metrics := m.Metrics + for metricName, metricItf := range metrics { + if metric, ok2 := metricItf.(MetricPayload); ok2 { + payloads[metricName] = &metric + } + } + return payloads + } + + return nil +} + +func getPayloadMetricByTagValues(metrics []*MetricPayload, tags map[string]interface{}) (*MetricPayload, bool) { + for _, m := range metrics { + if maps.Equal(m.Tags, tags) { + return m, true + } + } + + return nil, false +} + // Validate the payload // metric, ok := metrics["foo.bar"] @@ -419,7 +534,7 @@ func TestNoTagSpecifiedAggregationCounter(t *testing.T) { assert.Nil(t, m.GetLabel()) } -func TestNoTagSpecifiedAggregationGauge(t *testing.T) { +func TestNoTagSpecifiedExplicitAggregationGauge(t *testing.T) { var c = ` agent_telemetry: enabled: true @@ -459,6 +574,45 @@ func TestNoTagSpecifiedAggregationGauge(t *testing.T) { assert.Nil(t, m.GetLabel()) } +func TestNoTagSpecifiedImplicitAggregationGauge(t *testing.T) { + var c = ` + agent_telemetry: + enabled: true + profiles: + - name: foo + metric: + metrics: + - name: bar.zoo + ` + + // setup and initiate atel + tel := makeTelMock(t) + gauge := tel.NewGauge("bar", "zoo", []string{"tag1", "tag2", "tag3"}, "") + gauge.WithTags(map[string]string{"tag1": "a1", "tag2": "b1", "tag3": "c1"}).Set(10) + gauge.WithTags(map[string]string{"tag1": "a2", "tag2": "b2", "tag3": "c2"}).Set(20) + gauge.WithTags(map[string]string{"tag1": "a3", "tag2": "b3", "tag3": "c3"}).Set(30) + + o := convertYamlStrToMap(t, c) + s := &senderMock{} + r := newRunnerMock() + a := getTestAtel(t, tel, o, s, nil, r) + require.True(t, a.enabled) + + // run the runner to trigger the telemetry report + a.start() + r.(*runnerMock).run() + + // 1 metric sent + assert.Equal(t, 1, len(s.sentMetrics)) + + // aggregated to 10 + 20 + 30 = 60 + m := s.sentMetrics[0].metrics[0] + assert.Equal(t, float64(60), m.Gauge.GetValue()) + + // no tags + assert.Nil(t, m.GetLabel()) +} + func TestNoTagSpecifiedAggregationHistogram(t *testing.T) { var c = ` agent_telemetry: @@ -474,10 +628,10 @@ func TestNoTagSpecifiedAggregationHistogram(t *testing.T) { // setup and initiate atel tel := makeTelMock(t) buckets := []float64{10, 100, 1000, 10000} - gauge := tel.NewHistogram("bar", "zoo", []string{"tag1", "tag2", "tag3"}, "", buckets) - gauge.WithTags(map[string]string{"tag1": "a1", "tag2": "b1", "tag3": "c1"}).Observe(1001) - gauge.WithTags(map[string]string{"tag1": "a2", "tag2": "b2", "tag3": "c2"}).Observe(1002) - gauge.WithTags(map[string]string{"tag1": "a3", "tag2": "b3", "tag3": "c3"}).Observe(1003) + hist := tel.NewHistogram("bar", "zoo", []string{"tag1", "tag2", "tag3"}, "", buckets) + hist.WithTags(map[string]string{"tag1": "a1", "tag2": "b1", "tag3": 
"c1"}).Observe(1001) + hist.WithTags(map[string]string{"tag1": "a2", "tag2": "b2", "tag3": "c2"}).Observe(1002) + hist.WithTags(map[string]string{"tag1": "a3", "tag2": "b3", "tag3": "c3"}).Observe(1003) o := convertYamlStrToMap(t, c) s := &senderMock{} @@ -636,7 +790,7 @@ func TestTwoProfilesOnTheSameScheduleGenerateSinglePayload(t *testing.T) { counter2.AddWithTags(20, map[string]string{"tag1": "a1", "tag2": "b1", "tag3": "c1"}) o := convertYamlStrToMap(t, c) - s := makeSenderImpl(t, c) + s := makeSenderImpl(t, nil, c) r := newRunnerMock() a := getTestAtel(t, tel, o, s, nil, r) require.True(t, a.enabled) @@ -673,7 +827,7 @@ func TestOneProfileWithOneMetricMultipleContextsGenerateTwoPayloads(t *testing.T counter1.AddWithTags(20, map[string]string{"tag1": "a2", "tag2": "b2", "tag3": "c2"}) o := convertYamlStrToMap(t, c) - s := makeSenderImpl(t, c) + s := makeSenderImpl(t, nil, c) r := newRunnerMock() a := getTestAtel(t, tel, o, s, nil, r) require.True(t, a.enabled) @@ -749,7 +903,7 @@ func TestOneProfileWithTwoMetricGenerateSinglePayloads(t *testing.T) { counter2.AddWithTags(20, map[string]string{"tag1": "a1", "tag2": "b1", "tag3": "c1"}) o := convertYamlStrToMap(t, c) - s := makeSenderImpl(t, c) + s := makeSenderImpl(t, nil, c) r := newRunnerMock() a := getTestAtel(t, tel, o, s, nil, r) require.True(t, a.enabled) @@ -771,7 +925,7 @@ func TestSenderConfigNoConfig(t *testing.T) { agent_telemetry: enabled: true ` - sndr := makeSenderImpl(t, c) + sndr := makeSenderImpl(t, nil, c) url := buildURL(sndr.(*senderImpl).endpoints.Main) assert.Equal(t, "https://instrumentation-telemetry-intake.datadoghq.com/api/v2/apmtelemetry", url) @@ -799,7 +953,7 @@ func TestSenderConfigOnlySites(t *testing.T) { for _, tt := range tests { c := fmt.Sprintf(ctemp, tt.site) - sndr := makeSenderImpl(t, c) + sndr := makeSenderImpl(t, nil, c) url := buildURL(sndr.(*senderImpl).endpoints.Main) assert.Equal(t, tt.testURL, url) } @@ -816,7 +970,7 @@ func TestSenderConfigAdditionalEndpoint(t *testing.T) { - api_key: bar host: instrumentation-telemetry-intake.us5.datadoghq.com ` - sndr := makeSenderImpl(t, c) + sndr := makeSenderImpl(t, nil, c) assert.NotNil(t, sndr) assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 2) @@ -835,7 +989,7 @@ func TestSenderConfigPartialDDUrl(t *testing.T) { enabled: true dd_url: instrumentation-telemetry-intake.us5.datadoghq.com. ` - sndr := makeSenderImpl(t, c) + sndr := makeSenderImpl(t, nil, c) assert.NotNil(t, sndr) assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 1) @@ -852,7 +1006,7 @@ func TestSenderConfigFullDDUrl(t *testing.T) { enabled: true dd_url: https://instrumentation-telemetry-intake.us5.datadoghq.com. ` - sndr := makeSenderImpl(t, c) + sndr := makeSenderImpl(t, nil, c) assert.NotNil(t, sndr) assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 1) @@ -872,7 +1026,7 @@ func TestSenderConfigDDUrlWithAdditionalEndpoints(t *testing.T) { - api_key: bar host: instrumentation-telemetry-intake.us3.datadoghq.com. ` - sndr := makeSenderImpl(t, c) + sndr := makeSenderImpl(t, nil, c) assert.NotNil(t, sndr) assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 2) @@ -892,7 +1046,7 @@ func TestSenderConfigDDUrlWithEmptyAdditionalPoint(t *testing.T) { dd_url: instrumentation-telemetry-intake.us5.datadoghq.com. 
additional_endpoints: ` - sndr := makeSenderImpl(t, c) + sndr := makeSenderImpl(t, nil, c) assert.NotNil(t, sndr) assert.Len(t, sndr.(*senderImpl).endpoints.Endpoints, 1) @@ -932,7 +1086,7 @@ func TestGetAsJSONScrub(t *testing.T) { counter3.AddWithTags(11, map[string]string{"text": "test"}) o := convertYamlStrToMap(t, c) - s := makeSenderImpl(t, c) + s := makeSenderImpl(t, nil, c) r := newRunnerMock() a := getTestAtel(t, tel, o, s, nil, r) require.True(t, a.enabled) @@ -955,7 +1109,7 @@ func TestGetAsJSONScrub(t *testing.T) { assert.Equal(t, "test", metric.(MetricPayload).Tags["text"]) } -func TestAdjustPrometheusCounterValue(t *testing.T) { +func TestAdjustPrometheusCounterValueMultipleTags(t *testing.T) { var c = ` agent_telemetry: enabled: true @@ -980,7 +1134,7 @@ func TestAdjustPrometheusCounterValue(t *testing.T) { // setup and initiate atel tel := makeTelMock(t) o := convertYamlStrToMap(t, c) - s := makeSenderImpl(t, c) + s := makeSenderImpl(t, nil, c) r := newRunnerMock() a := getTestAtel(t, tel, o, s, nil, r) require.True(t, a.enabled) @@ -1073,6 +1227,189 @@ func TestAdjustPrometheusCounterValue(t *testing.T) { } } +func TestAdjustPrometheusCounterValueMultipleTagValues(t *testing.T) { + var c = ` + agent_telemetry: + enabled: true + profiles: + - name: xxx + metric: + metrics: + - name: foo.bar + aggregate_tags: + - tag + ` + + // setup and initiate atel + tel := makeTelMock(t) + o := convertYamlStrToMap(t, c) + s := makeSenderImpl(t, nil, c) + r := newRunnerMock() + a := getTestAtel(t, tel, o, s, nil, r) + require.True(t, a.enabled) + + // setup metrics using few family names, metric names and tag- and tag-less counters + // to test various scenarios + counter := tel.NewCounter("foo", "bar", []string{"tag"}, "") + + // First addition (expected values should be the same as the added values) + counter.AddWithTags(1, map[string]string{"tag": "val1"}) + counter.AddWithTags(2, map[string]string{"tag": "val2"}) + + ms, ok := getPayloadFilteredMetricList(a, "foo.bar") + require.True(t, ok) + m1, ok1 := getPayloadMetricByTagValues(ms, map[string]interface{}{"tag": "val1"}) + require.True(t, ok1) + assert.Equal(t, m1.Value, 1.0) + m2, ok2 := getPayloadMetricByTagValues(ms, map[string]interface{}{"tag": "val2"}) + require.True(t, ok2) + assert.Equal(t, m2.Value, 2.0) + + // Second addition (expected values should be the same as the added values) + counter.AddWithTags(10, map[string]string{"tag": "val1"}) + counter.AddWithTags(20, map[string]string{"tag": "val2"}) + ms, ok = getPayloadFilteredMetricList(a, "foo.bar") + require.True(t, ok) + m1, ok1 = getPayloadMetricByTagValues(ms, map[string]interface{}{"tag": "val1"}) + require.True(t, ok1) + assert.Equal(t, m1.Value, 10.0) + m2, ok2 = getPayloadMetricByTagValues(ms, map[string]interface{}{"tag": "val2"}) + require.True(t, ok2) + assert.Equal(t, m2.Value, 20.0) + + // Third and fourth addition (expected values should be the sum of 3rd and 4th values) + counter.AddWithTags(100, map[string]string{"tag": "val1"}) + counter.AddWithTags(200, map[string]string{"tag": "val2"}) + ms, ok = getPayloadFilteredMetricList(a, "foo.bar") + require.True(t, ok) + m1, ok1 = getPayloadMetricByTagValues(ms, map[string]interface{}{"tag": "val1"}) + require.True(t, ok1) + assert.Equal(t, m1.Value, 100.0) + m2, ok2 = getPayloadMetricByTagValues(ms, map[string]interface{}{"tag": "val2"}) + require.True(t, ok2) + assert.Equal(t, m2.Value, 200.0) + + // No addition (expected values should be zero) + ms, ok = getPayloadFilteredMetricList(a, "foo.bar") + 
require.True(t, ok) + m1, ok1 = getPayloadMetricByTagValues(ms, map[string]interface{}{"tag": "val1"}) + require.True(t, ok1) + assert.Equal(t, m1.Value, 0.0) + m2, ok2 = getPayloadMetricByTagValues(ms, map[string]interface{}{"tag": "val2"}) + require.True(t, ok2) + assert.Equal(t, m2.Value, 0.0) +} + +func TestAdjustPrometheusCounterValueTagless(t *testing.T) { + var c = ` + agent_telemetry: + enabled: true + profiles: + - name: xxx + metric: + metrics: + - name: foo.bar + - name: foo.cat + - name: zoo.bar + - name: zoo.cat + ` + + // setup and initiate atel + tel := makeTelMock(t) + o := convertYamlStrToMap(t, c) + s := makeSenderImpl(t, nil, c) + r := newRunnerMock() + a := getTestAtel(t, tel, o, s, nil, r) + require.True(t, a.enabled) + + // setup metrics using few family names, metric names and tag- and tag-less counters + // to test various scenarios + counter1 := tel.NewCounter("foo", "bar", nil, "") + counter2 := tel.NewCounter("foo", "cat", nil, "") + counter3 := tel.NewCounter("zoo", "bar", nil, "") + counter4 := tel.NewCounter("zoo", "cat", nil, "") + + // First addition (expected values should be the same as the added values) + counter1.Add(1) + counter2.Add(2) + counter3.Add(3) + counter4.Add(4) + payload1, err1 := getPayload(a) + require.NoError(t, err1) + metrics1 := payload1.Payload.(AgentMetricsPayload).Metrics + expecVals1 := map[string]float64{ + "foo.bar": 1.0, + "foo.cat": 2.0, + "zoo.bar": 3.0, + "zoo.cat": 4.0, + } + for ek, ev := range expecVals1 { + v, ok := metrics1[ek] + require.True(t, ok) + assert.Equal(t, ev, v.(MetricPayload).Value) + } + + // Second addition (expected values should be the same as the added values) + counter1.Add(10) + counter2.Add(20) + counter3.Add(30) + counter4.Add(40) + payload2, err2 := getPayload(a) + require.NoError(t, err2) + metrics2 := payload2.Payload.(AgentMetricsPayload).Metrics + expecVals2 := map[string]float64{ + "foo.bar": 10.0, + "foo.cat": 20.0, + "zoo.bar": 30.0, + "zoo.cat": 40.0, + } + for ek, ev := range expecVals2 { + v, ok := metrics2[ek] + require.True(t, ok) + assert.Equal(t, ev, v.(MetricPayload).Value) + } + + // Third and fourth addition (expected values should be the sum of 3rd and 4th values) + counter1.Add(100) + counter2.Add(200) + counter3.Add(300) + counter4.Add(400) + counter1.Add(1000) + counter2.Add(2000) + counter3.Add(3000) + counter4.Add(4000) + payload34, err34 := getPayload(a) + require.NoError(t, err34) + metrics34 := payload34.Payload.(AgentMetricsPayload).Metrics + expecVals34 := map[string]float64{ + "foo.bar": 1100.0, + "foo.cat": 2200.0, + "zoo.bar": 3300.0, + "zoo.cat": 4400.0, + } + for ek, ev := range expecVals34 { + v, ok := metrics34[ek] + require.True(t, ok) + assert.Equal(t, ev, v.(MetricPayload).Value) + } + + // No addition (expected values should be zero) + payload5, err5 := getPayload(a) + require.NoError(t, err5) + metrics5 := payload5.Payload.(AgentMetricsPayload).Metrics + expecVals5 := map[string]float64{ + "foo.bar": 0.0, + "foo.cat": 0.0, + "zoo.bar": 0.0, + "zoo.cat": 0.0, + } + for ek, ev := range expecVals5 { + v, ok := metrics5[ek] + require.True(t, ok) + assert.Equal(t, ev, v.(MetricPayload).Value) + } +} + func TestHistogramFloatUpperBoundNormalization(t *testing.T) { var c = ` agent_telemetry: @@ -1087,7 +1424,7 @@ func TestHistogramFloatUpperBoundNormalization(t *testing.T) { // setup and initiate atel tel := makeTelMock(t) o := convertYamlStrToMap(t, c) - s := makeSenderImpl(t, c) + s := makeSenderImpl(t, nil, c) r := newRunnerMock() a := getTestAtel(t, tel, o, s, 
nil, r) require.True(t, a.enabled) @@ -1113,16 +1450,20 @@ func TestHistogramFloatUpperBoundNormalization(t *testing.T) { hist.Observe(100) hist.Observe(100) hist.Observe(100) + // +inf - 2 + hist.Observe(10000) + hist.Observe(20000) // Test payload1 metric1, ok := getPayloadMetric(a, "foo.bar") require.True(t, ok) - require.True(t, len(metric1.Buckets) > 0) + require.Len(t, metric1.Buckets, 5) expecVals1 := map[string]uint64{ - "1": 5, - "2": 0, - "5": 3, - "100": 6, + "1": 5, + "2": 0, + "5": 3, + "100": 6, + "+Inf": 2, } for k, b := range metric1.Buckets { assert.Equal(t, expecVals1[k], b) @@ -1131,12 +1472,13 @@ func TestHistogramFloatUpperBoundNormalization(t *testing.T) { // Test payload2 (no new observations, everything is reset) metric2, ok := getPayloadMetric(a, "foo.bar") require.True(t, ok) - require.True(t, len(metric2.Buckets) > 0) + require.Len(t, metric2.Buckets, 5) expecVals2 := map[string]uint64{ - "1": 0, - "2": 0, - "5": 0, - "100": 0, + "1": 0, + "2": 0, + "5": 0, + "100": 0, + "+Inf": 0, } for k, b := range metric2.Buckets { assert.Equal(t, expecVals2[k], b) @@ -1162,15 +1504,21 @@ func TestHistogramFloatUpperBoundNormalization(t *testing.T) { hist.Observe(100) hist.Observe(100) hist.Observe(100) + // +inf - 3 + hist.Observe(10000) + hist.Observe(20000) + hist.Observe(30000) + // Test payload3 metric3, ok := getPayloadMetric(a, "foo.bar") require.True(t, ok) - require.True(t, len(metric3.Buckets) > 0) + require.Len(t, metric3.Buckets, 5) expecVals3 := map[string]uint64{ - "1": 5, - "2": 0, - "5": 3, - "100": 6, + "1": 5, + "2": 0, + "5": 3, + "100": 6, + "+Inf": 3, } for k, b := range metric3.Buckets { assert.Equal(t, expecVals3[k], b) @@ -1202,7 +1550,7 @@ func TestHistogramFloatUpperBoundNormalizationWithTags(t *testing.T) { // setup and initiate atel tel := makeTelMock(t) o := convertYamlStrToMap(t, c) - s := makeSenderImpl(t, c) + s := makeSenderImpl(t, nil, c) r := newRunnerMock() a := getTestAtel(t, tel, o, s, nil, r) require.True(t, a.enabled) @@ -1232,12 +1580,13 @@ func TestHistogramFloatUpperBoundNormalizationWithTags(t *testing.T) { // Test payload1 metric1, ok := getPayloadMetric(a, "foo.bar") require.True(t, ok) - require.True(t, len(metric1.Buckets) > 0) + require.Len(t, metric1.Buckets, 5) expecVals1 := map[string]uint64{ - "1": 5, - "2": 0, - "5": 3, - "100": 6, + "1": 5, + "2": 0, + "5": 3, + "100": 6, + "+inf": 0, } for k, b := range metric1.Buckets { assert.Equal(t, expecVals1[k], b) @@ -1246,12 +1595,13 @@ func TestHistogramFloatUpperBoundNormalizationWithTags(t *testing.T) { // Test payload2 (no new observations, everything is reset) metric2, ok := getPayloadMetric(a, "foo.bar") require.True(t, ok) - require.True(t, len(metric2.Buckets) > 0) + require.Len(t, metric2.Buckets, 5) expecVals2 := map[string]uint64{ - "1": 0, - "2": 0, - "5": 0, - "100": 0, + "1": 0, + "2": 0, + "5": 0, + "100": 0, + "+inf": 0, } for k, b := range metric2.Buckets { assert.Equal(t, expecVals2[k], b) @@ -1280,12 +1630,13 @@ func TestHistogramFloatUpperBoundNormalizationWithTags(t *testing.T) { // Test payload3 metric3, ok := getPayloadMetric(a, "foo.bar") require.True(t, ok) - require.True(t, len(metric3.Buckets) > 0) + require.Len(t, metric3.Buckets, 5) expecVals3 := map[string]uint64{ - "1": 5, - "2": 0, - "5": 3, - "100": 6, + "1": 5, + "2": 0, + "5": 3, + "100": 6, + "+inf": 0, } for k, b := range metric3.Buckets { assert.Equal(t, expecVals3[k], b) @@ -1299,3 +1650,429 @@ func TestHistogramFloatUpperBoundNormalizationWithTags(t *testing.T) { assert.Equal(t, 
expecVals4[i], b.Count) } } + +func TestHistogramFloatUpperBoundNormalizationWithMultivalueTags(t *testing.T) { + var c = ` + agent_telemetry: + enabled: true + profiles: + - name: xxx + metric: + metrics: + - name: foo.bar + aggregate_tags: + - tag + ` + + // setup and initiate atel + tel := makeTelMock(t) + o := convertYamlStrToMap(t, c) + s := makeSenderImpl(t, nil, c) + r := newRunnerMock() + a := getTestAtel(t, tel, o, s, nil, r) + require.True(t, a.enabled) + + // setup and initiate atel + hist := tel.NewHistogram("foo", "bar", []string{"tag"}, "", []float64{1, 2, 5, 100}) + + // bucket 0 - 5 + hist.Observe(1, "val1") + hist.Observe(1, "val1") + hist.Observe(1, "val1") + hist.Observe(1, "val1") + hist.Observe(1, "val1") + // bucket 1 - 0 + // .. + // bucket 2 - 3 + hist.Observe(5, "val1") + hist.Observe(5, "val1") + hist.Observe(5, "val1") + // bucket 4 - 6 + hist.Observe(6, "val1") + hist.Observe(100, "val1") + hist.Observe(100, "val1") + hist.Observe(100, "val1") + hist.Observe(100, "val1") + hist.Observe(100, "val1") + // bucket +inf - 2 + hist.Observe(1000, "val1") + hist.Observe(2000, "val1") + + // bucket 0 - 10 + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + // bucket 1 - 5 + hist.Observe(2, "val2") + hist.Observe(2, "val2") + hist.Observe(2, "val2") + hist.Observe(2, "val2") + hist.Observe(2, "val2") + // bucket 2 - 6 + hist.Observe(5, "val2") + hist.Observe(5, "val2") + hist.Observe(5, "val2") + hist.Observe(5, "val2") + hist.Observe(5, "val2") + hist.Observe(5, "val2") + // bucket 4 - 12 + hist.Observe(6, "val2") + hist.Observe(6, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + // bucket +inf - 4 + hist.Observe(1000, "val2") + hist.Observe(1000, "val2") + hist.Observe(2000, "val2") + hist.Observe(2000, "val2") + + // Test payload1 + metrics1, ok := getPayloadFilteredMetricList(a, "foo.bar") + require.True(t, ok) + require.Len(t, metrics1, 2) + require.Len(t, metrics1[0].Buckets, 5) + expecVals1 := map[string]struct { + n1 uint64 + n2 uint64 + }{ + "1": {5, 10}, + "2": {0, 5}, + "5": {3, 6}, + "100": {6, 12}, + "+Inf": {2, 4}, + } + metrics11, ok := getPayloadMetricByTagValues(metrics1, map[string]interface{}{"tag": "val1"}) + require.True(t, ok) + for k, b := range metrics11.Buckets { + assert.Equal(t, expecVals1[k].n1, b) + } + metrics12, ok := getPayloadMetricByTagValues(metrics1, map[string]interface{}{"tag": "val2"}) + require.True(t, ok) + for k, b := range metrics12.Buckets { + assert.Equal(t, expecVals1[k].n2, b) + } + + // Test payload2 (no new observations, everything is reset) + metrics2, ok := getPayloadFilteredMetricList(a, "foo.bar") + require.True(t, ok) + require.Len(t, metrics2, 2) + require.Len(t, metrics2[0].Buckets, 5) + require.Len(t, metrics2[1].Buckets, 5) + expecVals2 := map[string]struct { + n1 uint64 + n2 uint64 + }{ + "1": {0, 0}, + "2": {0, 0}, + "5": {0, 0}, + "100": {0, 0}, + "+Inf": {0, 0}, + } + metrics21, ok := getPayloadMetricByTagValues(metrics2, map[string]interface{}{"tag": "val1"}) + require.True(t, ok) + for k, b := range metrics21.Buckets { + assert.Equal(t, expecVals2[k].n1, b) + } + 
metrics22, ok := getPayloadMetricByTagValues(metrics2, map[string]interface{}{"tag": "val2"}) + require.True(t, ok) + for k, b := range metrics22.Buckets { + assert.Equal(t, expecVals2[k].n2, b) + } + + // Repeat the same observation with the same results) + // bucket 0 - 5 + hist.Observe(1, "val1") + hist.Observe(1, "val1") + hist.Observe(1, "val1") + hist.Observe(1, "val1") + hist.Observe(1, "val1") + // bucket 1 - 0 + // .. + // bucket 2 - 3 + hist.Observe(5, "val1") + hist.Observe(5, "val1") + hist.Observe(5, "val1") + // bucket 4 - 6 + hist.Observe(6, "val1") + hist.Observe(100, "val1") + hist.Observe(100, "val1") + hist.Observe(100, "val1") + hist.Observe(100, "val1") + hist.Observe(100, "val1") + // bucket +inf - 2 + hist.Observe(1000, "val1") + hist.Observe(2000, "val1") + + // bucket 0 - 10 + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + hist.Observe(1, "val2") + // bucket 1 - 5 + hist.Observe(2, "val2") + hist.Observe(2, "val2") + hist.Observe(2, "val2") + hist.Observe(2, "val2") + hist.Observe(2, "val2") + // bucket 2 - 6 + hist.Observe(5, "val2") + hist.Observe(5, "val2") + hist.Observe(5, "val2") + hist.Observe(5, "val2") + hist.Observe(5, "val2") + hist.Observe(5, "val2") + // bucket 4 - 12 + hist.Observe(6, "val2") + hist.Observe(6, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + hist.Observe(100, "val2") + // bucket +inf - 4 + hist.Observe(1000, "val2") + hist.Observe(1000, "val2") + hist.Observe(2000, "val2") + hist.Observe(2000, "val2") + + // Test payload3 + metrics3, ok := getPayloadFilteredMetricList(a, "foo.bar") + require.True(t, ok) + require.Len(t, metrics3, 2) + require.Len(t, metrics3[0].Buckets, 5) + require.Len(t, metrics3[1].Buckets, 5) + expecVals3 := map[string]struct { + n1 uint64 + n2 uint64 + }{ + "1": {5, 10}, + "2": {0, 5}, + "5": {3, 6}, + "100": {6, 12}, + "+Inf": {2, 4}, + } + metrics31, ok := getPayloadMetricByTagValues(metrics3, map[string]interface{}{"tag": "val1"}) + require.True(t, ok) + for k, b := range metrics31.Buckets { + assert.Equal(t, expecVals3[k].n1, b) + } + metrics32, ok := getPayloadMetricByTagValues(metrics3, map[string]interface{}{"tag": "val2"}) + require.True(t, ok) + for k, b := range metrics32.Buckets { + assert.Equal(t, expecVals3[k].n2, b) + } + + // Test raw buckets, they should be still accumulated + tags1 := map[string]string{"tag": "val1"} + rawHist1 := hist.WithTags(tags1) + expecVals41 := []uint64{10, 10, 16, 28} + for i, b := range rawHist1.Get().Buckets { + assert.Equal(t, expecVals41[i], b.Count) + } + tags2 := map[string]string{"tag": "val2"} + rawHist2 := hist.WithTags(tags2) + expecVals42 := []uint64{20, 30, 42, 66} + for i, b := range rawHist2.Get().Buckets { + assert.Equal(t, expecVals42[i], b.Count) + } +} + +func TestHistogramPercentile(t *testing.T) { + var c = ` + agent_telemetry: + enabled: true + profiles: + - name: xxx + metric: + metrics: + - name: foo.bar + ` + + // setup and initiate atel + tel := makeTelMock(t) + o := convertYamlStrToMap(t, c) + s := makeSenderImpl(t, nil, c) + r := newRunnerMock() + a := getTestAtel(t, tel, o, s, nil, r) + require.True(t, a.enabled) + + // setup and initiate atel + hist 
:= tel.NewHistogram("foo", "bar", nil, "", []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})
+	for i := 1; i <= 10; i++ {
+		hist.Observe(1)
+		hist.Observe(2)
+		hist.Observe(3)
+		hist.Observe(4)
+		hist.Observe(5)
+		hist.Observe(6)
+		hist.Observe(7)
+		hist.Observe(8)
+		hist.Observe(9)
+	}
+	hist.Observe(10)
+	hist.Observe(10)
+
+	metric, ok := getPayloadMetric(a, "foo.bar")
+	require.True(t, ok)
+	require.NotNil(t, metric.P75)
+	require.NotNil(t, metric.P95)
+	require.NotNil(t, metric.P99)
+
+	// p75 rank is 69 of 92 observations -> bucket with upper bound 7
+	// p95 rank is 87 of 92 observations -> bucket with upper bound 9
+	// p99 rank is 91 of 92 observations -> bucket with upper bound 10
+	assert.Equal(t, 7.0, *metric.P75)
+	assert.Equal(t, 9.0, *metric.P95)
+	assert.Equal(t, 10.0, *metric.P99)
+
+	// Test percentiles around the implicit "+Inf" bucket (p75 stays in the 10th bucket, p95 and p99 fall into "+Inf")
+	for i := 1; i <= 10; i++ {
+		hist.Observe(10)
+	}
+	for i := 1; i <= 4; i++ {
+		hist.Observe(11)
+	}
+
+	metric, ok = getPayloadMetric(a, "foo.bar")
+	require.True(t, ok)
+	require.NotNil(t, metric.P75)
+	require.NotNil(t, metric.P95)
+	require.NotNil(t, metric.P99)
+
+	// From the percentile point of view, the "+Inf" bucket upper boundary is 2x the last explicit upper boundary;
+	// maybe in the future it will be configurable
+	assert.Equal(t, 10.0, *metric.P75)
+	assert.Equal(t, 20.0, *metric.P95)
+	assert.Equal(t, 20.0, *metric.P99)
+}
+
+func TestUsingPayloadCompressionInAgentTelemetrySender(t *testing.T) {
+	// Run with compression (enabled by default)
+	var cfg1 = `
+    agent_telemetry:
+      enabled: true
+      profiles:
+        - name: xxx
+          metric:
+            metrics:
+              - name: foo.bar
+    `
+
+	tel := makeTelMock(t)
+	hist := tel.NewHistogram("foo", "bar", nil, "", []float64{1, 2, 5, 100})
+	hist.Observe(1)
+	hist.Observe(5)
+	hist.Observe(6)
+	hist.Observe(100)
+
+	// setup and initiate atel
+	o1 := convertYamlStrToMap(t, cfg1)
+	cl1 := newClientMock()
+	s1 := makeSenderImpl(t, cl1, cfg1)
+	r1 := newRunnerMock()
+	a1 := getTestAtel(t, tel, o1, s1, cl1, r1)
+	require.True(t, a1.enabled)
+
+	// run the runner to trigger the telemetry report
+	a1.start()
+	r1.(*runnerMock).run()
+	assert.True(t, len(cl1.(*clientMock).body) > 0)
+
+	// Run without compression
+	var cfg2 = `
+    agent_telemetry:
+      use_compression: false
+      enabled: true
+      profiles:
+        - name: xxx
+          metric:
+            metrics:
+              - name: foo.bar
+                aggregate_tags:
+    `
+
+	// setup and initiate atel
+	o2 := convertYamlStrToMap(t, cfg2)
+	cl2 := newClientMock()
+	s2 := makeSenderImpl(t, cl2, cfg2)
+	r2 := newRunnerMock()
+	a2 := getTestAtel(t, tel, o2, s2, cl2, r2)
+	require.True(t, a2.enabled)
+
+	// run the runner to trigger the telemetry report
+	a2.start()
+	r2.(*runnerMock).run()
+	assert.True(t, len(cl2.(*clientMock).body) > 0)
+	decompressBody, err := zstd.Decompress(nil, cl1.(*clientMock).body)
+	require.NoError(t, err)
+	require.NotZero(t, len(decompressBody))
+
+	// We cannot compare the bodies directly (timestamps and internal bucket
+	// serialization differ), but successful decompression above and the
+	// significant size difference should be sufficient
+	compressBodyLen := len(cl1.(*clientMock).body)
+	nonCompressBodyLen := len(cl2.(*clientMock).body)
+	assert.True(t, float64(nonCompressBodyLen)/float64(compressBodyLen) > 1.5)
+}
+
+func TestDefaultAndNoDefaultPromRegistries(t *testing.T) {
+	var c = `
+    agent_telemetry:
+      enabled: true
+      profiles:
+        - name: xxx
+          metric:
+            metrics:
+              - name: foo.bar
+              - name: bar.foo
+    `
+
+	// setup and initiate atel
+	tel := makeTelMock(t)
+	o :=
convertYamlStrToMap(t, c) + s := makeSenderImpl(t, nil, c) + r := newRunnerMock() + a := getTestAtel(t, tel, o, s, nil, r) + require.True(t, a.enabled) + + gaugeFooBar := tel.NewGaugeWithOpts("foo", "bar", nil, "", telemetry.Options{DefaultMetric: false}) + gaugeBarFoo := tel.NewGaugeWithOpts("bar", "foo", nil, "", telemetry.Options{DefaultMetric: true}) + gaugeFooBar.Set(10) + gaugeBarFoo.Set(20) + + // Test payload + metrics := getPayloadMetricMap(a) + require.Len(t, metrics, 2) + m1, ok1 := metrics["foo.bar"] + require.True(t, ok1) + assert.Equal(t, 10.0, m1.Value) + m2, ok2 := metrics["bar.foo"] + require.True(t, ok2) + assert.Equal(t, 20.0, m2.Value) +} diff --git a/comp/core/agenttelemetry/impl/sender.go b/comp/core/agenttelemetry/impl/sender.go index 585807eb6a539..b7b62f460b933 100644 --- a/comp/core/agenttelemetry/impl/sender.go +++ b/comp/core/agenttelemetry/impl/sender.go @@ -11,6 +11,7 @@ import ( "encoding/json" "errors" "fmt" + "math" "net/http" "net/url" "strconv" @@ -26,6 +27,7 @@ import ( httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/scrubber" "github.com/DataDog/datadog-agent/pkg/version" + "github.com/DataDog/zstd" ) const ( @@ -57,7 +59,9 @@ type senderImpl struct { cfgComp config.Reader logComp log.Component - client client + compress bool + compressionLevel int + client client endpoints *logconfig.Endpoints @@ -133,6 +137,9 @@ type MetricPayload struct { Type string `json:"type"` Tags map[string]interface{} `json:"tags,omitempty"` Buckets map[string]uint64 `json:"buckets,omitempty"` + P75 *float64 `json:"p75,omitempty"` + P95 *float64 `json:"p95,omitempty"` + P99 *float64 `json:"p99,omitempty"` } func httpClientFactory(cfg config.Reader, timeout time.Duration) func() *http.Client { @@ -207,9 +214,12 @@ func newSenderImpl( cfgComp: cfgComp, logComp: logComp, - client: client, - endpoints: endpoints, - agentVersion: agentVersion.GetNumberAndPre(), + compress: cfgComp.GetBool("agent_telemetry.use_compression"), + compressionLevel: cfgComp.GetInt("agent_telemetry.compression_level"), + client: client, + endpoints: endpoints, + agentVersion: agentVersion.GetNumberAndPre(), + // pre-fill parts of payload which are not changing during run-time payloadTemplate: Payload{ APIVersion: "v2", @@ -253,8 +263,53 @@ func (s *senderImpl) addMetricPayload( for _, bucket := range histogram.GetBucket() { boundNameRaw := fmt.Sprintf("%v", bucket.GetUpperBound()) boundName := strings.ReplaceAll(boundNameRaw, ".", "_") + payload.Buckets[boundName] = bucket.GetCumulativeCount() } + payload.Buckets["+Inf"] = histogram.GetSampleCount() + + // Calculate fixed 75, 95 and 99 precentiles. Percentile calculation finds + // a bucket which, with all preceding buckets, contains that percentile item. + // For convenience, percentile values are not the bucket number but its + // upper-bound. If a percentile belongs to the implicit "+inf" bucket, which + // has no explicit upper-bound, we will use the last bucket upper bound times 2. + // The upper-bound of the "+Inf" bucket is defined as 2x of the preceding + // bucket boundary, but it is totally arbitrary. In the future we may use a + // configuration value to set it up. 
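
A worked example of the convention just described, using the same bucket layout as the tests in this patch (upper bounds 1, 2, 5 and 100 with per-interval counts 5, 0, 3 and 6, plus 2 samples in the implicit "+Inf" bucket); this is a minimal standalone sketch with illustrative variable names, not the patch code itself:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        upper := []float64{1, 2, 5, 100} // explicit bucket upper bounds
        counts := []uint64{5, 0, 3, 6}   // per-interval (de-cumulated) bucket counts
        infCount := uint64(2)            // samples in the implicit "+Inf" bucket

        total := infCount
        for _, c := range counts {
            total += c
        }
        rank := uint64(math.Floor(float64(total) * 0.95)) // 15th of 16 samples

        // Walk the explicit buckets; if the rank is never reached, the percentile
        // falls into "+Inf" and is reported as 2x the last explicit upper bound.
        p95 := 2 * upper[len(upper)-1]
        var seen uint64
        for i, c := range counts {
            seen += c
            if seen >= rank {
                p95 = upper[i]
                break
            }
        }
        fmt.Println(p95) // 200: the 95th percentile lands in the "+Inf" bucket
    }
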
+	var totalCount uint64
+	for _, bucket := range histogram.GetBucket() {
+		totalCount += bucket.GetCumulativeCount()
+	}
+	totalCount += histogram.GetSampleCount()
+	p75 := uint64(math.Floor(float64(totalCount) * 0.75))
+	p95 := uint64(math.Floor(float64(totalCount) * 0.95))
+	p99 := uint64(math.Floor(float64(totalCount) * 0.99))
+	var curCount uint64
+	for _, bucket := range histogram.GetBucket() {
+		curCount += bucket.GetCumulativeCount()
+		if payload.P75 == nil && curCount >= p75 {
+			p75Value := bucket.GetUpperBound()
+			payload.P75 = &p75Value
+		}
+		if payload.P95 == nil && curCount >= p95 {
+			p95Value := bucket.GetUpperBound()
+			payload.P95 = &p95Value
+		}
+		if payload.P99 == nil && curCount >= p99 {
+			p99Value := bucket.GetUpperBound()
+			payload.P99 = &p99Value
+		}
+	}
+	maxUpperBound := 2 * (histogram.GetBucket()[len(histogram.GetBucket())-1].GetUpperBound())
+	if payload.P75 == nil {
+		payload.P75 = &maxUpperBound
+	}
+	if payload.P95 == nil {
+		payload.P95 = &maxUpperBound
+	}
+	if payload.P99 == nil {
+		payload.P99 = &maxUpperBound
+	}
 	}

 	// Add metric tags
@@ -321,11 +376,24 @@ func (s *senderImpl) flushSession(ss *senderSession) error {
 		return fmt.Errorf("failed to marshal agent telemetry payload: %w", err)
 	}

-	reqBody, err := scrubber.ScrubJSON(payloadJSON)
+	reqBodyRaw, err := scrubber.ScrubJSON(payloadJSON)
 	if err != nil {
 		return fmt.Errorf("failed to scrubl agent telemetry payload: %w", err)
 	}

+	// Try to compress the payload if needed
+	reqBody := reqBodyRaw
+	compressed := false
+	if s.compress {
+		reqBodyCompressed, err2 := zstd.CompressLevel(nil, reqBodyRaw, s.compressionLevel)
+		if err2 == nil {
+			compressed = true
+			reqBody = reqBodyCompressed
+		} else {
+			s.logComp.Errorf("Failed to compress agent telemetry payload: %v", err2)
+		}
+	}
+
 	// Send the payload to all endpoints
 	var errs error
 	reqType := payloads.RequestType
@@ -337,7 +405,7 @@ func (s *senderImpl) flushSession(ss *senderSession) error {
 			errs = errors.Join(errs, err)
 			continue
 		}
-		s.addHeaders(req, reqType, ep.GetAPIKey(), bodyLen)
+		s.addHeaders(req, reqType, ep.GetAPIKey(), bodyLen, compressed)
 		resp, err := s.client.Do(req.WithContext(ss.cancelCtx))
 		if err != nil {
 			errs = errors.Join(errs, err)
@@ -387,7 +455,7 @@ func (s *senderImpl) sendAgentMetricPayloads(ss *senderSession, metrics []*agent
 	}
 }

-func (s *senderImpl) addHeaders(req *http.Request, requesttype, apikey, bodylen string) {
+func (s *senderImpl) addHeaders(req *http.Request, requesttype, apikey, bodylen string, compressed bool) {
 	req.Header.Add("DD-Api-Key", apikey)
 	req.Header.Add("Content-Type", "application/json")
 	req.Header.Add("Content-Length", bodylen)
@@ -397,4 +465,8 @@ func (s *senderImpl) addHeaders(req *http.Request, requesttype, apikey, bodylen
 	req.Header.Add("DD-Telemetry-Product-Version", s.agentVersion)
 	// Not clear how to acquire that.
Appears that EVP adds it automatically req.Header.Add("datadog-container-id", "") + + if compressed { + req.Header.Set("Content-Encoding", "zstd") + } } diff --git a/comp/core/agenttelemetry/impl/utils.go b/comp/core/agenttelemetry/impl/utils.go index 07f6b86aebf40..01ca50637a6b7 100644 --- a/comp/core/agenttelemetry/impl/utils.go +++ b/comp/core/agenttelemetry/impl/utils.go @@ -8,6 +8,7 @@ package agenttelemetryimpl import ( "fmt" "sort" + "strings" dto "github.com/prometheus/client_model/go" ) @@ -98,11 +99,16 @@ func aggregateMetric(mt dto.MetricType, aggm *dto.Metric, srcm *dto.Metric) { aggmb.Exemplar.Label = nil } } + + // copy the sample count (it is implicit "+Inf" bucket) + aggm.Histogram.SampleCount = srcm.Histogram.SampleCount } else { // for the same metric family bucket structure is the same for i, srcb := range srcm.Histogram.Bucket { *aggm.Histogram.Bucket[i].CumulativeCount += srcb.GetCumulativeCount() } + // copy the sample count (it is implicit "+Inf" bucket) + *aggm.Histogram.SampleCount += srcm.Histogram.GetSampleCount() } } } @@ -123,3 +129,13 @@ func cloneLabelsSorted(labels []*dto.LabelPair) []*dto.LabelPair { func makeLabelPairKey(l *dto.LabelPair) string { return fmt.Sprintf("%s:%s:", l.GetName(), l.GetValue()) } + +// Sort and serialize labels into a string +func convertLabelsToKey(labels []*dto.LabelPair) string { + sortedLabels := cloneLabelsSorted(labels) + var sb strings.Builder + for _, tag := range sortedLabels { + sb.WriteString(makeLabelPairKey(tag)) + } + return sb.String() +} diff --git a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go index f8df2b1618eba..badc378d8a011 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig.go @@ -43,7 +43,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/retry" "github.com/DataDog/datadog-agent/pkg/util/scrubber" ) @@ -58,7 +58,7 @@ type dependencies struct { Log logComp.Component TaggerComp tagger.Component Secrets secrets.Component - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] Telemetry telemetry.Component } @@ -81,7 +81,7 @@ type AutoConfig struct { serviceListenerFactories map[string]listeners.ServiceListenerFactory providerCatalog map[string]providers.ConfigProviderFactory started bool - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] taggerComp tagger.Component logs logComp.Component telemetryStore *acTelemetry.Store @@ -151,7 +151,7 @@ func newAutoConfig(deps dependencies) autodiscovery.Component { } // createNewAutoConfig creates an AutoConfig instance (without starting). 
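
The stability requirement behind convertLabelsToKey above (the same timeseries must always map to the same key, regardless of label ordering) can be illustrated with a small standalone sketch; stableKey is a hypothetical name and a plain map is used instead of the dto.LabelPair slices from this patch:

    package main

    import (
        "fmt"
        "sort"
        "strings"
    )

    // stableKey builds a reproducible key for a timeseries by sorting its label
    // names before serializing them, so supply order cannot change the key.
    func stableKey(metricName string, labels map[string]string) string {
        names := make([]string, 0, len(labels))
        for n := range labels {
            names = append(names, n)
        }
        sort.Strings(names)

        var sb strings.Builder
        sb.WriteString(metricName)
        for _, n := range names {
            fmt.Fprintf(&sb, "%s:%s:", n, labels[n])
        }
        return sb.String()
    }

    func main() {
        // Both calls yield the same key regardless of label ordering.
        fmt.Println(stableKey("foo.bar", map[string]string{"tag2": "b", "tag1": "a"}))
        fmt.Println(stableKey("foo.bar", map[string]string{"tag1": "a", "tag2": "b"}))
    }
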
-func createNewAutoConfig(schedulerController *scheduler.Controller, secretResolver secrets.Component, wmeta optional.Option[workloadmeta.Component], taggerComp tagger.Component, logs logComp.Component, telemetryComp telemetry.Component) *AutoConfig { +func createNewAutoConfig(schedulerController *scheduler.Controller, secretResolver secrets.Component, wmeta option.Option[workloadmeta.Component], taggerComp tagger.Component, logs logComp.Component, telemetryComp telemetry.Component) *AutoConfig { cfgMgr := newReconcilingConfigManager(secretResolver) ac := &AutoConfig{ configPollers: make([]*configPoller, 0, 9), diff --git a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_mock.go b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_mock.go index c6890b6988726..981a4a51738f1 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_mock.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_mock.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // MockParams defines the parameters for the mock component. @@ -28,7 +28,7 @@ type MockParams struct { type mockdependencies struct { fx.In - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] Params MockParams TaggerComp mockTagger.Mock LogsComp log.Component diff --git a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go index 9c981d8d60dc3..ef1588b3b6ee3 100644 --- a/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go +++ b/comp/core/autodiscovery/autodiscoveryimpl/autoconfig_test.go @@ -37,7 +37,7 @@ import ( pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglogsetup "github.com/DataDog/datadog-agent/pkg/util/log/setup" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/retry" ) @@ -189,7 +189,7 @@ func (suite *AutoConfigTestSuite) SetupTest() { suite.deps = createDeps(suite.T()) } -func getAutoConfig(schedulerController *scheduler.Controller, secretResolver secrets.Component, wmeta optional.Option[workloadmeta.Component], taggerComp tagger.Component, logsComp log.Component, telemetryComp telemetry.Component) *AutoConfig { +func getAutoConfig(schedulerController *scheduler.Controller, secretResolver secrets.Component, wmeta option.Option[workloadmeta.Component], taggerComp tagger.Component, logsComp log.Component, telemetryComp telemetry.Component) *AutoConfig { ac := createNewAutoConfig(schedulerController, secretResolver, wmeta, taggerComp, logsComp, telemetryComp) go ac.serviceListening() return ac @@ -593,7 +593,7 @@ func TestWriteConfigEndpoint(t *testing.T) { type Deps struct { fx.In - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] TaggerComp tagger.Component LogsComp log.Component Telemetry telemetry.Component diff --git a/comp/core/autodiscovery/common/utils/annotations.go b/comp/core/autodiscovery/common/utils/annotations.go index 606e7264c16fc..4e3699446dcfb 100644 --- a/comp/core/autodiscovery/common/utils/annotations.go +++ b/comp/core/autodiscovery/common/utils/annotations.go @@ -176,14 +176,14 @@ func 
BuildTemplates(adID string, checkNames []string, initConfigs, instances [][ // sanity checks if len(checkNames) != len(initConfigs) || len(checkNames) != len(instances) { - log.Errorf("Template entries don't all have the same length. "+ + log.Errorf("Template entries in entity with ID %q don't all have the same length. "+ "checkNames: %d, initConfigs: %d, instances: %d. Not using them.", - len(checkNames), len(initConfigs), len(instances)) + adID, len(checkNames), len(initConfigs), len(instances)) return templates } for idx := range initConfigs { if len(initConfigs[idx]) != 1 { - log.Error("Templates init Configs list is not valid, not using Templates entries") + log.Errorf("Templates init Configs list in entity with ID %q is not valid, not using Templates entries", adID) return templates } } diff --git a/comp/core/autodiscovery/listeners/types.go b/comp/core/autodiscovery/listeners/types.go index 1f7c13de3c14e..a8b1bb7d2774f 100644 --- a/comp/core/autodiscovery/listeners/types.go +++ b/comp/core/autodiscovery/listeners/types.go @@ -15,7 +15,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // ContainerPort represents a network port in a Service. @@ -70,7 +70,7 @@ type ServiceListernerDeps struct { Config Config Telemetry *telemetry.Store Tagger tagger.Component - Wmeta optional.Option[workloadmeta.Component] + Wmeta option.Option[workloadmeta.Component] } // ServiceListenerFactory builds a service listener diff --git a/comp/core/autodiscovery/providers/README.md b/comp/core/autodiscovery/providers/README.md index fcc9227692975..2bfd6d8bb60e2 100644 --- a/comp/core/autodiscovery/providers/README.md +++ b/comp/core/autodiscovery/providers/README.md @@ -75,3 +75,7 @@ The `ZookeeperConfigProvider` reads the check configs from zookeeper. ### `RemoteConfigProvider` The `RemoteConfigProvider` reads the check configs from remote-config. + +### `GPUConfigProvider` + +The `GPUConfigProvider` generates check configs from visible GPUs on the host. diff --git a/comp/core/autodiscovery/providers/gpu.go b/comp/core/autodiscovery/providers/gpu.go new file mode 100644 index 0000000000000..5dcf794420a75 --- /dev/null +++ b/comp/core/autodiscovery/providers/gpu.go @@ -0,0 +1,142 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !serverless + +package providers + +import ( + "context" + "sync" + + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/providers/names" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/telemetry" + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// gpuCheckName is the name of the GPU check, to avoid importing the code from the GPU package +const gpuCheckName = "gpu" + +// GPUConfigProvider implements the ConfigProvider interface for GPUs. This provider listens +// in Workloadmeta for GPU events. If any GPU is detected, it will generate a config to +// schedule the GPU check. 
As the GPU check covers all GPUs automatically, further GPUs +// will not trigger new configs. +type GPUConfigProvider struct { + workloadmetaStore workloadmeta.Component + + // scheduledConfig is the config that is scheduled for the GPU check. Stored here for + // unscheduling purposes. + scheduledConfig *integration.Config + + // gpuDeviceCache is a cache of GPU devices that have been seen. If we stop seeing all GPU + // devices, we will unschedule the GPU check. + gpuDeviceCache map[string]struct{} + mu sync.RWMutex +} + +var _ ConfigProvider = &GPUConfigProvider{} +var _ StreamingConfigProvider = &GPUConfigProvider{} + +// NewGPUConfigProvider returns a new ConfigProvider subscribed to GPU events +func NewGPUConfigProvider(_ *pkgconfigsetup.ConfigurationProviders, wmeta workloadmeta.Component, _ *telemetry.Store) (ConfigProvider, error) { + return &GPUConfigProvider{ + workloadmetaStore: wmeta, + gpuDeviceCache: make(map[string]struct{}), + }, nil +} + +// String returns a string representation of the GPUConfigProvider +func (k *GPUConfigProvider) String() string { + return names.GPU +} + +// Stream starts listening to workloadmeta to generate configs as they come +// instead of relying on a periodic call to Collect. +func (k *GPUConfigProvider) Stream(ctx context.Context) <-chan integration.ConfigChanges { + const name = "ad-gpuprovider" + + // outCh must be unbuffered. processing of workloadmeta events must not + // proceed until the config is processed by autodiscovery, as configs + // need to be generated before any associated services. + outCh := make(chan integration.ConfigChanges) + + filter := workloadmeta.NewFilterBuilder(). + AddKind(workloadmeta.KindGPU). + Build() + inCh := k.workloadmetaStore.Subscribe(name, workloadmeta.ConfigProviderPriority, filter) + + go func() { + for { + select { + case <-ctx.Done(): + k.workloadmetaStore.Unsubscribe(inCh) + + case evBundle, ok := <-inCh: + if !ok { + return + } + + // send changes even when they're empty, as we + // need to signal that an event has been + // received, for flow control reasons + outCh <- k.processEvents(evBundle) + evBundle.Acknowledge() + } + } + }() + + return outCh +} + +func (k *GPUConfigProvider) processEvents(evBundle workloadmeta.EventBundle) integration.ConfigChanges { + k.mu.Lock() + defer k.mu.Unlock() + + changes := integration.ConfigChanges{} + + for _, event := range evBundle.Events { + gpuUUID := event.Entity.GetID().ID + + switch event.Type { + case workloadmeta.EventTypeSet: + // Track seen GPU devices + k.gpuDeviceCache[gpuUUID] = struct{}{} + + // We only need to schedule the check once + if k.scheduledConfig != nil { + continue + } + + k.scheduledConfig = &integration.Config{ + Name: gpuCheckName, + Instances: []integration.Data{[]byte{}}, + InitConfig: []byte{}, + Provider: names.GPU, + Source: names.GPU, + } + + changes.ScheduleConfig(*k.scheduledConfig) + case workloadmeta.EventTypeUnset: + delete(k.gpuDeviceCache, gpuUUID) + + // Unschedule the check if no more GPUs are detected + if len(k.gpuDeviceCache) == 0 && k.scheduledConfig != nil { + changes.UnscheduleConfig(*k.scheduledConfig) + } + default: + log.Errorf("cannot handle event of type %d", event.Type) + } + } + + return changes +} + +// GetConfigErrors returns a map of configuration errors, which is always empty for the GPUConfigProvider +func (k *GPUConfigProvider) GetConfigErrors() map[string]ErrorMsgSet { + return make(map[string]ErrorMsgSet) +} diff --git a/comp/core/autodiscovery/providers/gpu_test.go 
b/comp/core/autodiscovery/providers/gpu_test.go new file mode 100644 index 0000000000000..e51fe696b48e0 --- /dev/null +++ b/comp/core/autodiscovery/providers/gpu_test.go @@ -0,0 +1,74 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2025-present Datadog, Inc. + +package providers + +import ( + "testing" + + "github.com/stretchr/testify/require" + + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/gpu" +) + +func TestGPUProcessEvents(t *testing.T) { + // the processEvents function doesn't need any of the deps, so make them nil + provider, err := NewGPUConfigProvider(nil, nil, nil) + require.NoError(t, err) + + // Cast from the generic factory method + gpuProvider, ok := provider.(*GPUConfigProvider) + require.True(t, ok) + + gpuIDs := []string{"gpu-1234", "gpu-5678"} + + var gpuCreateEvents []workloadmeta.Event + var gpuDestroyEvents []workloadmeta.Event + for _, gpuID := range gpuIDs { + entityID := workloadmeta.EntityID{ + Kind: workloadmeta.KindGPU, + ID: gpuID, + } + + entity := &workloadmeta.GPU{ + EntityID: entityID, + EntityMeta: workloadmeta.EntityMeta{ + Name: entityID.ID, + }, + Vendor: "nvidia", + Device: "tesla-v100", + } + + gpuCreateEvents = append(gpuCreateEvents, workloadmeta.Event{Type: workloadmeta.EventTypeSet, Entity: entity}) + gpuDestroyEvents = append(gpuDestroyEvents, workloadmeta.Event{Type: workloadmeta.EventTypeUnset, Entity: entity}) + } + + createBundle := workloadmeta.EventBundle{Events: gpuCreateEvents} + destroyBundle1 := workloadmeta.EventBundle{Events: gpuDestroyEvents[0:1]} + destroyBundle2 := workloadmeta.EventBundle{Events: gpuDestroyEvents[1:2]} + + // Multiple events should only create one config + changes := gpuProvider.processEvents(createBundle) + require.Len(t, changes.Schedule, 1) + require.Len(t, changes.Unschedule, 0) + require.Equal(t, changes.Schedule[0].Name, gpu.CheckName) + + // More events should not create more configs + changes = gpuProvider.processEvents(createBundle) + require.Len(t, changes.Schedule, 0) + require.Len(t, changes.Unschedule, 0) + + // Destroying one GPU should not unschedule the check + changes = gpuProvider.processEvents(destroyBundle1) + require.Len(t, changes.Schedule, 0) + require.Len(t, changes.Unschedule, 0) + + // Destroying the last GPU should unschedule the check + changes = gpuProvider.processEvents(destroyBundle2) + require.Len(t, changes.Schedule, 0) + require.Len(t, changes.Unschedule, 1) + require.Equal(t, changes.Unschedule[0].Name, gpu.CheckName) +} diff --git a/comp/core/autodiscovery/providers/names/provider_names.go b/comp/core/autodiscovery/providers/names/provider_names.go index 57e7c3b867dd1..8527c6d94bd4d 100644 --- a/comp/core/autodiscovery/providers/names/provider_names.go +++ b/comp/core/autodiscovery/providers/names/provider_names.go @@ -27,6 +27,7 @@ const ( RemoteConfig = "remote-config" SNMP = "snmp" Zookeeper = "zookeeper" + GPU = "gpu" ) // Internal Autodiscovery names for the config providers diff --git a/comp/core/autodiscovery/providers/providers.go b/comp/core/autodiscovery/providers/providers.go index 5eb4b3b2cf853..acf98590fdbae 100644 --- a/comp/core/autodiscovery/providers/providers.go +++ b/comp/core/autodiscovery/providers/providers.go @@ -58,6 +58,7 @@ func RegisterProviders(providerCatalog
map[string]ConfigProviderFactory) { RegisterProvider(names.PrometheusPodsRegisterName, NewPrometheusPodsConfigProvider, providerCatalog) RegisterProvider(names.PrometheusServicesRegisterName, NewPrometheusServicesConfigProvider, providerCatalog) RegisterProvider(names.ZookeeperRegisterName, NewZookeeperConfigProvider, providerCatalog) + RegisterProviderWithComponents(names.GPU, NewGPUConfigProvider, providerCatalog) } // ConfigProviderFactory is any function capable to create a ConfigProvider instance diff --git a/comp/core/bundle.go b/comp/core/bundle.go index 0e9062b216497..b75f5a8a76dfe 100644 --- a/comp/core/bundle.go +++ b/comp/core/bundle.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/sysprobeconfig/sysprobeconfigimpl" "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // team: agent-shared-components @@ -41,7 +41,7 @@ func Bundle() fxutil.BundleOptions { fx.Provide(func(params BundleParams) sysprobeconfigimpl.Params { return params.SysprobeConfigParams }), secretsimpl.Module(), fx.Provide(func(params BundleParams) secrets.Params { return params.SecretParams }), - fx.Provide(func(secrets secrets.Component) optional.Option[secrets.Component] { return optional.NewOption(secrets) }), + fx.Provide(func(secrets secrets.Component) option.Option[secrets.Component] { return option.New(secrets) }), sysprobeconfigimpl.Module(), telemetryimpl.Module(), hostnameimpl.Module(), diff --git a/comp/core/config/config.go b/comp/core/config/config.go index 7eacbbd994354..94da6f33ed145 100644 --- a/comp/core/config/config.go +++ b/comp/core/config/config.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/secrets" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Reader is a subset of Config that only allows reading of configuration @@ -43,7 +43,7 @@ type dependencies struct { fx.In Params Params - Secret optional.Option[secrets.Component] + Secret option.Option[secrets.Component] } func (d dependencies) getParams() *Params { diff --git a/comp/core/config/go.mod b/comp/core/config/go.mod index 00be19b3b9ba7..10520fadee75d 100644 --- a/comp/core/config/go.mod +++ b/comp/core/config/go.mod @@ -23,7 +23,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional/ + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option/ github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system @@ -37,12 +37,12 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 
github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/defaultpaths v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/defaultpaths v0.61.0 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 github.com/DataDog/viper v1.14.0 github.com/stretchr/testify v1.10.0 go.uber.org/fx v1.23.0 @@ -54,16 +54,16 @@ require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -78,21 +78,21 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect 
github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -103,10 +103,10 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/config/go.sum b/comp/core/config/go.sum index 323c4fa804e83..4f65f318c619a 100644 --- a/comp/core/config/go.sum +++ b/comp/core/config/go.sum @@ -73,7 +73,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -113,8 +112,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -141,8 +140,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 
h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -159,8 +158,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -174,8 +173,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -186,8 +185,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -242,8 +241,8 @@ golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -280,8 +279,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -309,8 +308,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/core/configsync/configsyncimpl/module.go b/comp/core/configsync/configsyncimpl/module.go index a89bbacc91197..af4685f032344 100644 --- a/comp/core/configsync/configsyncimpl/module.go +++ b/comp/core/configsync/configsyncimpl/module.go @@ -35,19 +35,19 @@ type dependencies struct { } // Module defines the fx options for this component. 
-func Module() fxutil.Module { - return fxutil.Component( - fx.Provide(newComponent), - fx.Supply(Params{}), - ) -} - -// ModuleWithParams defines the fx options for this component, but -// requires additionally specifying custom Params from the fx App, to be -// passed to the constructor. -func ModuleWithParams() fxutil.Module { +func Module(params Params) fxutil.Module { return fxutil.Component( fx.Provide(newComponent), + fx.Supply(params), + + // configSync is a component with no public method, therefore nobody depends on it and FX only instantiates + // components when they're needed. Adding a dummy function that takes our Component as a parameter forces + // the instantiation of configsync. This means that simply using 'configsync.Module()' will run our + // component (which is the expected behavior). + // + // This prevents a silent corner case where including 'configsync' in the main function would not actually + // instantiate it. This also removes the need for every main using configsync to add the line below. + fx.Invoke(func(_ configsync.Component) {}), ) } @@ -64,13 +64,13 @@ type configSync struct { } // newComponent checks if the component was enabled as per the config and return a enable/disabled configsync -func newComponent(deps dependencies) configsync.Component { +func newComponent(deps dependencies) (configsync.Component, error) { agentIPCPort := deps.Config.GetInt("agent_ipc.port") configRefreshIntervalSec := deps.Config.GetInt("agent_ipc.config_refresh_interval") if agentIPCPort <= 0 || configRefreshIntervalSec <= 0 { deps.Log.Infof("configsync disabled (agent_ipc.port: %d | agent_ipc.config_refresh_interval: %d)", agentIPCPort, configRefreshIntervalSec) - return configSync{} + return configSync{}, nil } deps.Log.Infof("configsync enabled (agent_ipc '%s:%d' | agent_ipc.config_refresh_interval: %d)", deps.Config.GetString("agent_ipc.host"), agentIPCPort, configRefreshIntervalSec) @@ -79,7 +79,7 @@ func newComponent(deps dependencies) configsync.Component { // newConfigSync creates a new configSync component. // agentIPCPort and configRefreshIntervalSec must be strictly positive.
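The fx.Invoke added to Module above leans on a general fx property: a constructor registered with fx.Provide only runs when something consumes its output, so a no-op fx.Invoke that takes the component is enough to force instantiation. Below is a minimal, self-contained sketch of that property; it is not part of the patch, and the noop type and newNoop constructor are invented purely for illustration.

package main

import (
	"fmt"

	"go.uber.org/fx"
)

// noop is a hypothetical component with no public methods, mirroring configsync.
type noop struct{}

// newNoop is the provided constructor; fx only calls it if something depends on noop.
func newNoop() noop {
	fmt.Println("constructor ran")
	return noop{}
}

func main() {
	// The no-op fx.Invoke declares a dependency on noop, which forces fx.New to run newNoop.
	app := fx.New(
		fx.Provide(newNoop),
		fx.Invoke(func(_ noop) {}),
	)
	if err := app.Err(); err != nil {
		fmt.Println("fx error:", err)
	}
}

Removing the fx.Invoke line leaves app.Err() returning nil while newNoop never runs, which is exactly the silent corner case the comment above warns about.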
-func newConfigSync(deps dependencies, agentIPCPort int, configRefreshIntervalSec int) configsync.Component { +func newConfigSync(deps dependencies, agentIPCPort int, configRefreshIntervalSec int) (configsync.Component, error) { agentIPCHost := deps.Config.GetString("agent_ipc.host") url := &url.URL{ @@ -102,17 +102,20 @@ func newConfigSync(deps dependencies, agentIPCPort int, configRefreshIntervalSec enabled: true, } - if deps.SyncParams.OnInit { - if deps.SyncParams.Delay != 0 { - select { - case <-ctx.Done(): //context cancelled - // TODO: this component should return an error + if deps.SyncParams.OnInitSync { + deps.Log.Infof("triggering configsync on init (will retry for %s)", deps.SyncParams.OnInitSyncTimeout) + deadline := time.Now().Add(deps.SyncParams.OnInitSyncTimeout) + for { + if err := configSync.updater(); err == nil { + break + } + if time.Now().After(deadline) { cancel() - return nil - case <-time.After(deps.SyncParams.Delay): + return nil, deps.Log.Errorf("failed to sync config at startup, is the core agent listening on '%s' ?", url.String()) } + time.Sleep(2 * time.Second) } - configSync.updater() + deps.Log.Infof("triggering configsync on init succeeded") } // start and stop the routine in fx hooks @@ -127,5 +130,5 @@ func newConfigSync(deps dependencies, agentIPCPort int, configRefreshIntervalSec }, }) - return configSync + return configSync, nil } diff --git a/comp/core/configsync/configsyncimpl/module_integration_test.go b/comp/core/configsync/configsyncimpl/module_integration_test.go index 5d0fde62a449a..e4e2372363bef 100644 --- a/comp/core/configsync/configsyncimpl/module_integration_test.go +++ b/comp/core/configsync/configsyncimpl/module_integration_test.go @@ -46,7 +46,7 @@ func TestOptionalModule(t *testing.T) { comp := fxutil.Test[configsync.Component](t, fx.Options( core.MockBundle(), fetchonlyimpl.Module(), - Module(), + Module(Params{}), fx.Populate(&cfg), fx.Replace(config.MockParams{Overrides: overrides}), )) diff --git a/comp/core/configsync/configsyncimpl/module_test.go b/comp/core/configsync/configsyncimpl/module_test.go index 8051e4cce793c..731b5378c4139 100644 --- a/comp/core/configsync/configsyncimpl/module_test.go +++ b/comp/core/configsync/configsyncimpl/module_test.go @@ -10,6 +10,7 @@ import ( pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNewConfigSync(t *testing.T) { @@ -17,21 +18,24 @@ func TestNewConfigSync(t *testing.T) { deps := makeDeps(t) deps.Config.Set("agent_ipc.port", 1234, pkgconfigmodel.SourceFile) deps.Config.Set("agent_ipc.config_refresh_interval", 30, pkgconfigmodel.SourceFile) - comp := newComponent(deps) + comp, err := newComponent(deps) + require.NoError(t, err) assert.True(t, comp.(configSync).enabled) }) t.Run("disabled ipc port zero", func(t *testing.T) { deps := makeDeps(t) deps.Config.Set("agent_ipc.port", 0, pkgconfigmodel.SourceFile) - comp := newComponent(deps) + comp, err := newComponent(deps) + require.NoError(t, err) assert.False(t, comp.(configSync).enabled) }) t.Run("disabled config refresh interval zero", func(t *testing.T) { deps := makeDeps(t) deps.Config.Set("agent_ipc.config_refresh_interval", 0, pkgconfigmodel.SourceFile) - comp := newComponent(deps) + comp, err := newComponent(deps) + require.NoError(t, err) assert.False(t, comp.(configSync).enabled) }) } diff --git a/comp/core/configsync/configsyncimpl/params.go b/comp/core/configsync/configsyncimpl/params.go index 79442bce59537..84de8bfeeb2fb 100644 
--- a/comp/core/configsync/configsyncimpl/params.go +++ b/comp/core/configsync/configsyncimpl/params.go @@ -10,17 +10,26 @@ import "time" // Params defines the parameters for the configsync component. type Params struct { + // Timeout is the timeout used for each call to the core-agent Timeout time.Duration - Delay time.Duration - OnInit bool + // OnInitSync makes configsync synchronize the configuration at initialization and fails init if we cannot get the + // configuration from the core agent + OnInitSync bool + // OnInitSyncTimeout represents how long configsync should keep retrying to synchronize the configuration at init + OnInitSyncTimeout time.Duration } // NewParams creates a new instance of Params -func NewParams(to time.Duration, delay time.Duration, sync bool) Params { +func NewParams(syncTimeout time.Duration, syncOnInit bool, syncOnInitTimeout time.Duration) Params { params := Params{ - Timeout: to, - Delay: delay, - OnInit: sync, + Timeout: syncTimeout, + OnInitSync: syncOnInit, + OnInitSyncTimeout: syncOnInitTimeout, } return params } + +// NewDefaultParams returns the default params for configsync +func NewDefaultParams() Params { + return Params{} +} diff --git a/comp/core/configsync/configsyncimpl/sync.go b/comp/core/configsync/configsyncimpl/sync.go index e22cdeae254ce..8678781de45b6 100644 --- a/comp/core/configsync/configsyncimpl/sync.go +++ b/comp/core/configsync/configsyncimpl/sync.go @@ -9,7 +9,6 @@ import ( "context" "encoding/json" "net/http" - "reflect" "strconv" "time" @@ -17,16 +16,17 @@ import ( pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" ) -func (cs *configSync) updater() { +func (cs *configSync) updater() error { + cs.Log.Debugf("Pulling new configuration from the core-agent at '%s'", cs.url.String()) cfg, err := fetchConfig(cs.ctx, cs.client, cs.Authtoken.Get(), cs.url.String()) if err != nil { if cs.connected { - cs.Log.Warnf("Failed to fetch config from core agent: %v", err) + cs.Log.Warnf("Lost connectivity to core-agent while fetching config: %v", err) cs.connected = false } else { cs.Log.Debugf("Failed to fetch config from core agent: %v", err) } - return + return err } if cs.connected { @@ -42,7 +42,7 @@ func (cs *configSync) updater() { valueMap, ok := value.(map[string]string) if !ok { // this would be unexpected - but deal with it - updateConfig(cs.Config, key, value) + cs.Config.Set(key, value, pkgconfigmodel.SourceLocalConfigProcess) continue } @@ -55,13 +55,13 @@ func (cs *configSync) updater() { typedValues[cfgkey] = cfgval } } - updateConfig(cs.Config, key, typedValues) + cs.Config.Set(key, typedValues, pkgconfigmodel.SourceLocalConfigProcess) } - } else { - updateConfig(cs.Config, key, value) + cs.Config.Set(key, value, pkgconfigmodel.SourceLocalConfigProcess) } } + return nil } func (cs *configSync) runWithInterval(refreshInterval time.Duration) { @@ -72,7 +72,6 @@ func (cs *configSync) runWithInterval(refreshInterval time.Duration) { } func (cs *configSync) runWithChan(ch <-chan time.Time) { - cs.Log.Infof("Starting to sync config with core agent at %s", cs.url) for { @@ -80,7 +79,7 @@ func (cs *configSync) runWithChan(ch <-chan time.Time) { case <-cs.ctx.Done(): return case <-ch: - cs.updater() + _ = cs.updater() } } } @@ -104,11 +103,3 @@ func fetchConfig(ctx context.Context, client *http.Client, authtoken, url string return config, nil } - -func updateConfig(cfg pkgconfigmodel.ReaderWriter, key string, value interface{}) bool { - // check if the value changed to only log if it effectively changed the value - oldvalue := cfg.Get(key) -
cfg.Set(key, value, pkgconfigmodel.SourceLocalConfigProcess) - - return !reflect.DeepEqual(oldvalue, cfg.Get(key)) -} diff --git a/comp/core/configsync/configsyncimpl/sync_test.go b/comp/core/configsync/configsyncimpl/sync_test.go index 2038e2add03ef..7faec27fb5f4d 100644 --- a/comp/core/configsync/configsyncimpl/sync_test.go +++ b/comp/core/configsync/configsyncimpl/sync_test.go @@ -13,8 +13,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/comp/api/authtoken" + "github.com/DataDog/datadog-agent/comp/api/authtoken/fetchonlyimpl" + logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" - pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + "github.com/DataDog/datadog-agent/pkg/config/model" ) func TestFetchConfig(t *testing.T) { @@ -63,12 +66,33 @@ func TestFetchConfig(t *testing.T) { }) } -func TestUpdateConfig(t *testing.T) { - cfg := configmock.New(t) - cfg.Set("key1", "value1", pkgconfigmodel.SourceFile) - cfg.Set("key3", "set-with-cli", pkgconfigmodel.SourceCLI) +func TestUpdater(t *testing.T) { + callbackCalled := 0 + handler := func(w http.ResponseWriter, _ *http.Request) { + callbackCalled++ + w.Write([]byte(`{"key1": "value1"}`)) + } + _, client, url := makeServer(t, handler) - assert.False(t, updateConfig(cfg, "key1", "value1")) - assert.True(t, updateConfig(cfg, "key2", "value2")) - assert.False(t, updateConfig(cfg, "key3", "value3")) + cfg := configmock.New(t) + cfg.Set("key1", "base_value", model.SourceDefault) + + cs := configSync{ + Config: cfg, + Log: logmock.New(t), + Authtoken: authtoken.Component(&fetchonlyimpl.MockFetchOnly{}), + url: url, + client: client, + ctx: context.Background(), + } + + cs.updater() + assert.Equal(t, "value1", cfg.Get("key1")) + assert.Equal(t, 1, callbackCalled) + + cfg.Set("key1", "cli_value", model.SourceCLI) + + cs.updater() + assert.Equal(t, "cli_value", cfg.Get("key1")) + assert.Equal(t, 2, callbackCalled) } diff --git a/comp/core/configsync/configsyncimpl/test_common.go b/comp/core/configsync/configsyncimpl/test_common.go index c6699ef0f1a02..19a7c9108f8c1 100644 --- a/comp/core/configsync/configsyncimpl/test_common.go +++ b/comp/core/configsync/configsyncimpl/test_common.go @@ -28,7 +28,7 @@ func makeDeps(t *testing.T) dependencies { return fxutil.Test[dependencies](t, fx.Options( core.MockBundle(), fetchonlyimpl.MockModule(), // use the mock to avoid trying to read the file - fx.Supply(NewParams(0, 0, false)), + fx.Supply(NewParams(0, false, 0)), )) } diff --git a/comp/core/flare/flare.go b/comp/core/flare/flare.go index f3c7a5287759b..2914288671014 100644 --- a/comp/core/flare/flare.go +++ b/comp/core/flare/flare.go @@ -35,7 +35,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/diagnose" pkgFlare "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // ProfileData maps (pprof) profile names to the profile data. 
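The flare.go hunk that follows (like types.go, bundle.go, and gui.go elsewhere in this patch) is part of the optional → option package rename. Only the Option type and the New/None constructors are visible in the diff, so the sketch below uses a local stand-in type to approximate the pattern rather than the real pkg/util/option API; the Get accessor is an assumption for the sake of a runnable example.

package main

import "fmt"

// Option is a local stand-in approximating pkg/util/option.Option for this sketch.
type Option[T any] struct {
	value T
	set   bool
}

// New mirrors option.New: wrap a value that is present.
func New[T any](v T) Option[T] { return Option[T]{value: v, set: true} }

// None mirrors option.None: an explicitly absent value.
func None[T any]() Option[T] { return Option[T]{} }

// Get returns the wrapped value and whether it was set (assumed accessor, not shown in the diff).
func (o Option[T]) Get() (T, bool) { return o.value, o.set }

func main() {
	gui := None[string]() // e.g. the GUI component when the GUI port is disabled
	gui = New("gui enabled")
	if v, ok := gui.Get(); ok {
		fmt.Println(v)
	}
}

The rename keeps call sites shorter (option.New / option.None instead of optional.NewOption / optional.NewNoneOption) without changing how consumers such as the flare dependencies declare an optionally-present component.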
@@ -49,8 +49,8 @@ type dependencies struct { Diagnosesendermanager diagnosesendermanager.Component Params Params Providers []*types.FlareFiller `group:"flare"` - Collector optional.Option[collector.Component] - WMeta optional.Option[workloadmeta.Component] + Collector option.Option[collector.Component] + WMeta option.Option[workloadmeta.Component] Secrets secrets.Component AC autodiscovery.Component Tagger tagger.Component diff --git a/comp/core/flare/types/go.mod b/comp/core/flare/types/go.mod index a3526c64a15dc..349bb3423a2a2 100644 --- a/comp/core/flare/types/go.mod +++ b/comp/core/flare/types/go.mod @@ -16,5 +16,5 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect ) diff --git a/comp/core/flare/types/go.sum b/comp/core/flare/types/go.sum index 7b69276ecf542..d7ce7d478b536 100644 --- a/comp/core/flare/types/go.sum +++ b/comp/core/flare/types/go.sum @@ -14,7 +14,7 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/comp/core/gui/guiimpl/gui.go b/comp/core/gui/guiimpl/gui.go index 94be94e231c1f..65e657dfba7a6 100644 --- a/comp/core/gui/guiimpl/gui.go +++ b/comp/core/gui/guiimpl/gui.go @@ -41,7 +41,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/system" ) @@ -91,7 +91,7 @@ type dependencies struct { type provides struct { fx.Out - Comp optional.Option[guicomp.Component] + Comp option.Option[guicomp.Component] Endpoint api.AgentEndpointProvider } @@ -101,7 +101,7 @@ type provides struct { func newGui(deps dependencies) provides { p := provides{ - Comp: optional.NewNoneOption[guicomp.Component](), + Comp: option.None[guicomp.Component](), } guiPort := deps.Config.GetString("GUI_port") @@ -158,7 +158,7 @@ func newGui(deps dependencies) provides { OnStart: g.start, OnStop: g.stop}) - p.Comp = optional.NewOption[guicomp.Component](g) + p.Comp = option.New[guicomp.Component](g) p.Endpoint = api.NewAgentEndpointProvider(g.getIntentToken, "/gui/intent", "GET") return p diff --git a/comp/core/hostname/hostnameinterface/go.mod b/comp/core/hostname/hostnameinterface/go.mod index c097205307cf6..8153709f1b821 100644 --- a/comp/core/hostname/hostnameinterface/go.mod +++ b/comp/core/hostname/hostnameinterface/go.mod @@ -5,7 +5,7 @@ go 1.22.0 replace ( github.com/DataDog/datadog-agent/comp/def => ../../../../comp/def github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil - github.com/DataDog/datadog-agent/pkg/util/optional => 
../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option ) require ( @@ -16,7 +16,7 @@ require ( require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.55.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.55.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect @@ -25,6 +25,6 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/hostname/hostnameinterface/go.sum b/comp/core/hostname/hostnameinterface/go.sum index 3dcc180d9ba1c..66bf0027fa26b 100644 --- a/comp/core/hostname/hostnameinterface/go.sum +++ b/comp/core/hostname/hostnameinterface/go.sum @@ -28,8 +28,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/comp/core/log/impl-trace/go.mod b/comp/core/log/impl-trace/go.mod index 309b259ad3616..ab40af48de2c7 100644 --- a/comp/core/log/impl-trace/go.mod +++ b/comp/core/log/impl-trace/go.mod @@ -30,7 +30,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system @@ -41,11 +41,11 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/config v0.59.0 - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/cihub/seelog 
v0.0.0-20170130134532-f561c5e57575 // indirect; v2.6 github.com/stretchr/testify v1.10.0 go.uber.org/fx v1.23.0 // indirect @@ -54,7 +54,7 @@ require ( require ( github.com/DataDog/datadog-agent/comp/def v0.59.0 github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log/setup v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.61.0 ) require ( @@ -62,20 +62,20 @@ require ( github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -87,16 +87,16 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + 
github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -107,8 +107,8 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/core/log/impl-trace/go.sum b/comp/core/log/impl-trace/go.sum index dd16364891695..77eac717c35df 100644 --- a/comp/core/log/impl-trace/go.sum +++ b/comp/core/log/impl-trace/go.sum @@ -72,7 +72,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -110,8 +109,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -138,8 +137,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -156,8 +155,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -171,8 +170,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -183,8 +182,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -239,8 +238,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -277,8 +276,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -306,8 +305,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/core/log/impl/go.mod b/comp/core/log/impl/go.mod index 12d048a84bd4d..5bf17e04019ed 100644 --- a/comp/core/log/impl/go.mod +++ b/comp/core/log/impl/go.mod @@ -25,7 +25,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option 
github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system @@ -35,12 +35,12 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/comp/core/config v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 - github.com/DataDog/datadog-agent/pkg/util/log/setup v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.61.0 github.com/stretchr/testify v1.10.0 ) @@ -50,21 +50,21 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -77,16 +77,16 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect 
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -98,8 +98,8 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/core/log/impl/go.sum b/comp/core/log/impl/go.sum index dd16364891695..77eac717c35df 100644 --- a/comp/core/log/impl/go.sum +++ b/comp/core/log/impl/go.sum @@ -72,7 +72,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -110,8 +109,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -138,8 +137,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -156,8 +155,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -171,8 +170,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -183,8 +182,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 
h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -239,8 +238,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -277,8 +276,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -306,8 +305,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git 
a/comp/core/log/mock/go.mod b/comp/core/log/mock/go.mod index e4f0f87cabdcb..367cf9625ea51 100644 --- a/comp/core/log/mock/go.mod +++ b/comp/core/log/mock/go.mod @@ -20,7 +20,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system @@ -30,16 +30,15 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 - github.com/DataDog/datadog-agent/pkg/util/log/setup v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.61.0 ) require ( - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect @@ -50,12 +49,11 @@ require ( github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/core/log/mock/go.sum b/comp/core/log/mock/go.sum index 4b0aad4f95926..65314372d4ec2 100644 --- a/comp/core/log/mock/go.sum +++ b/comp/core/log/mock/go.sum @@ -101,8 +101,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats 
v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -127,8 +127,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -151,8 +151,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -163,8 +163,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod 
h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -207,8 +207,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -241,8 +241,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= diff --git a/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go b/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go index 03518a94b25d8..56c41c0f9aa94 100644 --- a/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go +++ b/comp/core/remoteagentregistry/impl/remoteagentregistry_test.go @@ -233,6 +233,7 @@ type testRemoteAgentServer struct { StatusMain map[string]string StatusNamed map[string]map[string]string FlareFiles map[string][]byte + pbgo.UnimplementedRemoteAgentServer } func (t *testRemoteAgentServer) GetStatusDetails(context.Context, *pbgo.GetStatusDetailsRequest) (*pbgo.GetStatusDetailsResponse, error) { diff --git a/comp/core/remoteagentregistry/proto/proto.go b/comp/core/remoteagentregistry/proto/proto.go index a38aa6d5ebce3..c275249e1e9f1 100644 --- a/comp/core/remoteagentregistry/proto/proto.go +++ b/comp/core/remoteagentregistry/proto/proto.go @@ -40,7 +40,10 @@ func ProtobufToStatusData(agentID string, displayName string, resp *pb.GetStatus } func protobufToStatusSection(statusSection *pb.StatusSection) remoteagentregistry.StatusSection { - return statusSection.Fields + if statusSection != nil { + return statusSection.Fields + } + return remoteagentregistry.StatusSection{} } func protobufToNamedSections(namedSections map[string]*pb.StatusSection) map[string]remoteagentregistry.StatusSection { diff 
--git a/comp/core/secrets/go.mod b/comp/core/secrets/go.mod index 82884c83888ab..dff40b1a8230d 100644 --- a/comp/core/secrets/go.mod +++ b/comp/core/secrets/go.mod @@ -10,7 +10,7 @@ replace ( github.com/DataDog/datadog-agent/comp/def => ../../def github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../pkg/util/winutil @@ -27,15 +27,15 @@ require ( github.com/benbjohnson/clock v1.3.5 github.com/stretchr/testify v1.10.0 go.uber.org/fx v1.23.0 - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 - golang.org/x/sys v0.28.0 + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 + golang.org/x/sys v0.29.0 gopkg.in/yaml.v2 v2.4.0 ) require ( github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.55.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.55.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -47,7 +47,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -55,7 +55,7 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/secrets/go.sum b/comp/core/secrets/go.sum index 73448320077c4..2eafa4ccda6ad 100644 --- a/comp/core/secrets/go.sum +++ b/comp/core/secrets/go.sum @@ -29,8 +29,8 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+ github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -54,12 +54,12 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod 
h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/comp/core/status/go.mod b/comp/core/status/go.mod index 0080f560df33f..53ec63c72737d 100644 --- a/comp/core/status/go.mod +++ b/comp/core/status/go.mod @@ -5,7 +5,7 @@ go 1.22.0 require ( github.com/dustin/go-humanize v1.0.1 github.com/fatih/color v1.18.0 - github.com/spf13/cast v1.7.0 + github.com/spf13/cast v1.7.1 github.com/stretchr/testify v1.10.0 go.uber.org/fx v1.23.0 golang.org/x/text v0.21.0 @@ -21,7 +21,7 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/status/go.sum b/comp/core/status/go.sum index 5008e39745d3e..deb52d76e2dfe 100644 --- a/comp/core/status/go.sum +++ b/comp/core/status/go.sum @@ -24,8 +24,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= @@ -40,8 +40,8 @@ go.uber.org/zap v1.27.0 
h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/core/status/statusimpl/common_header_provider.go b/comp/core/status/statusimpl/common_header_provider.go index 7dd7e2a615731..0a7d725e75a07 100644 --- a/comp/core/status/statusimpl/common_header_provider.go +++ b/comp/core/status/statusimpl/common_header_provider.go @@ -21,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/status" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/fips" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -73,6 +74,7 @@ func (h *headerProvider) HTML(_ bool, buffer io.Writer) error { func (h *headerProvider) data() map[string]interface{} { data := maps.Clone(h.constdata) data["time_nano"] = nowFunc().UnixNano() + data["fips_status"] = fips.Status() data["config"] = populateConfig(h.config) return data } diff --git a/comp/core/status/statusimpl/go.mod b/comp/core/status/statusimpl/go.mod index c121259bfa141..709322f3f64a9 100644 --- a/comp/core/status/statusimpl/go.mod +++ b/comp/core/status/statusimpl/go.mod @@ -20,6 +20,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../../pkg/config/nodetreemodel github.com/DataDog/datadog-agent/pkg/config/setup => ../../../../pkg/config/setup github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../../pkg/config/teeconfig + github.com/DataDog/datadog-agent/pkg/fips => ../../../../pkg/fips github.com/DataDog/datadog-agent/pkg/telemetry => ../../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../pkg/util/defaultpaths github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable @@ -29,7 +30,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system @@ -43,11 +44,12 @@ require ( github.com/DataDog/datadog-agent/comp/api/api/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 - 
github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/log/mock v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 + github.com/DataDog/datadog-agent/comp/core/log/mock v0.61.0 github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 + github.com/DataDog/datadog-agent/pkg/fips v0.0.0 github.com/DataDog/datadog-agent/pkg/util/flavor v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/version v0.59.1 @@ -64,20 +66,20 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/log/setup v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect @@ -91,7 +93,7 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -99,10 +101,10 @@ require ( github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect 
github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -113,8 +115,8 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/status/statusimpl/go.sum b/comp/core/status/statusimpl/go.sum index 73fea28e04146..416a08482758c 100644 --- a/comp/core/status/statusimpl/go.sum +++ b/comp/core/status/statusimpl/go.sum @@ -76,7 +76,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -116,8 +115,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -149,8 +148,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat 
v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -167,8 +166,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -182,8 +181,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -194,8 +193,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra 
v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -250,8 +249,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -290,8 +289,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -319,8 +318,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/core/status/statusimpl/templates/html.tmpl b/comp/core/status/statusimpl/templates/html.tmpl index 6dc699f0a5638..dc3efef7b9f23 100644 --- a/comp/core/status/statusimpl/templates/html.tmpl +++ 
b/comp/core/status/statusimpl/templates/html.tmpl
@@ -3,6 +3,9 @@
 Version: {{.version}}<br>
 Flavor: {{.flavor}}<br>
+ {{- if .fips_status}}
+ FIPS compliant: {{.fips_status}}<br>
+ {{- end }}
 PID: {{.pid}}<br>
 Agent start: {{ formatUnixTime .agent_start_nano }}<br>
{{- if .config.log_file}} diff --git a/comp/core/status/statusimpl/templates/text.tmpl b/comp/core/status/statusimpl/templates/text.tmpl index a33c7a5218a05..a86ad4069da38 100644 --- a/comp/core/status/statusimpl/templates/text.tmpl +++ b/comp/core/status/statusimpl/templates/text.tmpl @@ -7,6 +7,9 @@ {{- end }} Build arch: {{.build_arch}} Agent flavor: {{.flavor}} + {{- if .fips_status}} + FIPS compliant: {{.fips_status}} + {{- end }} {{- if .config.log_file}} Log File: {{.config.log_file}} {{- end }} diff --git a/comp/core/sysprobeconfig/component.go b/comp/core/sysprobeconfig/component.go index 5a87cfdf04115..90f836e5382fc 100644 --- a/comp/core/sysprobeconfig/component.go +++ b/comp/core/sysprobeconfig/component.go @@ -21,7 +21,7 @@ import ( sysconfigtypes "github.com/DataDog/datadog-agent/cmd/system-probe/config/types" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // team: ebpf-platform @@ -42,7 +42,7 @@ type Component interface { // This helper allows code that needs a disabled Optional type for sysprobeconfig to get it. The helper is split from // the implementation to avoid linking with the dependencies from sysprobeconfig. func NoneModule() fxutil.Module { - return fxutil.Component(fx.Provide(func() optional.Option[Component] { - return optional.NewNoneOption[Component]() + return fxutil.Component(fx.Provide(func() option.Option[Component] { + return option.None[Component]() })) } diff --git a/comp/core/tagger/README.md b/comp/core/tagger/README.md index 774a2fcaeecf2..060a3e5680cf3 100644 --- a/comp/core/tagger/README.md +++ b/comp/core/tagger/README.md @@ -57,6 +57,7 @@ Tagger entities are identified by a string-typed ID, with one of the following f | workloadmeta.KindKubernetesMetadata | `kubernetes_metadata://///` (`` is empty in cluster-scoped objects) | | workloadmeta.KindKubernetesPod | `kubernetes_pod_uid://` | | workloadmeta.KindProcess | `process://` | +| workloadmeta.KindGPU | `gpu://` | ## Tagger diff --git a/comp/core/tagger/collectors/workloadmeta_extract.go b/comp/core/tagger/collectors/workloadmeta_extract.go index 062931c0e3311..b19b534856e00 100644 --- a/comp/core/tagger/collectors/workloadmeta_extract.go +++ b/comp/core/tagger/collectors/workloadmeta_extract.go @@ -149,6 +149,8 @@ func (c *WorkloadMetaCollector) processEvents(evBundle workloadmeta.EventBundle) // tagInfos = append(tagInfos, c.handleProcess(ev)...) No tags for now case workloadmeta.KindKubernetesDeployment: tagInfos = append(tagInfos, c.handleKubeDeployment(ev)...) + case workloadmeta.KindGPU: + tagInfos = append(tagInfos, c.handleGPU(ev)...) 
default: log.Errorf("cannot handle event for entity %q with kind %q", entityID.ID, entityID.Kind) } @@ -613,6 +615,35 @@ func (c *WorkloadMetaCollector) handleKubeMetadata(ev workloadmeta.Event) []*typ return tagInfos } +func (c *WorkloadMetaCollector) handleGPU(ev workloadmeta.Event) []*types.TagInfo { + gpu := ev.Entity.(*workloadmeta.GPU) + + tagList := taglist.NewTagList() + + tagList.AddLow(tags.KubeGPUVendor, gpu.Vendor) + tagList.AddLow(tags.KubeGPUDevice, gpu.Device) + tagList.AddLow(tags.KubeGPUUUID, gpu.ID) + + low, orch, high, standard := tagList.Compute() + + if len(low)+len(orch)+len(high)+len(standard) == 0 { + return nil + } + + tagInfos := []*types.TagInfo{ + { + Source: gpuSource, + EntityID: common.BuildTaggerEntityID(gpu.EntityID), + HighCardTags: high, + OrchestratorCardTags: orch, + LowCardTags: low, + StandardTags: standard, + }, + } + + return tagInfos +} + func (c *WorkloadMetaCollector) extractTagsFromPodLabels(pod *workloadmeta.KubernetesPod, tagList *taglist.TagList) { for name, value := range pod.Labels { switch name { diff --git a/comp/core/tagger/collectors/workloadmeta_main.go b/comp/core/tagger/collectors/workloadmeta_main.go index 15fec7fe4ce5b..c3dfc7cfd6d8c 100644 --- a/comp/core/tagger/collectors/workloadmeta_main.go +++ b/comp/core/tagger/collectors/workloadmeta_main.go @@ -18,10 +18,10 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" configutils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/status/health" - "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/util/log" + tagutil "github.com/DataDog/datadog-agent/pkg/util/tags" ) const ( @@ -35,6 +35,7 @@ const ( processSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindProcess) kubeMetadataSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindKubernetesMetadata) deploymentSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindKubernetesDeployment) + gpuSource = workloadmetaCollectorName + "-" + string(workloadmeta.KindGPU) clusterTagNamePrefix = "kube_cluster_name" ) @@ -96,7 +97,7 @@ func (c *WorkloadMetaCollector) Run(ctx context.Context, datadogConfig config.Co } func (c *WorkloadMetaCollector) collectStaticGlobalTags(ctx context.Context, datadogConfig config.Component) { - c.staticTags = util.GetStaticTags(ctx, datadogConfig) + c.staticTags = tagutil.GetStaticTags(ctx, datadogConfig) if _, exists := c.staticTags[clusterTagNamePrefix]; flavor.GetFlavor() == flavor.ClusterAgent && !exists { // If we are running the cluster agent, we want to set the kube_cluster_name tag as a global tag if we are able // to read it, for the instances where we are running in an environment where hostname cannot be detected. @@ -112,7 +113,7 @@ func (c *WorkloadMetaCollector) collectStaticGlobalTags(ctx context.Context, dat } // These are the global tags that should only be applied to the internal global entity on DCA. // Whereas the static tags are applied to containers and pods directly as well. 
- globalEnvTags := util.GetGlobalEnvTags(datadogConfig) + globalEnvTags := tagutil.GetGlobalEnvTags(datadogConfig) tagList := taglist.NewTagList() diff --git a/comp/core/tagger/collectors/workloadmeta_test.go b/comp/core/tagger/collectors/workloadmeta_test.go index a15d7e674f41e..5837cbfbaf260 100644 --- a/comp/core/tagger/collectors/workloadmeta_test.go +++ b/comp/core/tagger/collectors/workloadmeta_test.go @@ -2253,6 +2253,61 @@ func TestHandleContainerImage(t *testing.T) { } } +func TestHandleGPU(t *testing.T) { + entityID := workloadmeta.EntityID{ + Kind: workloadmeta.KindGPU, + ID: "gpu-1234", + } + + taggerEntityID := types.NewEntityID(types.GPU, entityID.ID) + + tests := []struct { + name string + gpu workloadmeta.GPU + expected []*types.TagInfo + }{ + { + name: "basic", + gpu: workloadmeta.GPU{ + EntityID: entityID, + EntityMeta: workloadmeta.EntityMeta{ + Name: entityID.ID, + }, + Vendor: "nvidia", + Device: "tesla-v100", + }, + expected: []*types.TagInfo{ + { + Source: gpuSource, + EntityID: taggerEntityID, + HighCardTags: []string{}, + OrchestratorCardTags: []string{}, + LowCardTags: []string{ + "gpu_vendor:nvidia", + "gpu_device:tesla-v100", + "gpu_uuid:gpu-1234", + }, + StandardTags: []string{}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := configmock.New(t) + collector := NewWorkloadMetaCollector(context.Background(), cfg, nil, nil) + + actual := collector.handleGPU(workloadmeta.Event{ + Type: workloadmeta.EventTypeSet, + Entity: &tt.gpu, + }) + + assertTagInfoListEqual(t, tt.expected, actual) + }) + } +} + func TestHandleDelete(t *testing.T) { const ( podName = "datadog-agent-foobar" diff --git a/comp/core/tagger/common/entity_id_builder.go b/comp/core/tagger/common/entity_id_builder.go index 054a0b4493ba6..108f440e0cf3f 100644 --- a/comp/core/tagger/common/entity_id_builder.go +++ b/comp/core/tagger/common/entity_id_builder.go @@ -29,6 +29,8 @@ func BuildTaggerEntityID(entityID workloadmeta.EntityID) types.EntityID { return types.NewEntityID(types.KubernetesDeployment, entityID.ID) case workloadmeta.KindKubernetesMetadata: return types.NewEntityID(types.KubernetesMetadata, entityID.ID) + case workloadmeta.KindGPU: + return types.NewEntityID(types.GPU, entityID.ID) default: log.Errorf("can't recognize entity %q with kind %q; trying %s://%s as tagger entity", entityID.ID, entityID.Kind, entityID.ID, entityID.Kind) diff --git a/comp/core/tagger/impl-remote/remote.go b/comp/core/tagger/impl-remote/remote.go index 2da451af35302..80a8f4a35d9a8 100644 --- a/comp/core/tagger/impl-remote/remote.go +++ b/comp/core/tagger/impl-remote/remote.go @@ -258,7 +258,6 @@ func (t *remoteTagger) LegacyTag(entity string, cardinality types.TagCardinality } // GenerateContainerIDFromOriginInfo returns a container ID for the given Origin Info. -// This function currently only uses the External Data from the Origin Info to generate the container ID. 
func (t *remoteTagger) GenerateContainerIDFromOriginInfo(originInfo origindetection.OriginInfo) (string, error) { fail := true defer func() { @@ -269,15 +268,10 @@ func (t *remoteTagger) GenerateContainerIDFromOriginInfo(originInfo origindetect } }() - // Generate cache key - initPrefix := "" - if originInfo.ExternalData.Init { - initPrefix = "i/" - } key := cache.BuildAgentKey( "remoteTagger", - "cid", - initPrefix+originInfo.ExternalData.PodUID+"/"+originInfo.ExternalData.ContainerName, + "originInfo", + origindetection.OriginInfoString(originInfo), ) cachedContainerID, err := cache.GetWithExpiration(key, func() (containerID string, err error) { diff --git a/comp/core/tagger/impl/local_tagger.go b/comp/core/tagger/impl/local_tagger.go index 85c8741c52b02..3a2345b4a9389 100644 --- a/comp/core/tagger/impl/local_tagger.go +++ b/comp/core/tagger/impl/local_tagger.go @@ -12,6 +12,7 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/core/config" + log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/tagger/collectors" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" @@ -22,7 +23,16 @@ import ( taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" +) + +const ( + // pidCacheTTL is the time to live for the PID cache + pidCacheTTL = 1 * time.Second + // inodeCacheTTL is the time to live for the inode cache + inodeCacheTTL = 1 * time.Second + // externalDataCacheTTL is the time to live for the external data cache + externalDataCacheTTL = 1 * time.Second ) // Tagger is the entry class for entity tagging. It hold the tagger collector, @@ -34,6 +44,7 @@ type localTagger struct { tagStore *tagstore.TagStore workloadStore workloadmeta.Component + log log.Component cfg config.Component collector *collectors.WorkloadMetaCollector @@ -42,10 +53,11 @@ type localTagger struct { telemetryStore *telemetry.Store } -func newLocalTagger(cfg config.Component, wmeta workloadmeta.Component, telemetryStore *telemetry.Store) (tagger.Component, error) { +func newLocalTagger(cfg config.Component, wmeta workloadmeta.Component, log log.Component, telemetryStore *telemetry.Store) (tagger.Component, error) { return &localTagger{ tagStore: tagstore.NewTagStore(telemetryStore), workloadStore: wmeta, + log: log, telemetryStore: telemetryStore, cfg: cfg, }, nil @@ -104,9 +116,63 @@ func (t *localTagger) Tag(entityID types.EntityID, cardinality types.TagCardinal } // GenerateContainerIDFromOriginInfo generates a container ID from Origin Info. -func (t *localTagger) GenerateContainerIDFromOriginInfo(originInfo origindetection.OriginInfo) (string, error) { - metaCollector := metrics.GetProvider(optional.NewOption(t.workloadStore)).GetMetaCollector() - return metaCollector.ContainerIDForPodUIDAndContName(originInfo.ExternalData.PodUID, originInfo.ExternalData.ContainerName, originInfo.ExternalData.Init, time.Second) +// The resolutions will be done in the following order: +// * OriginInfo.LocalData.ContainerID: If the container ID is already known, return it. +// * OriginInfo.LocalData.ProcessID: If the process ID is known, do a PID resolution. +// * OriginInfo.LocalData.Inode: If the inode is known, do an inode resolution. 
+// * OriginInfo.ExternalData: If the ExternalData are known, do an ExternalData resolution. +func (t *localTagger) GenerateContainerIDFromOriginInfo(originInfo origindetection.OriginInfo) (containerID string, err error) { + t.log.Debugf("Generating container ID from OriginInfo: %+v", originInfo) + // If the container ID is already known, return it. + if originInfo.LocalData.ContainerID != "" { + t.log.Debugf("Found OriginInfo.LocalData.ContainerID: %s", originInfo.LocalData.ContainerID) + containerID = originInfo.LocalData.ContainerID + return + } + + // Get the MetaCollector from WorkloadMeta. + metaCollector := metrics.GetProvider(option.New(t.workloadStore)).GetMetaCollector() + + // If the process ID is known, do a PID resolution. + if originInfo.LocalData.ProcessID != 0 { + t.log.Debugf("Resolving container ID from PID: %d", originInfo.LocalData.ProcessID) + containerID, err = metaCollector.GetContainerIDForPID(int(originInfo.LocalData.ProcessID), pidCacheTTL) + if err != nil { + t.log.Errorf("Error resolving container ID from PID: %v", err) + } else if containerID == "" { + t.log.Errorf("No container ID found for PID: %d", originInfo.LocalData.ProcessID) + } else { + return + } + } + + // If the inode is known, do an inode resolution. + if originInfo.LocalData.Inode != 0 { + t.log.Debugf("Resolving container ID from inode: %d", originInfo.LocalData.Inode) + containerID, err = metaCollector.GetContainerIDForInode(originInfo.LocalData.Inode, inodeCacheTTL) + if err != nil { + t.log.Errorf("Error resolving container ID from inode: %v", err) + } else if containerID == "" { + t.log.Errorf("No container ID found for inode: %d", originInfo.LocalData.Inode) + } else { + return + } + } + + // If the ExternalData are known, do an ExternalData resolution. + if originInfo.ExternalData.PodUID != "" && originInfo.ExternalData.ContainerName != "" { + t.log.Debugf("Resolving container ID from ExternalData: %+v", originInfo.ExternalData) + containerID, err = metaCollector.ContainerIDForPodUIDAndContName(originInfo.ExternalData.PodUID, originInfo.ExternalData.ContainerName, originInfo.ExternalData.Init, externalDataCacheTTL) + if err != nil { + t.log.Errorf("Error resolving container ID from ExternalData: %v", err) + } else if containerID == "" { + t.log.Errorf("No container ID found for ExternalData: %+v", originInfo.ExternalData) + } else { + return + } + } + + return "", fmt.Errorf("unable to resolve container ID from OriginInfo: %+v", originInfo) } // LegacyTag has the same behaviour as the Tag method, but it receives the entity id as a string and parses it. 
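A minimal sketch of how a caller might exercise the resolution order that local_tagger.go now documents (explicit container ID first, then PID, then cgroup inode, then External Data). The helper name and the literal values are illustrative only; the import paths, the tagger.Component interface, and the origindetection types are the ones used in this diff.

package example

import (
	tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def"
	"github.com/DataDog/datadog-agent/comp/core/tagger/origindetection"
)

// resolveContainerID is a hypothetical helper; t is any started tagger.Component
// (local or remote). The OriginInfo fields mirror the fallback order above.
func resolveContainerID(t tagger.Component) (string, error) {
	originInfo := origindetection.OriginInfo{
		LocalData: origindetection.LocalData{
			ContainerID: "",     // empty here, so the tagger falls back...
			ProcessID:   4242,   // ...to PID resolution,
			Inode:       123456, // ...then to the cgroup inode,
		},
		ExternalData: origindetection.ExternalData{ // ...and finally to External Data.
			Init:          false,
			ContainerName: "app",
			PodUID:        "11111111-2222-3333-4444-555555555555",
		},
	}
	return t.GenerateContainerIDFromOriginInfo(originInfo)
}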
diff --git a/comp/core/tagger/impl/local_tagger_test.go b/comp/core/tagger/impl/local_tagger_test.go index e587407860e01..43d819cf6e09d 100644 --- a/comp/core/tagger/impl/local_tagger_test.go +++ b/comp/core/tagger/impl/local_tagger_test.go @@ -7,6 +7,7 @@ package taggerimpl import ( "context" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -15,6 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" taggerTelemetry "github.com/DataDog/datadog-agent/comp/core/tagger/telemetry" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/comp/core/telemetry" @@ -23,6 +25,7 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/tagset" + collectormock "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -38,9 +41,10 @@ func TestAccumulateTagsFor(t *testing.T) { )) tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) + logComponent := logmock.New(t) telemetryStore := taggerTelemetry.NewStore(tel) cfg := configmock.New(t) - tagger, err := newLocalTagger(cfg, store, telemetryStore) + tagger, err := newLocalTagger(cfg, store, logComponent, telemetryStore) assert.NoError(t, err) localTagger := tagger.(*localTagger) localTagger.Start(context.Background()) @@ -78,9 +82,10 @@ func TestTag(t *testing.T) { )) tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) + logComponent := logmock.New(t) telemetryStore := taggerTelemetry.NewStore(tel) cfg := configmock.New(t) - tagger, err := newLocalTagger(cfg, store, telemetryStore) + tagger, err := newLocalTagger(cfg, store, logComponent, telemetryStore) assert.NoError(t, err) localTagger := tagger.(*localTagger) @@ -113,3 +118,113 @@ func TestTag(t *testing.T) { assert.NoError(t, err) assert.ElementsMatch(t, []string{"low1", "low2", "orchestrator1", "orchestrator2", "high1", "high2"}, highCardTags) } + +func TestGenerateContainerIDFromOriginInfo(t *testing.T) { + store := fxutil.Test[workloadmeta.Component](t, fx.Options( + fx.Supply(config.Params{}), + fx.Supply(log.Params{}), + fx.Provide(func() log.Component { return logmock.New(t) }), + config.MockModule(), + workloadmetafxmock.MockModule(workloadmeta.NewParams()), + )) + + tel := fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) + logComponent := logmock.New(t) + telemetryStore := taggerTelemetry.NewStore(tel) + cfg := configmock.New(t) + tagger, taggerErr := newLocalTagger(cfg, store, logComponent, telemetryStore) + assert.NoError(t, taggerErr) + localTagger := tagger.(*localTagger) + + // Overriding the GetProvider function to use the mock metrics provider + mockMetricsProvider := collectormock.NewMetricsProvider() + cleanUp := setupFakeMetricsProvider(mockMetricsProvider) + defer cleanUp() + + for _, tt := range []struct { + name string + originInfo origindetection.OriginInfo + expectedContainerID string + expectedError error + setup func() + }{ + { + name: "with empty OriginInfo", + originInfo: origindetection.OriginInfo{}, + expectedContainerID: "", + expectedError: fmt.Errorf("unable to resolve container ID from OriginInfo: %+v", origindetection.OriginInfo{}), + setup: func() {}, + }, + { + name: 
"with container ID", + originInfo: origindetection.OriginInfo{ + LocalData: origindetection.LocalData{ContainerID: "container_id"}, + }, + expectedContainerID: "container_id", + setup: func() {}, + }, + { + name: "with ProcessID", + originInfo: origindetection.OriginInfo{ + LocalData: origindetection.LocalData{ProcessID: 123}, + }, + expectedContainerID: "container_id", + setup: func() { + mockCollector := collectormock.MetaCollector{CIDFromPID: map[int]string{123: "container_id"}} + mockMetricsProvider.RegisterMetaCollector(&mockCollector) + }, + }, + { + name: "with Inode", + originInfo: origindetection.OriginInfo{ + LocalData: origindetection.LocalData{Inode: 123}, + }, + expectedContainerID: "container_id", + setup: func() { + mockCollector := collectormock.MetaCollector{CIDFromInode: map[uint64]string{123: "container_id"}} + mockMetricsProvider.RegisterMetaCollector(&mockCollector) + }, + }, + { + name: "with External Data", + originInfo: origindetection.OriginInfo{ + ExternalData: origindetection.ExternalData{ + ContainerName: "container_name", + PodUID: "pod_uid", + }, + }, + expectedContainerID: "container_id", + setup: func() { + mockCollector := collectormock.MetaCollector{CIDFromPodUIDContName: map[string]string{"pod_uid/container_name": "container_id"}} + mockMetricsProvider.RegisterMetaCollector(&mockCollector) + }, + }, + { + name: "with External Data and Init Container", + originInfo: origindetection.OriginInfo{ + ExternalData: origindetection.ExternalData{ + Init: true, + ContainerName: "container_name", + PodUID: "pod_uid", + }, + }, + expectedContainerID: "container_id", + setup: func() { + mockCollector := collectormock.MetaCollector{CIDFromPodUIDContName: map[string]string{"i-pod_uid/container_name": "container_id"}} + mockMetricsProvider.RegisterMetaCollector(&mockCollector) + }, + }, + } { + t.Run(tt.name, func(t *testing.T) { + tt.setup() + containerID, err := localTagger.GenerateContainerIDFromOriginInfo(tt.originInfo) + if tt.expectedError != nil { + assert.Error(t, err) + assert.Equal(t, tt.expectedError, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tt.expectedContainerID, containerID) + } + }) + } +} diff --git a/comp/core/tagger/impl/tagger.go b/comp/core/tagger/impl/tagger.go index de05b9f6eaa09..03ff624e140bf 100644 --- a/comp/core/tagger/impl/tagger.go +++ b/comp/core/tagger/impl/tagger.go @@ -17,8 +17,6 @@ import ( "encoding/json" "net/http" "reflect" - "strconv" - "strings" "sync" "time" @@ -41,24 +39,9 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" httputils "github.com/DataDog/datadog-agent/pkg/util/http" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) -const ( - // External Data Prefixes - // These prefixes are used to build the External Data Environment Variable. - // This variable is then used for Origin Detection. - externalDataInitPrefix = "it-" - externalDataContainerNamePrefix = "cn-" - externalDataPodUIDPrefix = "pu-" -) - -type externalData struct { - init bool - containerName string - podUID string -} - // Requires defines the dependencies of the tagger component. 
type Requires struct { compdef.In @@ -145,7 +128,7 @@ func NewTaggerClient(params tagger.Params, cfg config.Component, wmeta workloadm if params.UseFakeTagger { defaultTagger = taggermock.New().Comp } else { - defaultTagger, err = newLocalTagger(cfg, wmeta, telemetryStore) + defaultTagger, err = newLocalTagger(cfg, wmeta, log, telemetryStore) } if err != nil { @@ -313,7 +296,7 @@ func (t *TaggerWrapper) Standard(entityID types.EntityID) ([]string, error) { // AgentTags returns the agent tags // It relies on the container provider utils to get the Agent container ID func (t *TaggerWrapper) AgentTags(cardinality types.TagCardinality) ([]string, error) { - ctrID, err := metrics.GetProvider(optional.NewOption(t.wmeta)).GetMetaCollector().GetSelfContainerID() + ctrID, err := metrics.GetProvider(option.New(t.wmeta)).GetMetaCollector().GetSelfContainerID() if err != nil { return nil, err } @@ -389,14 +372,23 @@ func (t *TaggerWrapper) EnrichTags(tb tagset.TagsAccumulator, originInfo taggert productOrigin := originInfo.ProductOrigin // If origin_detection_unified is disabled, we use DogStatsD's Legacy Origin Detection. // TODO: remove this when origin_detection_unified is enabled by default - if !t.datadogConfig.originDetectionUnifiedEnabled && productOrigin == taggertypes.ProductOriginDogStatsD { - productOrigin = taggertypes.ProductOriginDogStatsDLegacy + if !t.datadogConfig.originDetectionUnifiedEnabled && productOrigin == origindetection.ProductOriginDogStatsD { + productOrigin = origindetection.ProductOriginDogStatsDLegacy } containerIDFromSocketCutIndex := len(types.ContainerID) + types.GetSeparatorLengh() + // Generate container ID from Inode + if originInfo.LocalData.ContainerID == "" { + var inodeResolutionError error + originInfo.LocalData.ContainerID, inodeResolutionError = t.generateContainerIDFromInode(originInfo.LocalData, metrics.GetProvider(option.New(t.wmeta)).GetMetaCollector()) + if inodeResolutionError != nil { + t.log.Tracef("Failed to resolve container ID from inode %d: %v", originInfo.LocalData.Inode, inodeResolutionError) + } + } + switch productOrigin { - case taggertypes.ProductOriginDogStatsDLegacy: + case origindetection.ProductOriginDogStatsDLegacy: // The following was moved from the dogstatsd package // originFromUDS is the origin discovered via UDS origin detection (container ID). // originFromTag is the origin sent by the client via the dd.internal.entity_id tag (non-prefixed pod uid). @@ -424,15 +416,15 @@ func (t *TaggerWrapper) EnrichTags(tb tagset.TagsAccumulator, originInfo taggert // | none | not empty || container prefix + originFromMsg | if t.datadogConfig.dogstatsdOptOutEnabled && originInfo.Cardinality == types.NoneCardinalityString { originInfo.ContainerIDFromSocket = packets.NoOrigin - originInfo.PodUID = "" - originInfo.ContainerID = "" + originInfo.LocalData.PodUID = "" + originInfo.LocalData.ContainerID = "" return } // We use the UDS socket origin if no origin ID was specify in the tags // or 'dogstatsd_entity_id_precedence' is set to False (default false). 
if originInfo.ContainerIDFromSocket != packets.NoOrigin && - (originInfo.PodUID == "" || !t.datadogConfig.dogstatsdEntityIDPrecedenceEnabled) && + (originInfo.LocalData.PodUID == "" || !t.datadogConfig.dogstatsdEntityIDPrecedenceEnabled) && len(originInfo.ContainerIDFromSocket) > containerIDFromSocketCutIndex { containerID := originInfo.ContainerIDFromSocket[containerIDFromSocketCutIndex:] originFromClient := types.NewEntityID(types.ContainerID, containerID) @@ -443,13 +435,13 @@ func (t *TaggerWrapper) EnrichTags(tb tagset.TagsAccumulator, originInfo taggert // originFromClient can either be originInfo.FromTag or originInfo.FromMsg var originFromClient types.EntityID - if originInfo.PodUID != "" && originInfo.PodUID != "none" { + if originInfo.LocalData.PodUID != "" && originInfo.LocalData.PodUID != "none" { // Check if the value is not "none" in order to avoid calling the tagger for entity that doesn't exist. // Currently only supported for pods - originFromClient = types.NewEntityID(types.KubernetesPodUID, originInfo.PodUID) - } else if originInfo.PodUID == "" && len(originInfo.ContainerID) > 0 { + originFromClient = types.NewEntityID(types.KubernetesPodUID, originInfo.LocalData.PodUID) + } else if originInfo.LocalData.PodUID == "" && len(originInfo.LocalData.ContainerID) > 0 { // originInfo.FromMsg is the container ID sent by the newer clients. - originFromClient = types.NewEntityID(types.ContainerID, originInfo.ContainerID) + originFromClient = types.NewEntityID(types.ContainerID, originInfo.LocalData.ContainerID) } if !originFromClient.Empty() { @@ -462,8 +454,8 @@ func (t *TaggerWrapper) EnrichTags(tb tagset.TagsAccumulator, originInfo taggert // Disable origin detection if cardinality is none if originInfo.Cardinality == types.NoneCardinalityString { originInfo.ContainerIDFromSocket = packets.NoOrigin - originInfo.PodUID = "" - originInfo.ContainerID = "" + originInfo.LocalData.PodUID = "" + originInfo.LocalData.ContainerID = "" return } @@ -476,59 +468,31 @@ func (t *TaggerWrapper) EnrichTags(tb tagset.TagsAccumulator, originInfo taggert } } - if err := t.AccumulateTagsFor(types.NewEntityID(types.ContainerID, originInfo.ContainerID), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.ContainerID, err) + if err := t.AccumulateTagsFor(types.NewEntityID(types.ContainerID, originInfo.LocalData.ContainerID), cardinality, tb); err != nil { + t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.LocalData.ContainerID, err) } - if err := t.AccumulateTagsFor(types.NewEntityID(types.KubernetesPodUID, originInfo.PodUID), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.PodUID, err) + if err := t.AccumulateTagsFor(types.NewEntityID(types.KubernetesPodUID, originInfo.LocalData.PodUID), cardinality, tb); err != nil { + t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.LocalData.PodUID, err) } - // Tag using External Data. - // External Data is a list that contain prefixed-items, split by a ','. Current items are: - // * "it-" if the container is an init container. - // * "cn-" for the container name. - // * "pu-" for the pod UID. - // Order does not matter. 
- // Possible values: - // * "it-false,cn-nginx,pu-3413883c-ac60-44ab-96e0-9e52e4e173e2" - // * "cn-init,pu-cb4aba1d-0129-44f1-9f1b-b4dc5d29a3b3,it-true" - if originInfo.ExternalData != "" { - // Parse the external data and get the tags for the entity - var parsedExternalData externalData - var initParsingError error - for _, item := range strings.Split(originInfo.ExternalData, ",") { - switch { - case strings.HasPrefix(item, externalDataInitPrefix): - parsedExternalData.init, initParsingError = strconv.ParseBool(item[len(externalDataInitPrefix):]) - if initParsingError != nil { - t.log.Tracef("Cannot parse bool from %s: %s", item[len(externalDataInitPrefix):], initParsingError) - } - case strings.HasPrefix(item, externalDataContainerNamePrefix): - parsedExternalData.containerName = item[len(externalDataContainerNamePrefix):] - case strings.HasPrefix(item, externalDataPodUIDPrefix): - parsedExternalData.podUID = item[len(externalDataPodUIDPrefix):] - } - } - - // Accumulate tags for pod UID - if parsedExternalData.podUID != "" { - if err := t.AccumulateTagsFor(types.NewEntityID(types.KubernetesPodUID, parsedExternalData.podUID), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.ContainerID, err) - } + // Accumulate tags for pod UID + if originInfo.ExternalData.PodUID != "" { + if err := t.AccumulateTagsFor(types.NewEntityID(types.KubernetesPodUID, originInfo.ExternalData.PodUID), cardinality, tb); err != nil { + t.log.Tracef("Cannot get tags for entity %s: %s", originInfo.ExternalData.PodUID, err) } + } - // Generate container ID from External Data - generatedContainerID, err := t.generateContainerIDFromExternalData(parsedExternalData, metrics.GetProvider(optional.NewOption(t.wmeta)).GetMetaCollector()) - if err != nil { - t.log.Tracef("Failed to generate container ID from %s: %s", originInfo.ExternalData, err) - } + // Generate container ID from External Data + generatedContainerID, err := t.generateContainerIDFromExternalData(originInfo.ExternalData, metrics.GetProvider(option.New(t.wmeta)).GetMetaCollector()) + if err != nil { + t.log.Tracef("Failed to generate container ID from %s: %s", originInfo.ExternalData, err) + } - // Accumulate tags for generated container ID - if generatedContainerID != "" { - if err := t.AccumulateTagsFor(types.NewEntityID(types.ContainerID, generatedContainerID), cardinality, tb); err != nil { - t.log.Tracef("Cannot get tags for entity %s: %s", generatedContainerID, err) - } + // Accumulate tags for generated container ID + if generatedContainerID != "" { + if err := t.AccumulateTagsFor(types.NewEntityID(types.ContainerID, generatedContainerID), cardinality, tb); err != nil { + t.log.Tracef("Cannot get tags for entity %s: %s", generatedContainerID, err) } } } @@ -543,9 +507,14 @@ func (t *TaggerWrapper) GenerateContainerIDFromOriginInfo(originInfo origindetec return t.defaultTagger.GenerateContainerIDFromOriginInfo(originInfo) } +// generateContainerIDFromInode generates a container ID from the CGroup inode. +func (t *TaggerWrapper) generateContainerIDFromInode(e origindetection.LocalData, metricsProvider provider.ContainerIDForInodeRetriever) (string, error) { + return metricsProvider.GetContainerIDForInode(e.Inode, time.Second) +} + // generateContainerIDFromExternalData generates a container ID from the External Data. 
-func (t *TaggerWrapper) generateContainerIDFromExternalData(e externalData, metricsProvider provider.ContainerIDForPodUIDAndContNameRetriever) (string, error) { - return metricsProvider.ContainerIDForPodUIDAndContName(e.podUID, e.containerName, e.init, time.Second) +func (t *TaggerWrapper) generateContainerIDFromExternalData(e origindetection.ExternalData, metricsProvider provider.ContainerIDForPodUIDAndContNameRetriever) (string, error) { + return metricsProvider.ContainerIDForPodUIDAndContName(e.PodUID, e.ContainerName, e.Init, time.Second) } // ChecksCardinality defines the cardinality of tags we should send for check metrics diff --git a/comp/core/tagger/impl/tagger_test.go b/comp/core/tagger/impl/tagger_test.go index da581f20f3fd0..ed2cb328a02d8 100644 --- a/comp/core/tagger/impl/tagger_test.go +++ b/comp/core/tagger/impl/tagger_test.go @@ -18,6 +18,7 @@ import ( logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/core/tagger/mock" + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" "github.com/DataDog/datadog-agent/comp/core/tagger/types" noopTelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" @@ -28,7 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" collectormock "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type fakeCIDProvider struct { @@ -47,7 +48,7 @@ func (f *fakeCIDProvider) ContainerIDForPodUIDAndContName(podUID, contName strin // Sets up the fake metrics provider and returns a function to reset the original metrics provider func setupFakeMetricsProvider(mockMetricsProvider metrics.Provider) func() { originalMetricsProvider := metrics.GetProvider - metrics.GetProvider = func(_ optional.Option[workloadmeta.Component]) metrics.Provider { + metrics.GetProvider = func(_ option.Option[workloadmeta.Component]) metrics.Provider { return mockMetricsProvider } return func() { metrics.GetProvider = originalMetricsProvider } @@ -89,28 +90,53 @@ func TestEnrichTags(t *testing.T) { expectedTags: []string{}, }, { - name: "with local data (containerID) and low cardinality", - originInfo: taggertypes.OriginInfo{ContainerID: containerID, Cardinality: "low"}, + name: "with local data (containerID) and low cardinality", + originInfo: taggertypes.OriginInfo{ + LocalData: origindetection.LocalData{ + ContainerID: containerID, + }, + Cardinality: "low", + }, expectedTags: []string{"container-low"}, }, { - name: "with local data (containerID) and high cardinality", - originInfo: taggertypes.OriginInfo{ContainerID: containerID, Cardinality: "high"}, - expectedTags: []string{"container-low", "container-orch", "container-high"}, + name: "with local data (containerID) and high cardinality", + originInfo: taggertypes.OriginInfo{ + LocalData: origindetection.LocalData{ + ContainerID: containerID, + }, + Cardinality: "high", + }, expectedTags: []string{"container-low", "container-orch", "container-high"}, }, { - name: "with local data (podUID) and low cardinality", - originInfo: taggertypes.OriginInfo{PodUID: podUID, Cardinality: "low"}, + name: "with local data (podUID) and low cardinality", + originInfo: taggertypes.OriginInfo{ + LocalData: origindetection.LocalData{ 
+ PodUID: podUID, + }, + Cardinality: "low", + }, expectedTags: []string{"pod-low"}, }, { - name: "with local data (podUID) and high cardinality", - originInfo: taggertypes.OriginInfo{PodUID: podUID, Cardinality: "high"}, + name: "with local data (podUID) and high cardinality", + originInfo: taggertypes.OriginInfo{ + LocalData: origindetection.LocalData{ + PodUID: podUID, + }, + Cardinality: "high", + }, expectedTags: []string{"pod-low", "pod-orch", "pod-high"}, }, { - name: "with local data (podUID, containerIDFromSocket) and high cardinality, APM origin", - originInfo: taggertypes.OriginInfo{PodUID: podUID, Cardinality: "high", ContainerIDFromSocket: fmt.Sprintf("container_id://%s", containerID), ProductOrigin: taggertypes.ProductOriginAPM}, + name: "with local data (podUID, containerIDFromSocket) and high cardinality, APM origin", + originInfo: taggertypes.OriginInfo{ + ContainerIDFromSocket: fmt.Sprintf("container_id://%s", containerID), + LocalData: origindetection.LocalData{ + PodUID: podUID, + }, + Cardinality: "high", + ProductOrigin: origindetection.ProductOriginAPM}, expectedTags: []string{"container-low", "container-orch", "container-high", "pod-low", "pod-orch", "pod-high"}, }, } { @@ -137,32 +163,70 @@ func TestEnrichTags(t *testing.T) { setup func() // register the proper meta collector for the test }{ { - name: "with external data (containerName) and high cardinality", - originInfo: taggertypes.OriginInfo{ProductOrigin: taggertypes.ProductOriginAPM, ExternalData: fmt.Sprintf("cn-%s,it-false", containerName), Cardinality: "high"}, + name: "with external data (containerName) and high cardinality", + originInfo: taggertypes.OriginInfo{ + ProductOrigin: origindetection.ProductOriginAPM, + ExternalData: origindetection.ExternalData{ + Init: false, + ContainerName: containerName, + }, + Cardinality: "high", + }, expectedTags: []string{}, setup: func() { mockMetricsProvider.RegisterMetaCollector(&containerMetaCollector) }, }, { - name: "with external data (containerName, podUID) and low cardinality", - originInfo: taggertypes.OriginInfo{ProductOrigin: taggertypes.ProductOriginAPM, ExternalData: fmt.Sprintf("it-invalid,cn-%s,pu-%s", containerName, podUID), Cardinality: "low"}, + name: "with external data (containerName, podUID) and low cardinality", + originInfo: taggertypes.OriginInfo{ + ProductOrigin: origindetection.ProductOriginAPM, + ExternalData: origindetection.ExternalData{ + Init: false, + ContainerName: containerName, + PodUID: podUID, + }, + Cardinality: "low", + }, expectedTags: []string{"pod-low", "container-low"}, setup: func() { mockMetricsProvider.RegisterMetaCollector(&containerMetaCollector) }, }, { - name: "with external data (podUID) and high cardinality", - originInfo: taggertypes.OriginInfo{ProductOrigin: taggertypes.ProductOriginAPM, ExternalData: fmt.Sprintf("pu-%s,it-false", podUID), Cardinality: "high"}, + name: "with external data (podUID) and high cardinality", + originInfo: taggertypes.OriginInfo{ + ProductOrigin: origindetection.ProductOriginAPM, + ExternalData: origindetection.ExternalData{ + Init: false, + PodUID: podUID, + }, + Cardinality: "high", + }, expectedTags: []string{"pod-low", "pod-orch", "pod-high"}, setup: func() { mockMetricsProvider.RegisterMetaCollector(&containerMetaCollector) }, }, { - name: "with external data (containerName, podUID) and high cardinality", - originInfo: taggertypes.OriginInfo{ProductOrigin: taggertypes.ProductOriginAPM, ExternalData: fmt.Sprintf("pu-%s,it-false,cn-%s", podUID, containerName), Cardinality: "high"}, + 
name: "with external data (containerName, podUID) and high cardinality", + originInfo: taggertypes.OriginInfo{ + ProductOrigin: origindetection.ProductOriginAPM, + ExternalData: origindetection.ExternalData{ + Init: false, + ContainerName: containerName, + PodUID: podUID, + }, + Cardinality: "high", + }, expectedTags: []string{"pod-low", "pod-orch", "pod-high", "container-low", "container-orch", "container-high"}, setup: func() { mockMetricsProvider.RegisterMetaCollector(&containerMetaCollector) }, }, { - name: "with external data (containerName, podUID, initContainer) and low cardinality", - originInfo: taggertypes.OriginInfo{ProductOrigin: taggertypes.ProductOriginAPM, ExternalData: fmt.Sprintf("pu-%s,cn-%s,it-true", podUID, initContainerName), Cardinality: "low"}, + name: "with external data (containerName, podUID, initContainer) and low cardinality", + originInfo: taggertypes.OriginInfo{ + ProductOrigin: origindetection.ProductOriginAPM, + ExternalData: origindetection.ExternalData{ + Init: true, + ContainerName: initContainerName, + PodUID: podUID, + }, + Cardinality: "low", + }, expectedTags: []string{"pod-low", "init-container-low"}, setup: func() { mockMetricsProvider.RegisterMetaCollector(&initContainerMetaCollector) }, }, @@ -223,33 +287,48 @@ func TestEnrichTagsOptOut(t *testing.T) { tb := tagset.NewHashingTagsAccumulator() // Test with none cardinality - tagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "container_id://bar", ContainerID: "container-id", Cardinality: "none", ProductOrigin: taggertypes.ProductOriginDogStatsD}) + tagger.EnrichTags(tb, taggertypes.OriginInfo{ + ContainerIDFromSocket: "container_id://bar", + LocalData: origindetection.LocalData{ + ContainerID: "container-id", + }, + Cardinality: "none", + ProductOrigin: origindetection.ProductOriginDogStatsD, + }) assert.Equal(t, []string{}, tb.Get()) // Test without none cardinality - tagger.EnrichTags(tb, taggertypes.OriginInfo{ContainerIDFromSocket: "container_id://bar", PodUID: "pod-uid", ContainerID: "container-id", Cardinality: "low", ProductOrigin: taggertypes.ProductOriginDogStatsD}) + tagger.EnrichTags(tb, taggertypes.OriginInfo{ + ContainerIDFromSocket: "container_id://bar", + LocalData: origindetection.LocalData{ + ContainerID: "container-id", + PodUID: "pod-uid", + }, + Cardinality: "low", + ProductOrigin: origindetection.ProductOriginDogStatsD, + }) assert.Equal(t, []string{"container-low"}, tb.Get()) } func TestGenerateContainerIDFromExternalData(t *testing.T) { for _, tt := range []struct { name string - externalData externalData + externalData origindetection.ExternalData expected string cidProvider *fakeCIDProvider }{ { name: "empty", - externalData: externalData{}, + externalData: origindetection.ExternalData{}, expected: "", cidProvider: &fakeCIDProvider{}, }, { name: "found container", - externalData: externalData{ - init: false, - containerName: "containerName", - podUID: "podUID", + externalData: origindetection.ExternalData{ + Init: false, + ContainerName: "containerName", + PodUID: "podUID", }, expected: "containerID", cidProvider: &fakeCIDProvider{ @@ -261,10 +340,10 @@ func TestGenerateContainerIDFromExternalData(t *testing.T) { }, { name: "found init container", - externalData: externalData{ - init: true, - containerName: "initContainerName", - podUID: "podUID", + externalData: origindetection.ExternalData{ + Init: true, + ContainerName: "initContainerName", + PodUID: "podUID", }, expected: "initContainerID", cidProvider: &fakeCIDProvider{ @@ -276,10 +355,10 @@ func 
TestGenerateContainerIDFromExternalData(t *testing.T) { }, { name: "container not found", - externalData: externalData{ - init: true, - containerName: "containerName", - podUID: "podUID", + externalData: origindetection.ExternalData{ + Init: true, + ContainerName: "containerName", + PodUID: "podUID", }, expected: "", cidProvider: &fakeCIDProvider{ @@ -299,6 +378,45 @@ func TestGenerateContainerIDFromExternalData(t *testing.T) { } } +func TestGenerateContainerIDFromInode(t *testing.T) { + // Create mock metrics provider + mockProvider := collectormock.NewMetricsProvider() + mockProvider.RegisterMetaCollector(&collectormock.MetaCollector{ + CIDFromInode: map[uint64]string{ + uint64(1234): "abcdef", + }, + }) + + for _, tt := range []struct { + name string + localData origindetection.LocalData + expected string + inodeProvider *collectormock.MetricsProvider + }{ + { + name: "empty", + localData: origindetection.LocalData{}, + expected: "", + inodeProvider: mockProvider, + }, + { + name: "found container", + localData: origindetection.LocalData{ + Inode: 1234, + }, + expected: "abcdef", + inodeProvider: mockProvider, + }, + } { + t.Run(tt.name, func(t *testing.T) { + fakeTagger := TaggerWrapper{} + containerID, err := fakeTagger.generateContainerIDFromInode(tt.localData, mockProvider.GetMetaCollector()) + assert.NoError(t, err) + assert.Equal(t, tt.expected, containerID) + }) + } +} + func TestAgentTags(t *testing.T) { c := configmock.New(t) params := tagger.Params{ diff --git a/comp/core/tagger/origindetection/origindetection.go b/comp/core/tagger/origindetection/origindetection.go index 712792c54f298..a0790938c62c5 100644 --- a/comp/core/tagger/origindetection/origindetection.go +++ b/comp/core/tagger/origindetection/origindetection.go @@ -10,6 +10,7 @@ package origindetection import ( + "fmt" "strconv" "strings" ) @@ -26,6 +27,14 @@ const ( // ProductOriginAPM is the ProductOrigin for APM. ProductOriginAPM ProductOrigin = iota + // Local Data Prefixes + // These prefixes are used to build the Local Data list. + + // LocalDataContainerIDPrefix is the prefix used for the Container ID sent in the Local Data list. + LocalDataContainerIDPrefix = "ci-" + // LocalDataInodePrefix is the prefix used for the Inode sent in the Local Data list. + LocalDataInodePrefix = "in-" + // External Data Prefixes // These prefixes are used to build the External Data Environment Variable. @@ -45,6 +54,11 @@ type OriginInfo struct { ProductOrigin ProductOrigin // ProductOrigin is the product that sent the origin information. } +// OriginInfoString returns a string representation of the OriginInfo. +func OriginInfoString(originInfo OriginInfo) string { + return LocalDataString(originInfo.LocalData) + ExternalDataString(originInfo.ExternalData) +} + // LocalData that is generated by the client and sent to the Agent. type LocalData struct { ProcessID uint32 // ProcessID of the container process on the host. @@ -53,6 +67,11 @@ type LocalData struct { PodUID string // PodUID of the pod sent from the client. } +// LocalDataString returns a string representation of the LocalData. +func LocalDataString(localData LocalData) string { + return fmt.Sprintf("%v%v%v%v", localData.ProcessID, localData.ContainerID, localData.Inode, localData.PodUID) +} + // ExternalData generated by the Admission Controller and sent to the Agent. type ExternalData struct { Init bool // Init is true if the container is an init container. 
@@ -60,16 +79,57 @@ type ExternalData struct { PodUID string // PodUID is the UID of the pod as seen by the Admission Controller. } +// ExternalDataString returns a string representation of the ExternalData. +func ExternalDataString(externalData ExternalData) string { + return fmt.Sprintf("%v%v%v", externalData.Init, externalData.ContainerName, externalData.PodUID) +} + // GenerateContainerIDFromExternalData generates a container ID from the external data. type GenerateContainerIDFromExternalData func(externalData ExternalData) (string, error) +// ParseLocalData parses the local data string into a LocalData struct. +func ParseLocalData(rawLocalData string) (LocalData, error) { + if rawLocalData == "" { + return LocalData{}, nil + } + + var localData LocalData + var parsingError error + + if strings.Contains(rawLocalData, ",") { + // The Local Data can contain a list. + items := strings.Split(rawLocalData, ",") + for _, item := range items { + if strings.HasPrefix(item, LocalDataContainerIDPrefix) { + localData.ContainerID = item[len(LocalDataContainerIDPrefix):] + } else if strings.HasPrefix(item, LocalDataInodePrefix) { + localData.Inode, parsingError = strconv.ParseUint(item[len(LocalDataInodePrefix):], 10, 64) + } + } + } else { + // The Local Data can contain a single value. + if strings.HasPrefix(rawLocalData, LocalDataContainerIDPrefix) { + localData.ContainerID = rawLocalData[len(LocalDataContainerIDPrefix):] + } else if strings.HasPrefix(rawLocalData, LocalDataInodePrefix) { + localData.Inode, parsingError = strconv.ParseUint(rawLocalData[len(LocalDataInodePrefix):], 10, 64) + } else { + // Container ID with old format: + localData.ContainerID = rawLocalData + } + } + + return localData, parsingError +} + // ParseExternalData parses the external data string into an ExternalData struct. 
func ParseExternalData(externalEnv string) (ExternalData, error) { if externalEnv == "" { return ExternalData{}, nil } + var externalData ExternalData var parsingError error + for _, item := range strings.Split(externalEnv, ",") { switch { case strings.HasPrefix(item, ExternalDataInitPrefix): @@ -80,5 +140,6 @@ func ParseExternalData(externalEnv string) (ExternalData, error) { externalData.PodUID = item[len(ExternalDataPodUIDPrefix):] } } + return externalData, parsingError } diff --git a/comp/core/tagger/origindetection/origindetection_test.go b/comp/core/tagger/origindetection/origindetection_test.go index a873093f6191b..b38c82c89b35d 100644 --- a/comp/core/tagger/origindetection/origindetection_test.go +++ b/comp/core/tagger/origindetection/origindetection_test.go @@ -12,6 +12,65 @@ import ( "github.com/stretchr/testify/assert" ) +func TestParseLocalData(t *testing.T) { + tests := []struct { + name string + rawLocalData string + expected LocalData + expectError bool + }{ + { + name: "Empty string", + rawLocalData: "", + expected: LocalData{}, + expectError: false, + }, + { + name: "Single container ID", + rawLocalData: "ci-abc123", + expected: LocalData{ContainerID: "abc123"}, + expectError: false, + }, + { + name: "Single inode", + rawLocalData: "in-12345", + expected: LocalData{Inode: 12345}, + expectError: false, + }, + { + name: "Multiple values", + rawLocalData: "ci-abc123,in-12345", + expected: LocalData{ContainerID: "abc123", Inode: 12345}, + expectError: false, + }, + { + name: "Invalid inode", + rawLocalData: "in-invalid", + expected: LocalData{}, + expectError: true, + }, + { + name: "Old container format", + rawLocalData: "abc123", + expected: LocalData{ContainerID: "abc123"}, + expectError: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + result, err := ParseLocalData(tc.rawLocalData) + + if tc.expectError { + assert.Error(t, err) + } else { + assert.Equal(t, tc.expected, result) + } + + }) + } +} + func TestParseExternalData(t *testing.T) { tests := []struct { name string diff --git a/comp/core/tagger/server/server.go b/comp/core/tagger/server/server.go index 34a719f32c440..6e7cf4c1fd5c8 100644 --- a/comp/core/tagger/server/server.go +++ b/comp/core/tagger/server/server.go @@ -34,13 +34,15 @@ const ( type Server struct { taggerComponent tagger.Component maxEventSize int + throttler Throttler } // NewServer returns a new Server -func NewServer(t tagger.Component, maxEventSize int) *Server { +func NewServer(t tagger.Component, maxEventSize int, maxParallelSync int) *Server { return &Server{ taggerComponent: t, maxEventSize: maxEventSize, + throttler: NewSyncThrottler(uint32(maxParallelSync)), } } @@ -53,6 +55,42 @@ func (s *Server) TaggerStreamEntities(in *pb.StreamTagsRequest, out pb.AgentSecu return err } + ticker := time.NewTicker(streamKeepAliveInterval) + defer ticker.Stop() + + timeoutRefreshError := make(chan error) + + go func() { + // The remote tagger client has a timeout that closes the + // connection after 10 minutes of inactivity (implemented in + // comp/core/tagger/remote/tagger.go) In order to avoid closing the + // connection and having to open it again, the server will send + // an empty message after 9 minutes of inactivity. The goal is + // only to keep the connection alive without losing the + // protection against “half” closed connections brought by the + // timeout. 
+ for { + select { + case <-out.Context().Done(): + return + + case <-ticker.C: + err = grpc.DoWithTimeout(func() error { + return out.Send(&pb.StreamTagsResponse{ + Events: []*pb.StreamTagsEvent{}, + }) + }, taggerStreamSendTimeout) + + if err != nil { + log.Warnf("error sending tagger keep-alive: %s", err) + s.taggerComponent.GetTaggerTelemetryStore().ServerStreamErrors.Inc() + timeoutRefreshError <- err + return + } + } + } + }() + filterBuilder := types.NewFilterBuilder() for _, prefix := range in.GetPrefixes() { filterBuilder = filterBuilder.Include(types.EntityIDPrefix(prefix)) @@ -62,23 +100,27 @@ func (s *Server) TaggerStreamEntities(in *pb.StreamTagsRequest, out pb.AgentSecu streamingID := in.GetStreamingID() if streamingID == "" { - // this is done to preserve backward compatibility - // if CLC runner is using an old version, the streaming ID would be an empty string, - // and the server needs to auto-assign a unique id streamingID = uuid.New().String() } - subscriptionID := fmt.Sprintf("streaming-client-%s", streamingID) + + // initBurst is a flag indicating if the initial sync is still in progress or not + // true means the sync hasn't yet been finalised + // false means the streaming client has already caught up with the server + initBurst := true + log.Debugf("requesting token from server throttler for streaming id: %q", streamingID) + tk := s.throttler.RequestToken() + defer s.throttler.Release(tk) + subscription, err := s.taggerComponent.Subscribe(subscriptionID, filter) + log.Debugf("cluster tagger has just initiated subscription for %q at time %v", subscriptionID, time.Now().Unix()) if err != nil { + log.Errorf("Failed to subscribe to tagger for subscription %q", subscriptionID) return err } defer subscription.Unsubscribe() - ticker := time.NewTicker(streamKeepAliveInterval) - defer ticker.Stop() - sendFunc := func(chunk []*pb.StreamTagsEvent) error { return grpc.DoWithTimeout(func() error { return out.Send(&pb.StreamTagsResponse{ @@ -114,29 +156,17 @@ func (s *Server) TaggerStreamEntities(in *pb.StreamTagsRequest, out pb.AgentSecu return err } + if initBurst { + initBurst = false + s.throttler.Release(tk) + log.Infof("cluster tagger has just finished initialization for subscription %q at time %v", subscriptionID, time.Now().Unix()) + } + case <-out.Context().Done(): return nil - // The remote tagger client has a timeout that closes the - // connection after 10 minutes of inactivity (implemented in - // comp/core/tagger/remote/tagger.go) In order to avoid closing the - // connection and having to open it again, the server will send - // an empty message after 9 minutes of inactivity. The goal is - // only to keep the connection alive without losing the - // protection against “half” closed connections brought by the - // timeout. - case <-ticker.C: - err = grpc.DoWithTimeout(func() error { - return out.Send(&pb.StreamTagsResponse{ - Events: []*pb.StreamTagsEvent{}, - }) - }, taggerStreamSendTimeout) - - if err != nil { - log.Warnf("error sending tagger keep-alive: %s", err) - s.taggerComponent.GetTaggerTelemetryStore().ServerStreamErrors.Inc() - return err - } + case err = <-timeoutRefreshError: + return err } } } diff --git a/comp/core/tagger/server/syncthrottler.go b/comp/core/tagger/server/syncthrottler.go new file mode 100644 index 0000000000000..5309e97c09a7a --- /dev/null +++ b/comp/core/tagger/server/syncthrottler.go @@ -0,0 +1,62 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package server + +import ( + "sync" + + "github.com/google/uuid" +) + +type token string + +// Throttler provides tokens with throttling logic that limits the number of active tokens at the same time +// When a component is done with a token, it should release the token by calling the Release method +type Throttler interface { + // RequestToken returns a token + RequestToken() token + // ReleaseToken returns token back to the throttler + // This method is idempotent (i.e. invoking it on the same token multiple times will have the same effect) + Release(t token) +} + +// limiter implements the Throttler interface +type limiter struct { + mutex sync.RWMutex + tokensChan chan struct{} + activeRequests map[token]struct{} +} + +// NewSyncThrottler creates and returns a new Throttler +func NewSyncThrottler(maxConcurrentSync uint32) Throttler { + return &limiter{ + mutex: sync.RWMutex{}, + tokensChan: make(chan struct{}, maxConcurrentSync), + activeRequests: make(map[token]struct{}), + } +} + +// RequestToken implements Throttler#RequestToken +func (l *limiter) RequestToken() token { + tk := token(uuid.New().String()) + l.tokensChan <- struct{}{} + + l.mutex.Lock() + defer l.mutex.Unlock() + + l.activeRequests[tk] = struct{}{} + return tk +} + +// Release implements Throttler#Release +func (l *limiter) Release(t token) { + l.mutex.Lock() + defer l.mutex.Unlock() + if _, found := l.activeRequests[t]; found { + <-l.tokensChan + delete(l.activeRequests, t) + } +} diff --git a/comp/core/tagger/server/syncthrottler_test.go b/comp/core/tagger/server/syncthrottler_test.go new file mode 100644 index 0000000000000..a4065184baab9 --- /dev/null +++ b/comp/core/tagger/server/syncthrottler_test.go @@ -0,0 +1,32 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package server + +import ( + "sync" + "testing" + "time" +) + +func TestSyncThrottler(_ *testing.T) { + + throtler := NewSyncThrottler(3) + + var wg sync.WaitGroup + + for i := 0; i < 30; i++ { + wg.Add(1) + go func() { + defer wg.Done() + t := throtler.RequestToken() + time.Sleep(200 * time.Millisecond) + throtler.Release(t) + throtler.Release(t) // Release method should be idempotent + }() + } + + wg.Wait() +} diff --git a/comp/core/tagger/tags/tags.go b/comp/core/tagger/tags/tags.go index 818ab9a7576cb..8b4517f96e371 100644 --- a/comp/core/tagger/tags/tags.go +++ b/comp/core/tagger/tags/tags.go @@ -91,9 +91,18 @@ const ( // GPU related tags - // KubeGPUVendor the tag for the Kubernetes Resource GPU vendor + // KubeGPUVendor the tag for the Kubernetes Resource GPU vendor (e.g., NVIDIA). KubeGPUVendor = "gpu_vendor" + // KubeGPUDevice is the tag for the Kubernetes Resource GPU device. This is + // the commercial name of the device (e.g., Tesla T4). See + // comp/core/workloadmeta/def/types.go:GPU.Device for more detail on this + // field. 
+ KubeGPUDevice = "gpu_device" + + // KubeGPUUUID is the tag for the Kubernetes Resource GPU UUID + KubeGPUUUID = "gpu_uuid" + // OpenshiftDeploymentConfig is the tag for the OpenShift deployment config name OpenshiftDeploymentConfig = "oshift_deployment_config" diff --git a/comp/core/tagger/types/entity_id.go b/comp/core/tagger/types/entity_id.go index c598c13fcf43d..8fb3969e007c2 100644 --- a/comp/core/tagger/types/entity_id.go +++ b/comp/core/tagger/types/entity_id.go @@ -71,6 +71,8 @@ const ( Process EntityIDPrefix = "process" // InternalID is the prefix `internal` InternalID EntityIDPrefix = "internal" + // GPU is the prefix `gpu` + GPU EntityIDPrefix = "gpu" ) // AllPrefixesSet returns a set of all possible entity id prefixes that can be used in the tagger @@ -85,6 +87,7 @@ func AllPrefixesSet() map[EntityIDPrefix]struct{} { KubernetesPodUID: {}, Process: {}, InternalID: {}, + GPU: {}, } } diff --git a/comp/core/tagger/types/filter_builder_test.go b/comp/core/tagger/types/filter_builder_test.go index 72beadbf33910..4c05a49452ec1 100644 --- a/comp/core/tagger/types/filter_builder_test.go +++ b/comp/core/tagger/types/filter_builder_test.go @@ -59,6 +59,7 @@ func TestFilterBuilderOps(t *testing.T) { KubernetesPodUID: {}, Process: {}, InternalID: {}, + GPU: {}, }, cardinality: HighCardinality, }, diff --git a/comp/core/tagger/types/go.mod b/comp/core/tagger/types/go.mod index e47b03b32f48f..fe63a629a04e2 100644 --- a/comp/core/tagger/types/go.mod +++ b/comp/core/tagger/types/go.mod @@ -21,7 +21,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system diff --git a/comp/core/telemetry/go.mod b/comp/core/telemetry/go.mod index 4d38df7857b77..a7a82765d9b48 100644 --- a/comp/core/telemetry/go.mod +++ b/comp/core/telemetry/go.mod @@ -5,7 +5,7 @@ go 1.22.0 replace ( github.com/DataDog/datadog-agent/comp/def => ../../../comp/def github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option ) require ( @@ -18,7 +18,7 @@ require ( require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.55.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.55.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -26,14 +26,14 @@ require ( github.com/klauspost/compress v1.17.11 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect 
github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/sys v0.28.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + golang.org/x/sys v0.29.0 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/core/telemetry/go.sum b/comp/core/telemetry/go.sum index 9067c10872735..a634c4b601bd2 100644 --- a/comp/core/telemetry/go.sum +++ b/comp/core/telemetry/go.sum @@ -25,8 +25,8 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+ github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -48,10 +48,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/comp/core/workloadmeta/collectors/catalog-core/options.go b/comp/core/workloadmeta/collectors/catalog-core/options.go index bba615c6bd790..5dbb5398da63e 100644 --- a/comp/core/workloadmeta/collectors/catalog-core/options.go +++ b/comp/core/workloadmeta/collectors/catalog-core/options.go @@ -21,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubeapiserver" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubelet" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubemetadata" + 
"github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/nvml" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/podman" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/process" remoteprocesscollector "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/remote/processcollector" @@ -41,5 +42,6 @@ func getCollectorOptions() []fx.Option { podman.GetFxOptions(), remoteprocesscollector.GetFxOptions(), process.GetFxOptions(), + nvml.GetFxOptions(), } } diff --git a/comp/core/workloadmeta/collectors/catalog/options.go b/comp/core/workloadmeta/collectors/catalog/options.go index 05f6ca9ac5e12..1778576a3573e 100644 --- a/comp/core/workloadmeta/collectors/catalog/options.go +++ b/comp/core/workloadmeta/collectors/catalog/options.go @@ -21,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubeapiserver" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubelet" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/kubemetadata" + "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/nvml" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/podman" "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/remote/processcollector" remoteworkloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/collectors/internal/remote/workloadmeta" @@ -42,5 +43,6 @@ func getCollectorOptions() []fx.Option { remoteworkloadmeta.GetFxOptions(), remoteWorkloadmetaParams(), processcollector.GetFxOptions(), + nvml.GetFxOptions(), } } diff --git a/comp/core/workloadmeta/collectors/internal/ecs/v1parser.go b/comp/core/workloadmeta/collectors/internal/ecs/v1parser.go index 48f524b154dc3..6d742ba6068fc 100644 --- a/comp/core/workloadmeta/collectors/internal/ecs/v1parser.go +++ b/comp/core/workloadmeta/collectors/internal/ecs/v1parser.go @@ -102,9 +102,14 @@ func (c *collector) parseTaskContainers( Type: workloadmeta.EventTypeSet, Entity: &workloadmeta.Container{ EntityID: entityID, + Runtime: workloadmeta.ContainerRuntimeDocker, EntityMeta: workloadmeta.EntityMeta{ Name: container.DockerName, }, + State: workloadmeta.ContainerState{ + Status: workloadmeta.ContainerStatusUnknown, + Health: workloadmeta.ContainerHealthUnknown, + }, }, }) } diff --git a/comp/core/workloadmeta/collectors/internal/nvml/nvml.go b/comp/core/workloadmeta/collectors/internal/nvml/nvml.go new file mode 100644 index 0000000000000..049a6389b7137 --- /dev/null +++ b/comp/core/workloadmeta/collectors/internal/nvml/nvml.go @@ -0,0 +1,174 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build linux + +package nvml + +import ( + "context" + "fmt" + + "go.uber.org/fx" + + "github.com/NVIDIA/go-nvml/pkg/nvml" + + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/errors" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +const ( + collectorID = "nvml" + componentName = "workloadmeta-nvml" + nvidiaVendor = "nvidia" +) + +type collector struct { + id string + catalog workloadmeta.AgentType + store workloadmeta.Component + nvmlLib nvml.Interface +} + +// NewCollector returns a kubelet CollectorProvider that instantiates its collector +func NewCollector() (workloadmeta.CollectorProvider, error) { + return workloadmeta.CollectorProvider{ + Collector: &collector{ + id: collectorID, + catalog: workloadmeta.NodeAgent, + }, + }, nil +} + +// GetFxOptions returns the FX framework options for the collector +func GetFxOptions() fx.Option { + return fx.Provide(NewCollector) +} + +// Start initializes the NVML library and sets the store +func (c *collector) Start(_ context.Context, store workloadmeta.Component) error { + if !env.IsFeaturePresent(env.NVML) { + return errors.NewDisabled(componentName, "Agent does not have NVML library available") + } + + c.store = store + // TODO: Add configuration option for NVML library path + c.nvmlLib = nvml.New() + ret := c.nvmlLib.Init() + if ret != nvml.SUCCESS && ret != nvml.ERROR_ALREADY_INITIALIZED { + return fmt.Errorf("failed to initialize NVML library: %v", nvml.ErrorString(ret)) + } + + return nil +} + +// Pull collects the GPUs available on the node and notifies the store +func (c *collector) Pull(_ context.Context) error { + count, ret := c.nvmlLib.DeviceGetCount() + if ret != nvml.SUCCESS { + return fmt.Errorf("failed to get device count: %v", nvml.ErrorString(ret)) + } + + var events []workloadmeta.CollectorEvent + for i := 0; i < count; i++ { + dev, ret := c.nvmlLib.DeviceGetHandleByIndex(i) + if ret != nvml.SUCCESS { + return fmt.Errorf("failed to get device handle for index %d: %v", i, nvml.ErrorString(ret)) + } + + uuid, ret := dev.GetUUID() + if ret != nvml.SUCCESS { + return fmt.Errorf("failed to get device UUID for index %d: %v", i, nvml.ErrorString(ret)) + } + + name, ret := dev.GetName() + if ret != nvml.SUCCESS { + return fmt.Errorf("failed to get device name for index %d: %v", i, nvml.ErrorString(ret)) + } + + gpu := &workloadmeta.GPU{ + EntityID: workloadmeta.EntityID{ + Kind: workloadmeta.KindGPU, + ID: uuid, + }, + EntityMeta: workloadmeta.EntityMeta{ + Name: name, + }, + Vendor: nvidiaVendor, + Device: name, + Index: i, + } + + arch, ret := dev.GetArchitecture() + if ret != nvml.SUCCESS { + log.Warnf("failed to get architecture for device index %d: %v", i, nvml.ErrorString(ret)) + } else { + gpu.Architecture = gpuArchToString(arch) + } + + major, minor, ret := dev.GetCudaComputeCapability() + if ret != nvml.SUCCESS { + log.Warnf("failed to get CUDA compute capability for device index %d: %v", i, nvml.ErrorString(ret)) + } else { + gpu.ComputeCapability.Major = major + gpu.ComputeCapability.Minor = minor + } + + devAttr, ret := dev.GetAttributes() + if ret != nvml.SUCCESS { + log.Warnf("failed to get device attributes for device index %d: %v", i, nvml.ErrorString(ret)) + } else { + gpu.SMCount = int(devAttr.MultiprocessorCount) + } + + event := workloadmeta.CollectorEvent{ + Source: workloadmeta.SourceRuntime, + Type: workloadmeta.EventTypeSet, + Entity: gpu, + } + events = append(events, event) + } 
+
+    c.store.Notify(events)
+
+    return nil
+}
+
+func (c *collector) GetID() string {
+    return c.id
+}
+
+func (c *collector) GetTargetCatalog() workloadmeta.AgentType {
+    return c.catalog
+}
+
+func gpuArchToString(nvmlArch nvml.DeviceArchitecture) string {
+    switch nvmlArch {
+    case nvml.DEVICE_ARCH_KEPLER:
+        return "kepler"
+    case nvml.DEVICE_ARCH_PASCAL:
+        return "pascal"
+    case nvml.DEVICE_ARCH_VOLTA:
+        return "volta"
+    case nvml.DEVICE_ARCH_TURING:
+        return "turing"
+    case nvml.DEVICE_ARCH_AMPERE:
+        return "ampere"
+    case nvml.DEVICE_ARCH_ADA:
+        return "ada"
+    case nvml.DEVICE_ARCH_HOPPER:
+        return "hopper"
+    case nvml.DEVICE_ARCH_UNKNOWN:
+        return "unknown"
+    default:
+        // Distinguish invalid from unknown: NVML itself can report unknown, but we should
+        // always be able to handle whatever value NVML returns. Reaching this branch means
+        // a new architecture was added to NVML and a case for it is missing above.
+        return "invalid"
+    }
+}
diff --git a/comp/core/workloadmeta/collectors/internal/nvml/nvml_nop.go b/comp/core/workloadmeta/collectors/internal/nvml/nvml_nop.go
new file mode 100644
index 0000000000000..7d6beacc270f9
--- /dev/null
+++ b/comp/core/workloadmeta/collectors/internal/nvml/nvml_nop.go
@@ -0,0 +1,15 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+
+//go:build !linux
+
+package nvml
+
+import "go.uber.org/fx"
+
+// GetFxOptions returns the FX framework options for the collector
+func GetFxOptions() fx.Option {
+    return nil
+}
diff --git a/comp/core/workloadmeta/collectors/internal/nvml/nvml_test.go b/comp/core/workloadmeta/collectors/internal/nvml/nvml_test.go
new file mode 100644
index 0000000000000..cac922adb813e
--- /dev/null
+++ b/comp/core/workloadmeta/collectors/internal/nvml/nvml_test.go
@@ -0,0 +1,70 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2024-present Datadog, Inc.
+ +//go:build linux + +package nvml + +import ( + "context" + "testing" + + "github.com/NVIDIA/go-nvml/pkg/nvml" + "github.com/stretchr/testify/require" + + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/gpu/testutil" +) + +func TestPull(t *testing.T) { + wmetaMock := testutil.GetWorkloadMetaMock(t) + nvmlMock := testutil.GetBasicNvmlMock() + + c := &collector{ + id: collectorID, + catalog: workloadmeta.NodeAgent, + store: wmetaMock, + nvmlLib: nvmlMock, + } + + c.Pull(context.Background()) + + gpus := wmetaMock.ListGPUs() + require.Equal(t, len(testutil.GPUUUIDs), len(gpus)) + + foundIDs := make(map[string]bool) + for _, gpu := range gpus { + foundIDs[gpu.ID] = true + + require.Equal(t, nvidiaVendor, gpu.Vendor) + require.Equal(t, testutil.DefaultGPUName, gpu.Name) + require.Equal(t, testutil.DefaultGPUName, gpu.Device) + require.Equal(t, "hopper", gpu.Architecture) + require.Equal(t, testutil.DefaultGPUComputeCapMajor, gpu.ComputeCapability.Major) + require.Equal(t, testutil.DefaultGPUComputeCapMinor, gpu.ComputeCapability.Minor) + require.Equal(t, int(testutil.DefaultGPUAttributes.MultiprocessorCount), gpu.SMCount) + } + + for _, uuid := range testutil.GPUUUIDs { + require.True(t, foundIDs[uuid], "GPU with UUID %s not found", uuid) + } +} + +func TestGpuArchToString(t *testing.T) { + tests := []struct { + arch nvml.DeviceArchitecture + expected string + }{ + {nvml.DEVICE_ARCH_KEPLER, "kepler"}, + {nvml.DEVICE_ARCH_UNKNOWN, "unknown"}, + {nvml.DeviceArchitecture(3751), "invalid"}, + } + + for _, tt := range tests { + t.Run(tt.expected, func(t *testing.T) { + require.Equal(t, tt.expected, gpuArchToString(tt.arch)) + }) + } +} diff --git a/comp/core/workloadmeta/collectors/internal/nvml/stub.go b/comp/core/workloadmeta/collectors/internal/nvml/stub.go new file mode 100644 index 0000000000000..bb7f692241c97 --- /dev/null +++ b/comp/core/workloadmeta/collectors/internal/nvml/stub.go @@ -0,0 +1,7 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +// Package nvml implements the NVML collector for workloadmeta +package nvml diff --git a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go index fc36cfa790f25..fdcb72858fc52 100644 --- a/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go +++ b/comp/core/workloadmeta/collectors/internal/remote/processcollector/process_collector.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/flavor" grpcutil "github.com/DataDog/datadog-agent/pkg/util/grpc" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -221,7 +221,7 @@ func (s *streamHandler) populateMissingContainerID(collectorEvents []workloadmet if ctrID == "" { pidAsInt, _ := strconv.Atoi(pid) - containerProvider := metrics.GetProvider(optional.NewOption(store)) + containerProvider := metrics.GetProvider(option.New(store)) ctrIDFromProvider, err := containerProvider.GetMetaCollector().GetContainerIDForPID(pidAsInt, cacheValidityNoRT) if err != nil { log.Debugf("failed to get container id for process %s: %v", pid, err) diff --git a/comp/core/workloadmeta/def/component.go b/comp/core/workloadmeta/def/component.go index 328555d2e525c..fcc04e13c34a0 100644 --- a/comp/core/workloadmeta/def/component.go +++ b/comp/core/workloadmeta/def/component.go @@ -96,6 +96,14 @@ type Component interface { // to all entities with kind KindProcess. ListProcesses() []*Process + // GetGPU returns metadata about a GPU device. It fetches the entity + // with kind KindGPU and the given ID. + GetGPU(id string) (*GPU, error) + + // ListGPUs returns metadata about all known GPU devices, equivalent + // to all entities with kind KindGPU. + ListGPUs() []*GPU + // ListProcessesWithFilter returns all the processes for which the passed // filter evaluates to true. 
ListProcessesWithFilter(filterFunc EntityFilterFunc[*Process]) []*Process diff --git a/comp/core/workloadmeta/def/merge.go b/comp/core/workloadmeta/def/merge.go index a01416927b6b0..e529b01aca133 100644 --- a/comp/core/workloadmeta/def/merge.go +++ b/comp/core/workloadmeta/def/merge.go @@ -22,6 +22,7 @@ var ( timeType = reflect.TypeOf(time.Time{}) portSliceType = reflect.TypeOf([]ContainerPort{}) containerHealthType = reflect.TypeOf(ContainerHealthUnknown) + containerStatusType = reflect.TypeOf(ContainerStatusUnknown) mergerInstance = merger{} ) @@ -34,6 +35,9 @@ func (merger) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { // Even though Health is string alias, the matching only matches actual Health case containerHealthType: return healthMerge + // Even though Status is string alias, the matching only matches actual Status + case containerStatusType: + return statusMerge } return nil @@ -54,6 +58,21 @@ func healthMerge(dst, src reflect.Value) error { return nil } +func statusMerge(dst, src reflect.Value) error { + if !dst.CanSet() { + return nil + } + + srcStatus := src.Interface().(ContainerStatus) + dstStatus := dst.Interface().(ContainerStatus) + + if srcStatus != "" && srcStatus != ContainerStatusUnknown && (dstStatus == "" || dstStatus == ContainerStatusUnknown) { + dst.Set(src) + } + + return nil +} + func timeMerge(dst, src reflect.Value) error { if !dst.CanSet() { return nil diff --git a/comp/core/workloadmeta/def/types.go b/comp/core/workloadmeta/def/types.go index 47315eca9db06..e02a834f9ab82 100644 --- a/comp/core/workloadmeta/def/types.go +++ b/comp/core/workloadmeta/def/types.go @@ -47,6 +47,7 @@ const ( KindECSTask Kind = "ecs_task" KindContainerImageMetadata Kind = "container_image_metadata" KindProcess Kind = "process" + KindGPU Kind = "gpu" ) // Source is the source name of an entity. @@ -1349,3 +1350,96 @@ func (e EventBundle) Acknowledge() { // InitHelper this should be provided as a helper to allow passing the component into // the inithook for additional start-time configutation. type InitHelper func(context.Context, Component, config.Component) error + +// GPU represents a GPU resource. +type GPU struct { + EntityID + EntityMeta + // Vendor is the name of the manufacturer of the device (e.g., NVIDIA) + Vendor string + + // Device is the comercial name of the device (e.g., Tesla V100) as returned + // by the device driver (NVML for NVIDIA GPUs). Note that some models might + // have some additional information like the memory size (e.g., Tesla + // A100-SXM2-80GB), the exact format of this field is vendor and device + // specific. + Device string + ActivePIDs []int + + // Index is the index of the GPU in the host system. This is useful as sometimes + // GPUs will be identified by their index instead of their UUID. Note that the index + // is not guaranteed to be stable across reboots, nor is necessarily the same inside + // of containers. + Index int + + // Architecture contains the architecture of the GPU (e.g., Pascal, Volta, etc.). Optional, can be empty. + Architecture string + + // ComputeCapability contains the compute capability version of the GPU. Optional, can be 0/0 + ComputeCapability GPUComputeCapability + + // SMCount is the number of streaming multiprocessors in the GPU. Optional, can be empty. + SMCount int +} + +var _ Entity = &GPU{} + +// GetID implements Entity#GetID. +func (g GPU) GetID() EntityID { + return g.EntityID +} + +// Merge implements Entity#Merge. 
+func (g *GPU) Merge(e Entity) error { + gg, ok := e.(*GPU) + if !ok { + return fmt.Errorf("cannot merge GPU with different kind %T", e) + } + + // If the source has active PIDs, remove the ones from the destination so merge() takes latest active PIDs from the soure + if gg.ActivePIDs != nil { + g.ActivePIDs = nil + } + + return merge(g, gg) +} + +// DeepCopy implements Entity#DeepCopy. +func (g GPU) DeepCopy() Entity { + cp := deepcopy.Copy(g).(GPU) + return &cp +} + +// String implements Entity#String. +func (g GPU) String(verbose bool) string { + var sb strings.Builder + + _, _ = fmt.Fprintln(&sb, "----------- Entity ID -----------") + _, _ = fmt.Fprintln(&sb, g.EntityID.String(verbose)) + + _, _ = fmt.Fprintln(&sb, "----------- Entity Meta -----------") + _, _ = fmt.Fprintln(&sb, g.EntityMeta.String(verbose)) + + _, _ = fmt.Fprintln(&sb, "Vendor:", g.Vendor) + _, _ = fmt.Fprintln(&sb, "Device:", g.Device) + _, _ = fmt.Fprintln(&sb, "Active PIDs:", g.ActivePIDs) + _, _ = fmt.Fprintln(&sb, "Index:", g.Index) + _, _ = fmt.Fprintln(&sb, "Architecture:", g.Architecture) + _, _ = fmt.Fprintln(&sb, "Compute Capability:", g.ComputeCapability) + _, _ = fmt.Fprintln(&sb, "Streaming Multiprocessor Count:", g.SMCount) + + return sb.String() +} + +// GPUComputeCapability represents the compute capability version of a GPU. +type GPUComputeCapability struct { + // Major represents the major version of the compute capability. + Major int + + // Minor represents the minor version of the compute capability. + Minor int +} + +func (gcc GPUComputeCapability) String() string { + return fmt.Sprintf("%d.%d", gcc.Major, gcc.Minor) +} diff --git a/comp/core/workloadmeta/def/types_test.go b/comp/core/workloadmeta/def/types_test.go index d64d49d9e7b4b..27d4d68b44951 100644 --- a/comp/core/workloadmeta/def/types_test.go +++ b/comp/core/workloadmeta/def/types_test.go @@ -149,3 +149,36 @@ func TestMergeECSContainer(t *testing.T) { assert.Nil(t, container2.ECSContainer) assert.EqualValues(t, container1.ECSContainer.DisplayName, "ecs-container-1") } + +func TestMergeGPU(t *testing.T) { + gpu1 := GPU{ + EntityID: EntityID{ + Kind: KindGPU, + ID: "gpu-1-id", + }, + EntityMeta: EntityMeta{ + Name: "gpu-1", + }, + Vendor: "nvidia", + Device: "", + ActivePIDs: []int{123, 456}, + } + gpu2 := GPU{ + EntityID: EntityID{ + Kind: KindGPU, + ID: "gpu-1-id", + }, + EntityMeta: EntityMeta{ + Name: "gpu-1", + }, + Vendor: "nvidia", + Device: "tesla", + ActivePIDs: []int{654}, + } + + err := gpu1.Merge(&gpu2) + assert.NoError(t, err) + assert.Equal(t, gpu1.Device, "tesla") + assert.ElementsMatch(t, gpu1.ActivePIDs, []int{654}) + assert.Equal(t, gpu1.Vendor, "nvidia") +} diff --git a/comp/core/workloadmeta/fx-mock/fx.go b/comp/core/workloadmeta/fx-mock/fx.go index 8af26b2a550de..13aa272236d10 100644 --- a/comp/core/workloadmeta/fx-mock/fx.go +++ b/comp/core/workloadmeta/fx-mock/fx.go @@ -15,7 +15,7 @@ import ( wmimpl "github.com/DataDog/datadog-agent/comp/core/workloadmeta/impl" wmmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // team: container-platform @@ -25,8 +25,8 @@ func MockModule(params wmdef.Params) fxutil.Module { return fxutil.Component( fxutil.ProvideComponentConstructor(wmimpl.NewWorkloadMetaMock), fx.Provide(func(mock wmmock.Mock) wmdef.Component { return mock }), - fx.Provide(func(mock wmmock.Mock) optional.Option[wmdef.Component] { - 
return optional.NewOption[wmdef.Component](mock) + fx.Provide(func(mock wmmock.Mock) option.Option[wmdef.Component] { + return option.New[wmdef.Component](mock) }), fx.Supply(params), ) diff --git a/comp/core/workloadmeta/fx/fx.go b/comp/core/workloadmeta/fx/fx.go index 96f4c659db370..99c406075e198 100644 --- a/comp/core/workloadmeta/fx/fx.go +++ b/comp/core/workloadmeta/fx/fx.go @@ -12,7 +12,7 @@ import ( wmdef "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/impl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // team: container-platform @@ -33,8 +33,8 @@ func module(options ...fx.Option) fxutil.Module { fxutil.ProvideComponentConstructor( workloadmeta.NewWorkloadMeta, ), - fx.Provide(func(wmeta wmdef.Component) optional.Option[wmdef.Component] { - return optional.NewOption(wmeta) + fx.Provide(func(wmeta wmdef.Component) option.Option[wmdef.Component] { + return option.New(wmeta) }), fx.Options(options...), ) diff --git a/comp/core/workloadmeta/impl/dump.go b/comp/core/workloadmeta/impl/dump.go index 3ffadfdaf6f7f..fd2b626c20d24 100644 --- a/comp/core/workloadmeta/impl/dump.go +++ b/comp/core/workloadmeta/impl/dump.go @@ -35,6 +35,8 @@ func (w *workloadmeta) Dump(verbose bool) wmdef.WorkloadDumpResponse { info = e.String(verbose) case *wmdef.KubernetesMetadata: info = e.String(verbose) + case *wmdef.GPU: + info = e.String(verbose) default: return "", fmt.Errorf("unsupported type %T", e) } diff --git a/comp/core/workloadmeta/impl/store.go b/comp/core/workloadmeta/impl/store.go index 5b49df81f228f..b6288f36f453a 100644 --- a/comp/core/workloadmeta/impl/store.go +++ b/comp/core/workloadmeta/impl/store.go @@ -401,6 +401,28 @@ func (w *workloadmeta) ListKubernetesMetadata(filterFunc wmdef.EntityFilterFunc[ return metadata } +// GetGPU implements Store#GetGPU. +func (w *workloadmeta) GetGPU(id string) (*wmdef.GPU, error) { + entity, err := w.getEntityByKind(wmdef.KindGPU, id) + if err != nil { + return nil, err + } + + return entity.(*wmdef.GPU), nil +} + +// ListGPUs implements Store#ListGPUs. 
+func (w *workloadmeta) ListGPUs() []*wmdef.GPU { + entities := w.listEntitiesByKind(wmdef.KindGPU) + + gpuList := make([]*wmdef.GPU, 0, len(entities)) + for i := range entities { + gpuList = append(gpuList, entities[i].(*wmdef.GPU)) + } + + return gpuList +} + // Notify implements Store#Notify func (w *workloadmeta) Notify(events []wmdef.CollectorEvent) { if len(events) > 0 { diff --git a/comp/core/workloadmeta/proto/proto.go b/comp/core/workloadmeta/proto/proto.go index bcea3d73398e2..eb7dc25ac233a 100644 --- a/comp/core/workloadmeta/proto/proto.go +++ b/comp/core/workloadmeta/proto/proto.go @@ -234,6 +234,9 @@ func toProtoContainerPort(port *workloadmeta.ContainerPort) *pb.ContainerPort { func toProtoRuntime(runtime workloadmeta.ContainerRuntime) (pb.Runtime, error) { switch runtime { + case "": + // we need to handle "" because we don't enforce populating this property by collectors + return pb.Runtime_UNKNOWN, nil case workloadmeta.ContainerRuntimeDocker: return pb.Runtime_DOCKER, nil case workloadmeta.ContainerRuntimeContainerd: @@ -248,7 +251,7 @@ func toProtoRuntime(runtime workloadmeta.ContainerRuntime) (pb.Runtime, error) { return pb.Runtime_ECS_FARGATE, nil } - return pb.Runtime_DOCKER, fmt.Errorf("unknown runtime: %s", runtime) + return pb.Runtime_DOCKER, fmt.Errorf("unknown runtime: %q", runtime) } func toProtoContainerState(state *workloadmeta.ContainerState) (*pb.ContainerState, error) { @@ -280,7 +283,8 @@ func toProtoContainerState(state *workloadmeta.ContainerState) (*pb.ContainerSta func toProtoContainerStatus(status workloadmeta.ContainerStatus) (pb.ContainerStatus, error) { switch status { - case workloadmeta.ContainerStatusUnknown: + case "", workloadmeta.ContainerStatusUnknown: + // we need to handle "" because we don't enforce populating this property by collectors return pb.ContainerStatus_CONTAINER_STATUS_UNKNOWN, nil case workloadmeta.ContainerStatusCreated: return pb.ContainerStatus_CONTAINER_STATUS_CREATED, nil @@ -294,7 +298,7 @@ func toProtoContainerStatus(status workloadmeta.ContainerStatus) (pb.ContainerSt return pb.ContainerStatus_CONTAINER_STATUS_STOPPED, nil } - return pb.ContainerStatus_CONTAINER_STATUS_UNKNOWN, fmt.Errorf("unknown status: %s", status) + return pb.ContainerStatus_CONTAINER_STATUS_UNKNOWN, fmt.Errorf("unknown status: %q", status) } func toProtoContainerHealth(health workloadmeta.ContainerHealth) (pb.ContainerHealth, error) { @@ -666,6 +670,8 @@ func toWorkloadmetaContainerRuntime(protoRuntime pb.Runtime) (workloadmeta.Conta return workloadmeta.ContainerRuntimeGarden, nil case pb.Runtime_ECS_FARGATE: return workloadmeta.ContainerRuntimeECSFargate, nil + case pb.Runtime_UNKNOWN: + return "", nil } return workloadmeta.ContainerRuntimeDocker, fmt.Errorf("unknown runtime: %s", protoRuntime) diff --git a/comp/core/workloadmeta/proto/proto_test.go b/comp/core/workloadmeta/proto/proto_test.go index d3687cf3a87d0..866eae716352d 100644 --- a/comp/core/workloadmeta/proto/proto_test.go +++ b/comp/core/workloadmeta/proto/proto_test.go @@ -382,6 +382,76 @@ func TestConversions(t *testing.T) { } } +// This is added to test cases where some fields are unpopulated, resulting in asymmetric +// conversion (i.e. 
chaining both conversions doesn't yield an identity conversion) +func TestConvertWorkloadEventToProtoWithUnpopulatedFields(t *testing.T) { + createdAt := time.Unix(1669071600, 0) + + wlmEvent := workloadmeta.Event{ + Type: workloadmeta.EventTypeSet, + Entity: &workloadmeta.Container{ + EntityID: workloadmeta.EntityID{ + Kind: workloadmeta.KindContainer, + ID: "123", + }, + EntityMeta: workloadmeta.EntityMeta{ + Name: "abc", + Namespace: "default", + }, + Image: workloadmeta.ContainerImage{ + ID: "123", + RawName: "datadog/agent:7", + Name: "datadog/agent", + ShortName: "agent", + Tag: "7", + }, + State: workloadmeta.ContainerState{ + Running: true, + CreatedAt: createdAt, + StartedAt: createdAt, + FinishedAt: time.Time{}, + ExitCode: nil, + }, + }, + } + + expectedProtoEvent := &pb.WorkloadmetaEvent{ + Type: pb.WorkloadmetaEventType_EVENT_TYPE_SET, + Container: &pb.Container{ + EntityId: &pb.WorkloadmetaEntityId{ + Kind: pb.WorkloadmetaKind_CONTAINER, + Id: "123", + }, + EntityMeta: &pb.EntityMeta{ + Name: "abc", + Namespace: "default", + }, + Image: &pb.ContainerImage{ + Id: "123", + RawName: "datadog/agent:7", + Name: "datadog/agent", + ShortName: "agent", + Tag: "7", + }, + Pid: 0, + Runtime: pb.Runtime_UNKNOWN, + State: &pb.ContainerState{ + Running: true, + Status: pb.ContainerStatus_CONTAINER_STATUS_UNKNOWN, + Health: pb.ContainerHealth_CONTAINER_HEALTH_UNKNOWN, + CreatedAt: createdAt.Unix(), + StartedAt: createdAt.Unix(), + FinishedAt: time.Time{}.Unix(), + ExitCode: 0, + }, + }, + } + + actualProtoEvent, err := ProtobufEventFromWorkloadmetaEvent(wlmEvent) + assert.NoError(t, err) + assert.Equal(t, expectedProtoEvent, actualProtoEvent) +} + func TestProtobufFilterFromWorkloadmetaFilter(t *testing.T) { filter := workloadmeta.NewFilterBuilder(). 
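The UNKNOWN mappings exercised above can be summarized with a short test-style sketch. This is not part of the patch: the test name is hypothetical, and it assumes it would live in the proto package next to the unexported helpers, with the same imports already used by proto_test.go (testing, assert, pb, workloadmeta).

func TestUnpopulatedRuntimeAndStatusMapToUnknown(t *testing.T) {
    // An empty runtime (left unpopulated by a collector) maps to Runtime_UNKNOWN...
    r, err := toProtoRuntime("")
    assert.NoError(t, err)
    assert.Equal(t, pb.Runtime_UNKNOWN, r)

    // ...and Runtime_UNKNOWN converts back to the empty workloadmeta runtime,
    // which is why chaining both conversions is not an identity on populated entities.
    rt, err := toWorkloadmetaContainerRuntime(pb.Runtime_UNKNOWN)
    assert.NoError(t, err)
    assert.Equal(t, workloadmeta.ContainerRuntime(""), rt)

    // An empty container status is treated like ContainerStatusUnknown.
    s, err := toProtoContainerStatus("")
    assert.NoError(t, err)
    assert.Equal(t, pb.ContainerStatus_CONTAINER_STATUS_UNKNOWN, s)
}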
diff --git a/comp/dogstatsd/listeners/uds_common.go b/comp/dogstatsd/listeners/uds_common.go index ff165228b3bf1..f523bdacf54c3 100644 --- a/comp/dogstatsd/listeners/uds_common.go +++ b/comp/dogstatsd/listeners/uds_common.go @@ -27,7 +27,7 @@ import ( replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) @@ -59,7 +59,7 @@ type UDSListener struct { OriginDetection bool config model.Reader - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] transport string @@ -150,7 +150,7 @@ func NewUDSOobPoolManager() *packets.PoolManager[[]byte] { } // NewUDSListener returns an idle UDS Statsd listener -func NewUDSListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, transport string, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSListener, error) { +func NewUDSListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, transport string, wmeta option.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSListener, error) { originDetection := cfg.GetBool("dogstatsd_origin_detection") listener := &UDSListener{ @@ -328,6 +328,7 @@ func (l *UDSListener) handleConnection(conn netUnixConn, closeFunc CloseFunction udsOriginDetectionErrors.Add(1) l.telemetryStore.tlmUDSOriginDetectionError.Inc(tlmListenerID, l.transport) } else { + packet.ProcessID = uint32(pid) packet.Origin = container if capBuff != nil { capBuff.ContainerID = container diff --git a/comp/dogstatsd/listeners/uds_datagram.go b/comp/dogstatsd/listeners/uds_datagram.go index be834e4ba2b09..9a5c220a4f214 100644 --- a/comp/dogstatsd/listeners/uds_datagram.go +++ b/comp/dogstatsd/listeners/uds_datagram.go @@ -16,7 +16,7 @@ import ( replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // UDSDatagramListener implements the StatsdListener interface for Unix Domain (datagrams) @@ -27,7 +27,7 @@ type UDSDatagramListener struct { } // NewUDSDatagramListener returns an idle UDS datagram Statsd listener -func NewUDSDatagramListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetryComponent telemetry.Component) (*UDSDatagramListener, error) { +func NewUDSDatagramListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPoolManager 
*packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, wmeta option.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetryComponent telemetry.Component) (*UDSDatagramListener, error) { socketPath := cfg.GetString("dogstatsd_socket") transport := "unixgram" diff --git a/comp/dogstatsd/listeners/uds_datagram_test.go b/comp/dogstatsd/listeners/uds_datagram_test.go index 4525bc5848abb..79745270e94c5 100644 --- a/comp/dogstatsd/listeners/uds_datagram_test.go +++ b/comp/dogstatsd/listeners/uds_datagram_test.go @@ -21,11 +21,11 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) func udsDatagramListenerFactory(packetOut chan packets.Packets, manager *packets.PoolManager[packets.Packet], cfg config.Component, pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (StatsdListener, error) { - return NewUDSDatagramListener(packetOut, manager, nil, cfg, nil, optional.NewNoneOption[workloadmeta.Component](), pidMap, telemetryStore, packetsTelemetryStore, telemetry) + return NewUDSDatagramListener(packetOut, manager, nil, cfg, nil, option.None[workloadmeta.Component](), pidMap, telemetryStore, packetsTelemetryStore, telemetry) } func TestNewUDSDatagramListener(t *testing.T) { diff --git a/comp/dogstatsd/listeners/uds_linux.go b/comp/dogstatsd/listeners/uds_linux.go index 62a43fd225cb4..cdb728cb9085c 100644 --- a/comp/dogstatsd/listeners/uds_linux.go +++ b/comp/dogstatsd/listeners/uds_linux.go @@ -20,7 +20,7 @@ import ( replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -61,7 +61,7 @@ func enableUDSPassCred(conn netUnixConn) error { // source, and an error if any. // PID is added to ancillary data by the Linux kernel if we added the // SO_PASSCRED to the socket, see enableUDSPassCred. -func processUDSOrigin(ancillary []byte, wmeta optional.Option[workloadmeta.Component], state pidmap.Component) (int, string, error) { +func processUDSOrigin(ancillary []byte, wmeta option.Option[workloadmeta.Component], state pidmap.Component) (int, string, error) { messages, err := unix.ParseSocketControlMessage(ancillary) if err != nil { return 0, packets.NoOrigin, err @@ -97,7 +97,7 @@ func processUDSOrigin(ancillary []byte, wmeta optional.Option[workloadmeta.Compo // getEntityForPID returns the container entity name and caches the value for future lookups // As the result is cached and the lookup is really fast (parsing local files), it can be // called from the intake goroutine. 
-func getEntityForPID(pid int32, capture bool, wmeta optional.Option[workloadmeta.Component], state pidmap.Component) (string, error) { +func getEntityForPID(pid int32, capture bool, wmeta option.Option[workloadmeta.Component], state pidmap.Component) (string, error) { key := cache.BuildAgentKey(pidToEntityCacheKeyPrefix, strconv.Itoa(int(pid))) if x, found := cache.Cache.Get(key); found { return x.(string), nil @@ -123,7 +123,7 @@ func getEntityForPID(pid int32, capture bool, wmeta optional.Option[workloadmeta // entityForPID returns the entity ID for a given PID. It can return // errNoContainerMatch if no match is found for the PID. -func entityForPID(pid int32, capture bool, wmeta optional.Option[workloadmeta.Component], state pidmap.Component) (string, error) { +func entityForPID(pid int32, capture bool, wmeta option.Option[workloadmeta.Component], state pidmap.Component) (string, error) { if capture { return state.ContainerIDForPID(pid) } diff --git a/comp/dogstatsd/listeners/uds_linux_test.go b/comp/dogstatsd/listeners/uds_linux_test.go index 330056f2b6dea..086c949cce501 100644 --- a/comp/dogstatsd/listeners/uds_linux_test.go +++ b/comp/dogstatsd/listeners/uds_linux_test.go @@ -23,7 +23,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) func TestUDSPassCred(t *testing.T) { @@ -39,7 +39,7 @@ func TestUDSPassCred(t *testing.T) { listernersTelemetryStore := NewTelemetryStore(nil, deps.Telemetry) pool := packets.NewPool(512, packetsTelemetryStore) poolManager := packets.NewPoolManager(pool) - s, err := NewUDSDatagramListener(nil, poolManager, nil, deps.Config, nil, optional.NewNoneOption[workloadmeta.Component](), deps.PidMap, listernersTelemetryStore, packetsTelemetryStore, deps.Telemetry) + s, err := NewUDSDatagramListener(nil, poolManager, nil, deps.Config, nil, option.None[workloadmeta.Component](), deps.PidMap, listernersTelemetryStore, packetsTelemetryStore, deps.Telemetry) defer s.Stop() assert.Nil(t, err) diff --git a/comp/dogstatsd/listeners/uds_nolinux.go b/comp/dogstatsd/listeners/uds_nolinux.go index 4ab0b62117646..4acb1755730c9 100644 --- a/comp/dogstatsd/listeners/uds_nolinux.go +++ b/comp/dogstatsd/listeners/uds_nolinux.go @@ -13,7 +13,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // ErrLinuxOnly is emitted on non-linux platforms @@ -34,6 +34,6 @@ func enableUDSPassCred(_ netUnixConn) error { // processUDSOrigin returns a "not implemented" error on non-linux hosts // //nolint:revive // TODO(AML) Fix revive linter -func processUDSOrigin(_ []byte, _ optional.Option[workloadmeta.Component], _ pidmap.Component) (int, string, error) { +func processUDSOrigin(_ []byte, _ option.Option[workloadmeta.Component], _ pidmap.Component) (int, string, error) { return 0, packets.NoOrigin, ErrLinuxOnly } diff --git a/comp/dogstatsd/listeners/uds_stream.go b/comp/dogstatsd/listeners/uds_stream.go index 48a11e86cdccb..d259cac1d8098 100644 --- a/comp/dogstatsd/listeners/uds_stream.go +++ b/comp/dogstatsd/listeners/uds_stream.go @@ -18,7 +18,7 @@ import ( replay "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/def" 
"github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // UDSStreamListener implements the StatsdListener interface for Unix Domain (streams) @@ -29,7 +29,7 @@ type UDSStreamListener struct { } // NewUDSStreamListener returns an idle UDS datagram Statsd listener -func NewUDSStreamListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSStreamListener, error) { +func NewUDSStreamListener(packetOut chan packets.Packets, sharedPacketPoolManager *packets.PoolManager[packets.Packet], sharedOobPacketPoolManager *packets.PoolManager[[]byte], cfg model.Reader, capture replay.Component, wmeta option.Option[workloadmeta.Component], pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (*UDSStreamListener, error) { socketPath := cfg.GetString("dogstatsd_stream_socket") transport := "unix" diff --git a/comp/dogstatsd/listeners/uds_stream_test.go b/comp/dogstatsd/listeners/uds_stream_test.go index 05359bd0d943e..20ea7272065ec 100644 --- a/comp/dogstatsd/listeners/uds_stream_test.go +++ b/comp/dogstatsd/listeners/uds_stream_test.go @@ -21,11 +21,11 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/comp/dogstatsd/pidmap" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) func udsStreamListenerFactory(packetOut chan packets.Packets, manager *packets.PoolManager[packets.Packet], cfg config.Component, pidMap pidmap.Component, telemetryStore *TelemetryStore, packetsTelemetryStore *packets.TelemetryStore, telemetry telemetry.Component) (StatsdListener, error) { - return NewUDSStreamListener(packetOut, manager, nil, cfg, nil, optional.NewNoneOption[workloadmeta.Component](), pidMap, telemetryStore, packetsTelemetryStore, telemetry) + return NewUDSStreamListener(packetOut, manager, nil, cfg, nil, option.None[workloadmeta.Component](), pidMap, telemetryStore, packetsTelemetryStore, telemetry) } func TestNewUDSStreamListener(t *testing.T) { diff --git a/comp/dogstatsd/packets/buffer_test.go b/comp/dogstatsd/packets/buffer_test.go index 117b26d35ef86..633f1edcf7bcb 100644 --- a/comp/dogstatsd/packets/buffer_test.go +++ b/comp/dogstatsd/packets/buffer_test.go @@ -32,6 +32,7 @@ func TestBufferTelemetry(t *testing.T) { Contents: []byte("test"), Buffer: []byte("test read"), Origin: "test origin", + ProcessID: uint32(1234), ListenerID: "1", Source: 0, } @@ -56,7 +57,7 @@ func TestBufferTelemetry(t *testing.T) { bufferSizeBytesMetricLabel := bufferSizeBytesMetrics[0].Tags() assert.Equal(t, bufferSizeBytesMetricLabel["listener_id"], "test_buffer") - assert.Equal(t, float64(246), bufferSizeBytesMetrics[0].Value()) + assert.Equal(t, float64(262), bufferSizeBytesMetrics[0].Value()) } func TestBufferTelemetryFull(t *testing.T) { @@ -123,7 +124,7 @@ func TestBufferTelemetryFull(t *testing.T) { channelPacketsBytesMetricLabel := channelPacketsBytesMetrics[0].Tags() assert.Equal(t, 
channelPacketsBytesMetricLabel["listener_id"], "test_buffer") - assert.Equal(t, float64(123), channelPacketsBytesMetrics[0].Value()) + assert.Equal(t, float64(131), channelPacketsBytesMetrics[0].Value()) assert.Equal(t, float64(1), channelSizeMetrics[0].Value()) } diff --git a/comp/dogstatsd/packets/types.go b/comp/dogstatsd/packets/types.go index 8844c759d1dc5..35faae4c3750b 100644 --- a/comp/dogstatsd/packets/types.go +++ b/comp/dogstatsd/packets/types.go @@ -8,7 +8,7 @@ package packets import ( "unsafe" - "github.com/DataDog/datadog-agent/pkg/util" + "github.com/DataDog/datadog-agent/pkg/util/size" ) // SourceType is the type of listener @@ -33,6 +33,7 @@ type Packet struct { Contents []byte // Contents, might contain several messages Buffer []byte // Underlying buffer for data read Origin string // Origin container if identified + ProcessID uint32 // ProcessID that sent the packet ListenerID string // Listener ID Source SourceType // Type of listener that produced the packet } @@ -56,7 +57,7 @@ func (p *Packet) DataSizeInBytes() int { return len(p.Contents) + len(p.Buffer) + len(p.Origin) + len(p.ListenerID) } -var _ util.HasSizeInBytes = (*Packet)(nil) +var _ size.HasSizeInBytes = (*Packet)(nil) // SizeInBytes returns the size of the packets in bytes func (ps *Packets) SizeInBytes() int { @@ -72,4 +73,4 @@ func (ps *Packets) DataSizeInBytes() int { return size } -var _ util.HasSizeInBytes = (*Packets)(nil) +var _ size.HasSizeInBytes = (*Packets)(nil) diff --git a/comp/dogstatsd/server/batch.go b/comp/dogstatsd/server/batch.go index 7ad59ff21e3b9..f36ea92cc9eb3 100644 --- a/comp/dogstatsd/server/batch.go +++ b/comp/dogstatsd/server/batch.go @@ -103,14 +103,14 @@ func (s *shardKeyGeneratorPerOrigin) Generate(sample metrics.MetricSample, shard // We fall back on the generic sharding if: // - the sample has a custom cardinality // - we don't have the origin - if sample.OriginInfo.Cardinality != "" || (sample.OriginInfo.ContainerIDFromSocket == "" && sample.OriginInfo.PodUID == "" && sample.OriginInfo.ContainerID == "") { + if sample.OriginInfo.Cardinality != "" || (sample.OriginInfo.ContainerIDFromSocket == "" && sample.OriginInfo.LocalData.PodUID == "" && sample.OriginInfo.LocalData.ContainerID == "") { return s.shardKeyGeneratorBase.Generate(sample, shards) } // Otherwise, we isolate the samples based on the origin. i, j := uint64(0), uint64(0) - i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.PodUID) - i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.ContainerID) + i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.LocalData.PodUID) + i, j = murmur3.SeedStringSum128(i, j, sample.OriginInfo.LocalData.ContainerID) i, _ = murmur3.SeedStringSum128(i, j, sample.OriginInfo.ContainerIDFromSocket) return fastrange(ckey.ContextKey(i), shards) diff --git a/comp/dogstatsd/server/component.go b/comp/dogstatsd/server/component.go index b1c86c69521a6..6871739c87c19 100644 --- a/comp/dogstatsd/server/component.go +++ b/comp/dogstatsd/server/component.go @@ -20,9 +20,6 @@ type Component interface { // IsRunning returns true if the server is running IsRunning() bool - // UdsListenerRunning returns true if the uds listener is running - UdsListenerRunning() bool - // ServerlessFlush flushes all the data to the aggregator to them send it to the Datadog intake. 
ServerlessFlush(time.Duration) diff --git a/comp/dogstatsd/server/convert_bench_test.go b/comp/dogstatsd/server/convert_bench_test.go index eaf39be242898..6a5f120418470 100644 --- a/comp/dogstatsd/server/convert_bench_test.go +++ b/comp/dogstatsd/server/convert_bench_test.go @@ -18,7 +18,7 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) func buildRawSample(tagCount int, multipleValues bool) []byte { @@ -61,7 +61,7 @@ func runParseMetricBenchmark(b *testing.B, multipleValues bool) { continue } - benchSamples = enrichMetricSample(samples, parsed, "", "", conf) + benchSamples = enrichMetricSample(samples, parsed, "", 0, "", conf) } }) } @@ -78,7 +78,7 @@ func BenchmarkParseMultipleMetric(b *testing.B) { type ServerDeps struct { fx.In Config config.Component - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] Telemetry telemetry.Component } diff --git a/comp/dogstatsd/server/enrich.go b/comp/dogstatsd/server/enrich.go index 4441521b66bad..3ff1fdb26e61d 100644 --- a/comp/dogstatsd/server/enrich.go +++ b/comp/dogstatsd/server/enrich.go @@ -9,6 +9,7 @@ import ( "strings" "time" + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" "github.com/DataDog/datadog-agent/comp/dogstatsd/constants" "github.com/DataDog/datadog-agent/pkg/metrics" metricsevent "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -36,15 +37,18 @@ type enrichConfig struct { } // extractTagsMetadata returns tags (client tags + host tag) and information needed to query tagger (origins, cardinality). 
-func extractTagsMetadata(tags []string, originFromUDS string, originFromMsg []byte, externalData string, conf enrichConfig) ([]string, string, taggertypes.OriginInfo, metrics.MetricSource) { +func extractTagsMetadata(tags []string, originFromUDS string, processID uint32, localData origindetection.LocalData, externalData origindetection.ExternalData, conf enrichConfig) ([]string, string, taggertypes.OriginInfo, metrics.MetricSource) { host := conf.defaultHostname metricSource := metrics.MetricSourceDogstatsd + + // Add Origin Detection metadata origin := taggertypes.OriginInfo{ ContainerIDFromSocket: originFromUDS, - ContainerID: string(originFromMsg), + LocalData: localData, ExternalData: externalData, - ProductOrigin: taggertypes.ProductOriginDogStatsD, + ProductOrigin: origindetection.ProductOriginDogStatsD, } + origin.LocalData.ProcessID = processID n := 0 for _, tag := range tags { @@ -52,7 +56,7 @@ func extractTagsMetadata(tags []string, originFromUDS string, originFromMsg []by host = tag[len(hostTagPrefix):] continue } else if strings.HasPrefix(tag, entityIDTagPrefix) { - origin.PodUID = tag[len(entityIDTagPrefix):] + origin.LocalData.PodUID = tag[len(entityIDTagPrefix):] continue } else if strings.HasPrefix(tag, CardinalityTagPrefix) { origin.Cardinality = tag[len(CardinalityTagPrefix):] @@ -109,9 +113,9 @@ func tsToFloatForSamples(ts time.Time) float64 { return float64(ts.Unix()) } -func enrichMetricSample(dest []metrics.MetricSample, ddSample dogstatsdMetricSample, origin string, listenerID string, conf enrichConfig) []metrics.MetricSample { +func enrichMetricSample(dest []metrics.MetricSample, ddSample dogstatsdMetricSample, origin string, processID uint32, listenerID string, conf enrichConfig) []metrics.MetricSample { metricName := ddSample.name - tags, hostnameFromTags, extractedOrigin, metricSource := extractTagsMetadata(ddSample.tags, origin, ddSample.containerID, ddSample.externalData, conf) + tags, hostnameFromTags, extractedOrigin, metricSource := extractTagsMetadata(ddSample.tags, origin, processID, ddSample.localData, ddSample.externalData, conf) if !isExcluded(metricName, conf.metricPrefix, conf.metricPrefixBlacklist) { metricName = conf.metricPrefix + metricName @@ -190,8 +194,8 @@ func enrichEventAlertType(dogstatsdAlertType alertType) metricsevent.AlertType { return metricsevent.AlertTypeSuccess } -func enrichEvent(event dogstatsdEvent, origin string, conf enrichConfig) *metricsevent.Event { - tags, hostnameFromTags, extractedOrigin, _ := extractTagsMetadata(event.tags, origin, event.containerID, event.externalData, conf) +func enrichEvent(event dogstatsdEvent, origin string, processID uint32, conf enrichConfig) *metricsevent.Event { + tags, hostnameFromTags, extractedOrigin, _ := extractTagsMetadata(event.tags, origin, processID, event.localData, event.externalData, conf) enrichedEvent := &metricsevent.Event{ Title: event.title, @@ -227,8 +231,8 @@ func enrichServiceCheckStatus(status serviceCheckStatus) servicecheck.ServiceChe return servicecheck.ServiceCheckUnknown } -func enrichServiceCheck(serviceCheck dogstatsdServiceCheck, origin string, conf enrichConfig) *servicecheck.ServiceCheck { - tags, hostnameFromTags, extractedOrigin, _ := extractTagsMetadata(serviceCheck.tags, origin, serviceCheck.containerID, serviceCheck.externalData, conf) +func enrichServiceCheck(serviceCheck dogstatsdServiceCheck, origin string, processID uint32, conf enrichConfig) *servicecheck.ServiceCheck { + tags, hostnameFromTags, extractedOrigin, _ := 
extractTagsMetadata(serviceCheck.tags, origin, processID, serviceCheck.localData, serviceCheck.externalData, conf) enrichedServiceCheck := &servicecheck.ServiceCheck{ CheckName: serviceCheck.name, diff --git a/comp/dogstatsd/server/enrich_bench_test.go b/comp/dogstatsd/server/enrich_bench_test.go index bd60a62a781ba..80d405280f539 100644 --- a/comp/dogstatsd/server/enrich_bench_test.go +++ b/comp/dogstatsd/server/enrich_bench_test.go @@ -9,6 +9,7 @@ import ( "fmt" "testing" + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" "github.com/DataDog/datadog-agent/pkg/metrics" ) @@ -34,7 +35,7 @@ func BenchmarkExtractTagsMetadata(b *testing.B) { sb.ResetTimer() for n := 0; n < sb.N; n++ { - tags, _, _, _ = extractTagsMetadata(baseTags, "", []byte{}, "", conf) + tags, _, _, _ = extractTagsMetadata(baseTags, "", 0, origindetection.LocalData{}, origindetection.ExternalData{}, conf) } }) } @@ -51,7 +52,7 @@ func BenchmarkMetricsExclusion(b *testing.B) { b.Run("none", func(b *testing.B) { for i := 0; i < b.N; i++ { - enrichMetricSample(out, sample, "", "", conf) + enrichMetricSample(out, sample, "", 0, "", conf) } }) @@ -65,7 +66,7 @@ func BenchmarkMetricsExclusion(b *testing.B) { b.Run(fmt.Sprintf("%d-exact", i), func(b *testing.B) { for i := 0; i < b.N; i++ { - enrichMetricSample(out, sample, "", "", conf) + enrichMetricSample(out, sample, "", 0, "", conf) } }) } diff --git a/comp/dogstatsd/server/enrich_test.go b/comp/dogstatsd/server/enrich_test.go index 29b79597fac09..2e17893686a23 100644 --- a/comp/dogstatsd/server/enrich_test.go +++ b/comp/dogstatsd/server/enrich_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" @@ -40,7 +41,7 @@ func parseAndEnrichSingleMetricMessage(t *testing.T, message []byte, conf enrich } samples := []metrics.MetricSample{} - samples = enrichMetricSample(samples, parsed, "", "", conf) + samples = enrichMetricSample(samples, parsed, "", 0, "", conf) if len(samples) != 1 { return metrics.MetricSample{}, fmt.Errorf("wrong number of metrics parsed") } @@ -57,7 +58,7 @@ func parseAndEnrichMultipleMetricMessage(t *testing.T, message []byte, conf enri } samples := []metrics.MetricSample{} - return enrichMetricSample(samples, parsed, "", "", conf), nil + return enrichMetricSample(samples, parsed, "", 0, "", conf), nil } func parseAndEnrichServiceCheckMessage(t *testing.T, message []byte, conf enrichConfig) (*servicecheck.ServiceCheck, error) { @@ -68,7 +69,7 @@ func parseAndEnrichServiceCheckMessage(t *testing.T, message []byte, conf enrich if err != nil { return nil, err } - return enrichServiceCheck(parsed, "", conf), nil + return enrichServiceCheck(parsed, "", 0, conf), nil } func parseAndEnrichEventMessage(t *testing.T, message []byte, conf enrichConfig) (*event.Event, error) { @@ -79,7 +80,7 @@ func parseAndEnrichEventMessage(t *testing.T, message []byte, conf enrichConfig) if err != nil { return nil, err } - return enrichEvent(parsed, "", conf), nil + return enrichEvent(parsed, "", 0, conf), nil } func TestConvertParseMultiple(t *testing.T) { @@ -99,8 +100,8 @@ func TestConvertParseMultiple(t *testing.T) { assert.Equal(t, 0, len(parsed[0].Tags)) assert.Equal(t, "default-hostname", parsed[0].Host) assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) - 
assert.Equal(t, "", parsed[0].OriginInfo.PodUID) - assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) assert.Equal(t, "daemon", parsed[1].Name) @@ -109,8 +110,8 @@ func TestConvertParseMultiple(t *testing.T) { assert.Equal(t, 0, len(parsed[1].Tags)) assert.Equal(t, "default-hostname", parsed[1].Host) assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed[0].OriginInfo.PodUID) - assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed[1].SampleRate, epsilon) } } @@ -133,8 +134,8 @@ func TestConvertParseSingle(t *testing.T) { assert.Equal(t, 0, len(parsed[0].Tags)) assert.Equal(t, "default-hostname", parsed[0].Host) assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed[0].OriginInfo.PodUID) - assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -159,8 +160,8 @@ func TestConvertParseSingleWithTags(t *testing.T) { assert.Equal(t, "bench", parsed[0].Tags[1]) assert.Equal(t, "default-hostname", parsed[0].Host) assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed[0].OriginInfo.PodUID) - assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -185,8 +186,8 @@ func TestConvertParseSingleWithHostTags(t *testing.T) { assert.Equal(t, "bench", parsed[0].Tags[1]) assert.Equal(t, "custom-host", parsed[0].Host) assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed[0].OriginInfo.PodUID) - assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -211,8 +212,8 @@ func TestConvertParseSingleWithEmptyHostTags(t *testing.T) { assert.Equal(t, "bench", parsed[0].Tags[1]) assert.Equal(t, "", parsed[0].Host) assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed[0].OriginInfo.PodUID) - assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed[0].SampleRate, epsilon) } } @@ -235,8 +236,8 @@ func TestConvertParseSingleWithSampleRate(t *testing.T) { assert.Equal(t, 0, len(parsed[0].Tags)) assert.Equal(t, "default-hostname", parsed[0].Host) assert.Equal(t, "", parsed[0].OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed[0].OriginInfo.PodUID) - assert.Equal(t, "", parsed[0].OriginInfo.ContainerID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed[0].OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 0.21, parsed[0].SampleRate, epsilon) } } @@ -256,8 +257,8 @@ func TestConvertParseSet(t *testing.T) { assert.Equal(t, 0, 
len(parsed.Tags)) assert.Equal(t, "default-hostname", parsed.Host) assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed.OriginInfo.PodUID) - assert.Equal(t, "", parsed.OriginInfo.ContainerID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -276,8 +277,8 @@ func TestConvertParseSetUnicode(t *testing.T) { assert.Equal(t, 0, len(parsed.Tags)) assert.Equal(t, "default-hostname", parsed.Host) assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed.OriginInfo.PodUID) - assert.Equal(t, "", parsed.OriginInfo.ContainerID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -296,8 +297,8 @@ func TestConvertParseGaugeWithPoundOnly(t *testing.T) { assert.Equal(t, 0, len(parsed.Tags)) assert.Equal(t, "default-hostname", parsed.Host) assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed.OriginInfo.PodUID) - assert.Equal(t, "", parsed.OriginInfo.ContainerID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -317,8 +318,8 @@ func TestConvertParseGaugeWithUnicode(t *testing.T) { assert.Equal(t, "intitulé:T0µ", parsed.Tags[0]) assert.Equal(t, "default-hostname", parsed.Host) assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", parsed.OriginInfo.PodUID) - assert.Equal(t, "", parsed.OriginInfo.ContainerID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -393,8 +394,8 @@ func TestConvertServiceCheckMinimal(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -440,8 +441,8 @@ func TestConvertServiceCheckMetadataTimestamp(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -458,8 +459,8 @@ func TestConvertServiceCheckMetadataHostname(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -476,8 +477,8 @@ func TestConvertServiceCheckMetadataHostnameInTag(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - 
assert.Equal(t, "", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string{}, sc.Tags) } @@ -494,8 +495,8 @@ func TestConvertServiceCheckMetadataEmptyHostTag(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string{"other:tag"}, sc.Tags) } @@ -512,8 +513,8 @@ func TestConvertServiceCheckMetadataTags(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string{"tag1", "tag2:test", "tag3"}, sc.Tags) } @@ -530,8 +531,8 @@ func TestConvertServiceCheckMetadataMessage(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "this is fine", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -548,8 +549,8 @@ func TestConvertServiceCheckMetadataMultiple(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "this is fine", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string{"tag1:test", "tag2"}, sc.Tags) // multiple time the same tag @@ -561,8 +562,8 @@ func TestConvertServiceCheckMetadataMultiple(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string(nil), sc.Tags) } @@ -578,8 +579,8 @@ func TestServiceCheckOriginTag(t *testing.T) { assert.Equal(t, servicecheck.ServiceCheckOK, sc.Status) assert.Equal(t, "this is fine", sc.Message) assert.Equal(t, "", sc.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "testID", sc.OriginInfo.PodUID) - assert.Equal(t, "", sc.OriginInfo.ContainerID) + assert.Equal(t, "testID", sc.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", sc.OriginInfo.LocalData.ContainerID) assert.Equal(t, []string{"tag1:test", "tag2"}, sc.Tags) } @@ -601,8 +602,8 @@ func TestConvertEventMinimal(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + 
assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMultilinesText(t *testing.T) { @@ -623,8 +624,8 @@ func TestConvertEventMultilinesText(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventPipeInTitle(t *testing.T) { @@ -645,8 +646,8 @@ func TestConvertEventPipeInTitle(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventError(t *testing.T) { @@ -735,8 +736,8 @@ func TestConvertEventMetadataTimestamp(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMetadataPriority(t *testing.T) { @@ -757,8 +758,8 @@ func TestConvertEventMetadataPriority(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMetadataHostname(t *testing.T) { @@ -779,8 +780,8 @@ func TestConvertEventMetadataHostname(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMetadataHostnameInTag(t *testing.T) { @@ -801,8 +802,8 @@ func TestConvertEventMetadataHostnameInTag(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMetadataEmptyHostTag(t *testing.T) { @@ -823,8 +824,8 @@ func TestConvertEventMetadataEmptyHostTag(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMetadataAlertType(t *testing.T) { @@ -845,8 +846,8 @@ func TestConvertEventMetadataAlertType(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", 
e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMetadataAggregatioKey(t *testing.T) { @@ -867,8 +868,8 @@ func TestConvertEventMetadataAggregatioKey(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMetadataSourceType(t *testing.T) { @@ -889,8 +890,8 @@ func TestConvertEventMetadataSourceType(t *testing.T) { assert.Equal(t, "this is the source", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMetadataTags(t *testing.T) { @@ -911,8 +912,8 @@ func TestConvertEventMetadataTags(t *testing.T) { assert.Equal(t, "", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertEventMetadataMultiple(t *testing.T) { @@ -933,8 +934,8 @@ func TestConvertEventMetadataMultiple(t *testing.T) { assert.Equal(t, "source test", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestEventOriginTag(t *testing.T) { @@ -955,8 +956,8 @@ func TestEventOriginTag(t *testing.T) { assert.Equal(t, "source test", e.SourceTypeName) assert.Equal(t, "", e.EventType) assert.Equal(t, "", e.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "testID", e.OriginInfo.PodUID) - assert.Equal(t, "", e.OriginInfo.ContainerID) + assert.Equal(t, "testID", e.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", e.OriginInfo.LocalData.ContainerID) } func TestConvertNamespace(t *testing.T) { @@ -1004,7 +1005,7 @@ func TestMetricBlocklistShouldBlock(t *testing.T) { parsed, err := parser.parseMetricSample(message) assert.NoError(t, err) samples := []metrics.MetricSample{} - samples = enrichMetricSample(samples, parsed, "", "", conf) + samples = enrichMetricSample(samples, parsed, "", 0, "", conf) assert.Equal(t, 0, len(samples)) } @@ -1022,7 +1023,7 @@ func TestServerlessModeShouldSetEmptyHostname(t *testing.T) { parsed, err := parser.parseMetricSample(message) assert.NoError(t, err) samples := []metrics.MetricSample{} - samples = enrichMetricSample(samples, parsed, "", "", conf) + samples = enrichMetricSample(samples, parsed, "", 0, "", conf) assert.Equal(t, 1, len(samples)) assert.Equal(t, "", samples[0].Host) @@ -1043,7 +1044,7 @@ func TestMetricBlocklistShouldNotBlock(t *testing.T) { parsed, err := parser.parseMetricSample(message) assert.NoError(t, err) samples := []metrics.MetricSample{} - samples = enrichMetricSample(samples, parsed, "", "", conf) + samples = 
enrichMetricSample(samples, parsed, "", 0, "", conf) assert.Equal(t, 1, len(samples)) } @@ -1063,8 +1064,8 @@ func TestConvertEntityOriginDetectionNoTags(t *testing.T) { assert.Equal(t, "sometag2:somevalue2", parsed.Tags[1]) assert.Equal(t, "my-hostname", parsed.Host) assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "foo", parsed.OriginInfo.PodUID) - assert.Equal(t, "", parsed.OriginInfo.ContainerID) + assert.Equal(t, "foo", parsed.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -1082,8 +1083,8 @@ func TestConvertEntityOriginDetectionTags(t *testing.T) { assert.ElementsMatch(t, []string{"sometag1:somevalue1", "sometag2:somevalue2"}, parsed.Tags) assert.Equal(t, "my-hostname", parsed.Host) assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "foo", parsed.OriginInfo.PodUID) - assert.Equal(t, "", parsed.OriginInfo.ContainerID) + assert.Equal(t, "foo", parsed.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -1102,8 +1103,8 @@ func TestConvertEntityOriginDetectionTagsError(t *testing.T) { assert.Equal(t, "sometag2:somevalue2", parsed.Tags[1]) assert.Equal(t, "my-hostname", parsed.Host) assert.Equal(t, "", parsed.OriginInfo.ContainerIDFromSocket) - assert.Equal(t, "foo", parsed.OriginInfo.PodUID) - assert.Equal(t, "", parsed.OriginInfo.ContainerID) + assert.Equal(t, "foo", parsed.OriginInfo.LocalData.PodUID) + assert.Equal(t, "", parsed.OriginInfo.LocalData.ContainerID) assert.InEpsilon(t, 1.0, parsed.SampleRate, epsilon) } @@ -1112,7 +1113,8 @@ func TestEnrichTags(t *testing.T) { tags []string originFromUDS string originFromMsg []byte - externalData string + localData origindetection.LocalData + externalData origindetection.ExternalData conf enrichConfig } tests := []struct { @@ -1127,7 +1129,8 @@ func TestEnrichTags(t *testing.T) { name: "empty tags, host=foo", args: args{ originFromUDS: "", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: true, @@ -1143,7 +1146,8 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod"}, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: true, @@ -1159,7 +1163,8 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: nil, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: true, @@ -1175,15 +1180,21 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod", fmt.Sprintf("%s%s", entityIDTagPrefix, "my-id")}, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: true, }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "my-id"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: 
origindetection.LocalData{ + PodUID: "my-id", + }, + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1191,15 +1202,21 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod", fmt.Sprintf("%s%s", entityIDTagPrefix, "none")}, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: true, }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "none"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: origindetection.LocalData{ + PodUID: "none", + }, + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1207,15 +1224,21 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod", fmt.Sprintf("%s%s", entityIDTagPrefix, "42")}, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: false, }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: origindetection.LocalData{ + PodUID: "42", + }, + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1223,15 +1246,22 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod", fmt.Sprintf("%s%s", entityIDTagPrefix, "42"), CardinalityTagPrefix + types.HighCardinalityString}, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: false, }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "high"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: origindetection.LocalData{ + PodUID: "42", + }, + Cardinality: "high", + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1239,15 +1269,22 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod", fmt.Sprintf("%s%s", entityIDTagPrefix, "42"), CardinalityTagPrefix + types.OrchestratorCardinalityString}, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: false, }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "orchestrator"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: origindetection.LocalData{ + PodUID: "42", + }, + Cardinality: "orchestrator", + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1255,15 +1292,22 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod", fmt.Sprintf("%s%s", entityIDTagPrefix, "42"), 
CardinalityTagPrefix + types.LowCardinalityString}, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: false, }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "low"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: origindetection.LocalData{ + PodUID: "42", + }, + Cardinality: "low", + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1271,15 +1315,22 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod", fmt.Sprintf("%s%s", entityIDTagPrefix, "42"), CardinalityTagPrefix + types.UnknownCardinalityString}, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: false, }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: "unknown"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: origindetection.LocalData{ + PodUID: "42", + }, + Cardinality: "unknown", + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1287,15 +1338,22 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod", fmt.Sprintf("%s%s", entityIDTagPrefix, "42"), CardinalityTagPrefix}, originFromUDS: "originID", - externalData: "", + localData: origindetection.LocalData{}, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", entityIDPrecedenceEnabled: false, }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "42", Cardinality: ""}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: origindetection.LocalData{ + PodUID: "42", + }, + Cardinality: "", + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1304,14 +1362,23 @@ func TestEnrichTags(t *testing.T) { tags: []string{"env:prod", "dd.internal.entity_id:pod-uid"}, originFromUDS: "originID", originFromMsg: []byte("container-id"), - externalData: "", + localData: origindetection.LocalData{ + ContainerID: "container-id", + }, + externalData: origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", PodUID: "pod-uid", ContainerID: "container-id"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: origindetection.LocalData{ + ContainerID: "container-id", + PodUID: "pod-uid", + }, + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1320,14 +1387,22 @@ func TestEnrichTags(t *testing.T) { tags: []string{"env:prod"}, originFromUDS: "originID", originFromMsg: []byte("container-id"), - externalData: "", + localData: origindetection.LocalData{ + ContainerID: "container-id", + }, + externalData: 
origindetection.ExternalData{}, conf: enrichConfig{ defaultHostname: "foo", }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ContainerIDFromSocket: "originID", ContainerID: "container-id"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ContainerIDFromSocket: "originID", + LocalData: origindetection.LocalData{ + ContainerID: "container-id", + }, + }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1335,14 +1410,23 @@ func TestEnrichTags(t *testing.T) { args: args{ tags: []string{"env:prod"}, originFromUDS: "", - externalData: "it-false,cn-container_name,pu-pod_uid", + externalData: origindetection.ExternalData{ + Init: false, + ContainerName: "container_name", + PodUID: "pod_uid", + }, conf: enrichConfig{ defaultHostname: "foo", }, }, - wantedTags: []string{"env:prod"}, - wantedHost: "foo", - wantedOrigin: taggertypes.OriginInfo{ExternalData: "it-false,cn-container_name,pu-pod_uid"}, + wantedTags: []string{"env:prod"}, + wantedHost: "foo", + wantedOrigin: taggertypes.OriginInfo{ + ExternalData: origindetection.ExternalData{ + Init: false, + ContainerName: "container_name", + PodUID: "pod_uid", + }}, wantedMetricSource: metrics.MetricSourceDogstatsd, }, { @@ -1351,7 +1435,14 @@ func TestEnrichTags(t *testing.T) { tags: []string{"env:prod", "dd.internal.entity_id:pod-uid"}, originFromUDS: "originID", originFromMsg: []byte("container-id"), - externalData: "it-false,cn-container_name,pu-pod_uid", + localData: origindetection.LocalData{ + ContainerID: "container-id", + }, + externalData: origindetection.ExternalData{ + Init: false, + ContainerName: "container_name", + PodUID: "pod_uid", + }, conf: enrichConfig{ defaultHostname: "foo", }, @@ -1360,18 +1451,24 @@ func TestEnrichTags(t *testing.T) { wantedHost: "foo", wantedOrigin: taggertypes.OriginInfo{ ContainerIDFromSocket: "originID", - PodUID: "pod-uid", - ContainerID: "container-id", - ExternalData: "it-false,cn-container_name,pu-pod_uid", + LocalData: origindetection.LocalData{ + ContainerID: "container-id", + PodUID: "pod-uid", + }, + ExternalData: origindetection.ExternalData{ + Init: false, + ContainerName: "container_name", + PodUID: "pod_uid", + }, }, wantedMetricSource: metrics.MetricSourceDogstatsd, }, } for _, tt := range tests { - tt.wantedOrigin.ProductOrigin = taggertypes.ProductOriginDogStatsD + tt.wantedOrigin.ProductOrigin = origindetection.ProductOriginDogStatsD t.Run(tt.name, func(t *testing.T) { - tags, host, origin, metricSource := extractTagsMetadata(tt.args.tags, tt.args.originFromUDS, tt.args.originFromMsg, tt.args.externalData, tt.args.conf) + tags, host, origin, metricSource := extractTagsMetadata(tt.args.tags, tt.args.originFromUDS, 0, tt.args.localData, tt.args.externalData, tt.args.conf) assert.Equal(t, tt.wantedTags, tags) assert.Equal(t, tt.wantedHost, host) assert.Equal(t, tt.wantedOrigin, origin) @@ -1419,7 +1516,7 @@ func TestEnrichTagsWithJMXCheckName(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - tags, _, _, metricSource := extractTagsMetadata(tt.tags, "", []byte{}, "", enrichConfig{}) + tags, _, _, metricSource := extractTagsMetadata(tt.tags, "", 0, origindetection.LocalData{}, origindetection.ExternalData{}, enrichConfig{}) assert.Equal(t, tt.wantedTags, tags) assert.Equal(t, tt.wantedMetricSource, metricSource) assert.NotContains(t, tags, tt.jmxCheckName) diff --git a/comp/dogstatsd/server/parse.go b/comp/dogstatsd/server/parse.go index 
3c15ad8b05a5b..8b7ae778af050 100644 --- a/comp/dogstatsd/server/parse.go +++ b/comp/dogstatsd/server/parse.go @@ -12,11 +12,12 @@ import ( "time" "unsafe" + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type messageType int @@ -47,11 +48,6 @@ var ( // externalDataPrefix is the prefix for a common field which contains the external data for Origin Detection. externalDataPrefix = []byte("e:") - - // containerIDPrefix is the prefix for a notation holding the sender's container Inode in the containerIDField - containerIDPrefix = []byte("ci-") - // inodePrefix is the prefix for a notation holding the sender's container Inode in the containerIDField - inodePrefix = []byte("in-") ) // parser parses dogstatsd messages @@ -72,7 +68,7 @@ type parser struct { provider provider.Provider } -func newParser(cfg model.Reader, float64List *float64ListPool, workerNum int, wmeta optional.Option[workloadmeta.Component], stringInternerTelemetry *stringInternerTelemetry) *parser { +func newParser(cfg model.Reader, float64List *float64ListPool, workerNum int, wmeta option.Option[workloadmeta.Component], stringInternerTelemetry *stringInternerTelemetry) *parser { stringInternerCacheSize := cfg.GetInt("dogstatsd_string_interner_size") readTimestamps := cfg.GetBool("dogstatsd_no_aggregation_pipeline") @@ -176,8 +172,8 @@ func (p *parser) parseMetricSample(message []byte) (dogstatsdMetricSample, error sampleRate := 1.0 var tags []string - var containerID []byte - var externalData string + var localData origindetection.LocalData + var externalData origindetection.ExternalData var optionalField []byte var timestamp time.Time for message != nil { @@ -205,12 +201,12 @@ func (p *parser) parseMetricSample(message []byte) (dogstatsdMetricSample, error return dogstatsdMetricSample{}, fmt.Errorf("dogstatsd timestamp should be > 0") } timestamp = time.Unix(ts, 0) - // container ID + // local data case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, localDataPrefix): - containerID = p.resolveContainerIDFromLocalData(optionalField) + localData = p.parseLocalData(optionalField[len(localDataPrefix):]) // external data case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, externalDataPrefix): - externalData = string(optionalField[len(externalDataPrefix):]) + externalData = p.parseExternalData(optionalField[len(externalDataPrefix):]) } } @@ -222,12 +218,38 @@ func (p *parser) parseMetricSample(message []byte) (dogstatsdMetricSample, error metricType: metricType, sampleRate: sampleRate, tags: tags, - containerID: containerID, + localData: localData, externalData: externalData, ts: timestamp, }, nil } +// parseLocalData parses the local data string into a LocalData struct. +func (p *parser) parseLocalData(rawLocalData []byte) origindetection.LocalData { + localDataString := string(rawLocalData) + + localData, err := origindetection.ParseLocalData(localDataString) + if err != nil { + log.Errorf("failed to parse c: field containing Local Data %q: %v", localDataString, err) + } + + // return localData even if there was a parsing error as some fields might have been parsed correctly. 
+ return localData +} + +// parseExternalData parses the external data string into an ExternalData struct. +func (p *parser) parseExternalData(rawExternalData []byte) origindetection.ExternalData { + externalDataString := string(rawExternalData) + + externalData, err := origindetection.ParseExternalData(externalDataString) + if err != nil { + log.Errorf("failed to parse e: field containing External Data %q: %v", externalDataString, err) + } + + // return externalData even if there was a parsing error as some fields might have been parsed correctly. + return externalData +} + // parseFloat64List parses a list of float64 separated by colonSeparator. func (p *parser) parseFloat64List(rawFloats []byte) ([]float64, error) { var value float64 @@ -265,68 +287,6 @@ func (p *parser) parseFloat64List(rawFloats []byte) ([]float64, error) { return values, nil } -// resolveContainerIDFromLocalData returns the container ID for the given Local Data. -// The Local Data is a list that can contain one or two (split by a ',') of either: -// * "ci-" for the container ID. -// * "in-" for the cgroupv2 inode. -// Possible values: -// * "" -// * "ci-" -// * "ci-,in-" -func (p *parser) resolveContainerIDFromLocalData(rawLocalData []byte) []byte { - // Remove prefix from Local Data - localData := rawLocalData[len(localDataPrefix):] - - var containerID []byte - var containerIDFromInode []byte - - if bytes.Contains(localData, []byte(",")) { - // The Local Data can contain a list - items := bytes.Split(localData, []byte{','}) - for _, item := range items { - if bytes.HasPrefix(item, containerIDPrefix) { - containerID = item[len(containerIDPrefix):] - } else if bytes.HasPrefix(item, inodePrefix) { - containerIDFromInode = p.resolveContainerIDFromInode(item[len(inodePrefix):]) - } - } - if containerID == nil { - containerID = containerIDFromInode - } - } else { - // The Local Data can contain a single value - if bytes.HasPrefix(localData, containerIDPrefix) { // Container ID with new format: ci- - containerID = localData[len(containerIDPrefix):] - } else if bytes.HasPrefix(localData, inodePrefix) { // Cgroupv2 inode format: in- - containerID = p.resolveContainerIDFromInode(localData[len(inodePrefix):]) - } else { // Container ID with old format: - containerID = localData - } - } - - if containerID == nil { - log.Debugf("Could not parse container ID from Local Data: %s", localData) - } - - return containerID -} - -// resolveContainerIDFromInode returns the container ID for the given cgroupv2 inode. 
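A minimal sketch of what the new helpers consume, using only formats exercised by the tests in this change: the "c:" value is a plain container ID and the "e:" value follows the it-/cn-/pu- notation. The sample values are placeholders; the ParseLocalData and ParseExternalData signatures are taken from their use in parseLocalData and parseExternalData above.

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/comp/core/tagger/origindetection"
)

func main() {
	// Value of the "c:" field with the localDataPrefix already stripped,
	// e.g. taken from "metric:1234|g|c:1234567890abcdef".
	localData, err := origindetection.ParseLocalData("1234567890abcdef")
	if err != nil {
		// As in parseLocalData above, a partial result may still be usable.
		fmt.Println("local data parse error:", err)
	}
	fmt.Println(localData.ContainerID) // "1234567890abcdef"

	// Value of the "e:" field with the externalDataPrefix already stripped,
	// e.g. taken from "metric:1234|g|e:it-false,cn-container_name,pu-pod_uid".
	externalData, err := origindetection.ParseExternalData("it-false,cn-container_name,pu-pod_uid")
	if err != nil {
		fmt.Println("external data parse error:", err)
	}
	fmt.Println(externalData.Init, externalData.ContainerName, externalData.PodUID)
}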
-func (p *parser) resolveContainerIDFromInode(inode []byte) []byte { - inodeField, err := strconv.ParseUint(string(inode), 10, 64) - if err != nil { - log.Debugf("Failed to parse inode from %s, got %v", inode, err) - return nil - } - - containerID, err := p.provider.GetMetaCollector().GetContainerIDForInode(inodeField, cacheValidity) - if err != nil { - log.Debugf("Failed to get container ID, got %v", err) - return nil - } - return []byte(containerID) -} - // the std API does not have methods to do []byte => float parsing // we use this unsafe trick to avoid having to allocate one string for // every parsed float diff --git a/comp/dogstatsd/server/parse_events.go b/comp/dogstatsd/server/parse_events.go index 0273f5a6f5ab3..b84a530d18099 100644 --- a/comp/dogstatsd/server/parse_events.go +++ b/comp/dogstatsd/server/parse_events.go @@ -9,6 +9,7 @@ import ( "bytes" "fmt" + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -38,10 +39,10 @@ type dogstatsdEvent struct { sourceType string alertType alertType tags []string - // containerID represents the container ID of the sender (optional). - containerID []byte + // localData is used for Origin Detection + localData origindetection.LocalData // externalData is used for Origin Detection - externalData string + externalData origindetection.ExternalData } type eventHeader struct { @@ -166,9 +167,9 @@ func (p *parser) applyEventOptionalField(event dogstatsdEvent, optionalField []b case bytes.HasPrefix(optionalField, eventTagsPrefix): newEvent.tags = p.parseTags(optionalField[len(eventTagsPrefix):]) case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, localDataPrefix): - newEvent.containerID = p.resolveContainerIDFromLocalData(optionalField) + newEvent.localData = p.parseLocalData(optionalField[len(localDataPrefix):]) case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, externalDataPrefix): - newEvent.externalData = string(optionalField[len(externalDataPrefix):]) + newEvent.externalData = p.parseExternalData(optionalField[len(externalDataPrefix):]) } if err != nil { return event, err diff --git a/comp/dogstatsd/server/parse_metrics.go b/comp/dogstatsd/server/parse_metrics.go index a0ea5776e4ac8..0c6da45f9d120 100644 --- a/comp/dogstatsd/server/parse_metrics.go +++ b/comp/dogstatsd/server/parse_metrics.go @@ -8,6 +8,7 @@ package server import ( "bytes" "fmt" + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" "time" ) @@ -46,10 +47,10 @@ type dogstatsdMetricSample struct { metricType metricType sampleRate float64 tags []string - // containerID represents the container ID of the sender (optional). 
- containerID []byte + // localData is used for Origin Detection + localData origindetection.LocalData // externalData is used for Origin Detection - externalData string + externalData origindetection.ExternalData // timestamp read in the message if any ts time.Time } diff --git a/comp/dogstatsd/server/parse_metrics_test.go b/comp/dogstatsd/server/parse_metrics_test.go index 4d6d80c71911c..58f88f0e5c4c6 100644 --- a/comp/dogstatsd/server/parse_metrics_test.go +++ b/comp/dogstatsd/server/parse_metrics_test.go @@ -14,7 +14,6 @@ import ( "go.uber.org/fx" "github.com/DataDog/datadog-agent/comp/core/config" - "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/mock" ) func parseMetricSample(t *testing.T, overrides map[string]any, rawSample []byte) (dogstatsdMetricSample, error) { @@ -573,22 +572,5 @@ func TestParseContainerID(t *testing.T) { // Testing with a container ID sample, err := parseMetricSample(t, cfg, []byte("metric:1234|g|c:1234567890abcdef")) require.NoError(t, err) - assert.Equal(t, []byte("1234567890abcdef"), sample.containerID) - - // Testing with an Inode - deps := newServerDeps(t, fx.Replace(config.MockParams{Overrides: cfg})) - stringInternerTelemetry := newSiTelemetry(false, deps.Telemetry) - p := newParser(deps.Config, newFloat64ListPool(deps.Telemetry), 1, deps.WMeta, stringInternerTelemetry) - mockProvider := mock.NewMetricsProvider() - mockProvider.RegisterMetaCollector(&mock.MetaCollector{ - CIDFromInode: map[uint64]string{ - 1234567890: "1234567890abcdef", - }, - }) - p.provider = mockProvider - cfg["parser"] = p - - sample, err = parseMetricSample(t, cfg, []byte("metric:1234|g|c:in-1234567890")) - require.NoError(t, err) - assert.Equal(t, []byte("1234567890abcdef"), sample.containerID) + assert.Equal(t, "1234567890abcdef", sample.localData.ContainerID) } diff --git a/comp/dogstatsd/server/parse_service_checks.go b/comp/dogstatsd/server/parse_service_checks.go index 5c48073c4fa98..6f4cc2239f2fa 100644 --- a/comp/dogstatsd/server/parse_service_checks.go +++ b/comp/dogstatsd/server/parse_service_checks.go @@ -10,6 +10,7 @@ import ( "fmt" "strconv" + "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -29,10 +30,10 @@ type dogstatsdServiceCheck struct { hostname string message string tags []string - // containerID represents the container ID of the sender (optional). 
- containerID []byte + // localData is used for Origin Detection + localData origindetection.LocalData // externalData is used for Origin Detection - externalData string + externalData origindetection.ExternalData } var ( @@ -100,9 +101,9 @@ func (p *parser) applyServiceCheckOptionalField(serviceCheck dogstatsdServiceChe case bytes.HasPrefix(optionalField, serviceCheckMessagePrefix): newServiceCheck.message = string(optionalField[len(serviceCheckMessagePrefix):]) case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, localDataPrefix): - newServiceCheck.containerID = p.resolveContainerIDFromLocalData(optionalField) + newServiceCheck.localData = p.parseLocalData(optionalField[len(localDataPrefix):]) case p.dsdOriginEnabled && bytes.HasPrefix(optionalField, externalDataPrefix): - newServiceCheck.externalData = string(optionalField[len(externalDataPrefix):]) + newServiceCheck.externalData = p.parseExternalData(optionalField[len(externalDataPrefix):]) } if err != nil { return serviceCheck, err diff --git a/comp/dogstatsd/server/parse_test.go b/comp/dogstatsd/server/parse_test.go index 67dd3e706fb02..1992b9161719e 100644 --- a/comp/dogstatsd/server/parse_test.go +++ b/comp/dogstatsd/server/parse_test.go @@ -10,8 +10,6 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/mock" ) func TestIdentifyEvent(t *testing.T) { @@ -111,105 +109,3 @@ func TestUnsafeParseInt(t *testing.T) { assert.Equal(t, integer, unsafeInteger) } - -func TestResolveContainerIDFromLocalData(t *testing.T) { - const ( - localDataPrefix = "c:" - containerIDPrefix = "ci-" - inodePrefix = "in-" - containerID = "abcdef" - containerInode = "4242" - ) - - deps := newServerDeps(t) - stringInternerTelemetry := newSiTelemetry(false, deps.Telemetry) - p := newParser(deps.Config, newFloat64ListPool(deps.Telemetry), 1, deps.WMeta, stringInternerTelemetry) - - // Mock the provider to resolve the container ID from the inode - mockProvider := mock.NewMetricsProvider() - containerInodeUint, _ := strconv.ParseUint(containerInode, 10, 64) - mockProvider.RegisterMetaCollector(&mock.MetaCollector{ - CIDFromInode: map[uint64]string{ - containerInodeUint: containerID, - }, - }) - p.provider = mockProvider - - tests := []struct { - name string - input []byte - expected []byte - }{ - { - name: "Empty LocalData", - input: []byte(localDataPrefix), - expected: []byte{}, - }, - { - name: "LocalData with new container ID", - input: []byte(localDataPrefix + containerIDPrefix + containerID), - expected: []byte(containerID), - }, - { - name: "LocalData with old container ID format", - input: []byte(localDataPrefix + containerID), - expected: []byte(containerID), - }, - { - name: "LocalData with inode", - input: []byte(localDataPrefix + inodePrefix + containerInode), - expected: []byte(containerID), - }, - { - name: "LocalData with invalid inode", - input: []byte(localDataPrefix + inodePrefix + "invalid"), - expected: []byte(nil), - }, - { - name: "LocalData as a list", - input: []byte(localDataPrefix + containerIDPrefix + containerID + "," + inodePrefix + containerInode), - expected: []byte(containerID), - }, - { - name: "LocalData as a list with only inode", - input: []byte(localDataPrefix + inodePrefix + containerInode), - expected: []byte(containerID), - }, - { - name: "LocalData as a list with only container ID", - input: []byte(localDataPrefix + containerIDPrefix + containerID), - expected: []byte(containerID), - }, - { - name: "LocalData as a list with only inode with trailing 
comma", - input: []byte(localDataPrefix + inodePrefix + containerInode + ","), - expected: []byte(containerID), - }, - { - name: "LocalData as a list with only container ID with trailing comma", - input: []byte(localDataPrefix + containerIDPrefix + containerID + ","), - expected: []byte(containerID), - }, - { - name: "LocalData as a list with only inode surrounded by commas", - input: []byte(localDataPrefix + "," + inodePrefix + containerInode + ","), // This is an invalid format, but we should still be able to extract the container ID - expected: []byte(containerID), - }, - { - name: "LocalData as a list with only inode surrounded by commas", - input: []byte(localDataPrefix + "," + containerIDPrefix + containerID + ","), // This is an invalid format, but we should still be able to extract the container ID - expected: []byte(containerID), - }, - { - name: "LocalData as an invalid list", - input: []byte(localDataPrefix + ","), - expected: []byte(nil), - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - assert.Equal(t, tc.expected, p.resolveContainerIDFromLocalData(tc.input)) - }) - } -} diff --git a/comp/dogstatsd/server/server.go b/comp/dogstatsd/server/server.go index a879dac31f828..9471539fc5c90 100644 --- a/comp/dogstatsd/server/server.go +++ b/comp/dogstatsd/server/server.go @@ -36,10 +36,11 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/status/health" - - "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/hostname" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" + "github.com/DataDog/datadog-agent/pkg/util/sort" + statutil "github.com/DataDog/datadog-agent/pkg/util/stat" + tagutil "github.com/DataDog/datadog-agent/pkg/util/tags" ) var ( @@ -76,7 +77,7 @@ type dependencies struct { Replay replay.Component PidMap pidmap.Component Params Params - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] Telemetry telemetry.Component } @@ -119,7 +120,7 @@ type server struct { sharedPacketPool *packets.Pool sharedPacketPoolManager *packets.PoolManager[packets.Packet] sharedFloat64List *float64ListPool - Statistics *util.Stats + Statistics *statutil.Stats Started bool stopChan chan bool health *health.Handle @@ -149,16 +150,15 @@ type server struct { cachedOrder []cachedOriginCounter // for cache eviction // ServerlessMode is set to true if we're running in a serverless environment. - ServerlessMode bool - udsListenerRunning bool - udpLocalAddr string + ServerlessMode bool + udpLocalAddr string // originTelemetry is true if we want to report telemetry per origin. 
originTelemetry bool enrichConfig enrichConfig - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] // telemetry telemetry telemetry.Component @@ -198,13 +198,13 @@ func newServer(deps dependencies) provides { } } -func newServerCompat(cfg model.Reader, log log.Component, capture replay.Component, debug serverdebug.Component, serverless bool, demux aggregator.Demultiplexer, wmeta optional.Option[workloadmeta.Component], pidMap pidmap.Component, telemetrycomp telemetry.Component) *server { +func newServerCompat(cfg model.Reader, log log.Component, capture replay.Component, debug serverdebug.Component, serverless bool, demux aggregator.Demultiplexer, wmeta option.Option[workloadmeta.Component], pidMap pidmap.Component, telemetrycomp telemetry.Component) *server { // This needs to be done after the configuration is loaded once.Do(func() { initTelemetry() }) - var stats *util.Stats + var stats *statutil.Stats if cfg.GetBool("dogstatsd_stats_enable") { buff := cfg.GetInt("dogstatsd_stats_buffer") - s, err := util.NewStats(uint32(buff)) + s, err := statutil.NewStats(uint32(buff)) if err != nil { log.Errorf("Dogstatsd: unable to start statistics facilities") } @@ -236,10 +236,10 @@ func newServerCompat(cfg model.Reader, log log.Component, capture replay.Compone // if the server is running in a context where static tags are required, add those // to extraTags. - if staticTags := util.GetStaticTagsSlice(context.TODO(), cfg); staticTags != nil { + if staticTags := tagutil.GetStaticTagsSlice(context.TODO(), cfg); staticTags != nil { extraTags = append(extraTags, staticTags...) } - util.SortUniqInPlace(extraTags) + sort.UniqInPlace(extraTags) entityIDPrecedenceEnabled := cfg.GetBool("dogstatsd_entity_id_precedence") @@ -290,7 +290,6 @@ func newServerCompat(cfg model.Reader, log log.Component, capture replay.Compone cfg.GetBool("telemetry.dogstatsd_origin"), tCapture: capture, pidMap: pidMap, - udsListenerRunning: false, cachedOriginCounters: make(map[string]cachedOriginCounter), ServerlessMode: serverless, enrichConfig: enrichConfig{ @@ -350,8 +349,6 @@ func (s *server) start(context.Context) error { sharedPacketPool := packets.NewPool(s.config.GetInt("dogstatsd_buffer_size"), s.packetsTelemetry) sharedPacketPoolManager := packets.NewPoolManager[packets.Packet](sharedPacketPool) - udsListenerRunning := false - socketPath := s.config.GetString("dogstatsd_socket") socketStreamPath := s.config.GetString("dogstatsd_stream_socket") originDetection := s.config.GetBool("dogstatsd_origin_detection") @@ -377,7 +374,6 @@ func (s *server) start(context.Context) error { s.log.Errorf("Can't init UDS listener on path %s: %s", socketPath, err.Error()) } else { tmpListeners = append(tmpListeners, unixListener) - udsListenerRunning = true } } @@ -415,7 +411,6 @@ func (s *server) start(context.Context) error { return fmt.Errorf("listening on neither udp nor socket, please check your configuration") } - s.udsListenerRunning = udsListenerRunning s.packetsIn = packetsChannel s.captureChan = packetsChannel s.sharedPacketPool = sharedPacketPool @@ -609,10 +604,6 @@ func nextMessage(packet *[]byte, eolTermination bool) (message []byte) { return message } -func (s *server) UdsListenerRunning() bool { - return s.udsListenerRunning -} - func (s *server) eolEnabled(sourceType packets.SourceType) bool { switch sourceType { case packets.UDS: @@ -652,14 +643,14 @@ func (s *server) parsePackets(batcher dogstatsdBatcher, parser *parser, packets switch messageType { case serviceCheckType: - 
serviceCheck, err := s.parseServiceCheckMessage(parser, message, packet.Origin) + serviceCheck, err := s.parseServiceCheckMessage(parser, message, packet.Origin, packet.ProcessID) if err != nil { s.errLog("Dogstatsd: error parsing service check '%q': %s", message, err) continue } batcher.appendServiceCheck(serviceCheck) case eventType: - event, err := s.parseEventMessage(parser, message, packet.Origin) + event, err := s.parseEventMessage(parser, message, packet.Origin, packet.ProcessID) if err != nil { s.errLog("Dogstatsd: error parsing event '%q': %s", message, err) continue @@ -670,7 +661,7 @@ func (s *server) parsePackets(batcher dogstatsdBatcher, parser *parser, packets samples = samples[0:0] - samples, err = s.parseMetricMessage(samples, parser, message, packet.Origin, packet.ListenerID, s.originTelemetry) + samples, err = s.parseMetricMessage(samples, parser, message, packet.Origin, packet.ProcessID, packet.ListenerID, s.originTelemetry) if err != nil { s.errLog("Dogstatsd: error parsing metric message '%q': %s", message, err) continue @@ -745,7 +736,7 @@ func (s *server) getOriginCounter(origin string) (okCnt telemetry.SimpleCounter, // which will be slower when processing millions of samples. It could use a boolean returned by `parseMetricSample` which // is the first part aware of processing a late metric. Also, it may help us having a telemetry of a "late_metrics" type here // which we can't do today. -func (s *server) parseMetricMessage(metricSamples []metrics.MetricSample, parser *parser, message []byte, origin string, listenerID string, originTelemetry bool) ([]metrics.MetricSample, error) { +func (s *server) parseMetricMessage(metricSamples []metrics.MetricSample, parser *parser, message []byte, origin string, processID uint32, listenerID string, originTelemetry bool) ([]metrics.MetricSample, error) { okCnt := s.tlmProcessedOk errorCnt := s.tlmProcessedError if origin != "" && originTelemetry { @@ -768,7 +759,7 @@ func (s *server) parseMetricMessage(metricSamples []metrics.MetricSample, parser } } - metricSamples = enrichMetricSample(metricSamples, sample, origin, listenerID, s.enrichConfig) + metricSamples = enrichMetricSample(metricSamples, sample, origin, processID, listenerID, s.enrichConfig) if len(sample.values) > 0 { s.sharedFloat64List.put(sample.values) @@ -788,28 +779,28 @@ func (s *server) parseMetricMessage(metricSamples []metrics.MetricSample, parser return metricSamples, nil } -func (s *server) parseEventMessage(parser *parser, message []byte, origin string) (*event.Event, error) { +func (s *server) parseEventMessage(parser *parser, message []byte, origin string, processID uint32) (*event.Event, error) { sample, err := parser.parseEvent(message) if err != nil { dogstatsdEventParseErrors.Add(1) s.tlmProcessed.Inc("events", "error", "") return nil, err } - event := enrichEvent(sample, origin, s.enrichConfig) + event := enrichEvent(sample, origin, processID, s.enrichConfig) event.Tags = append(event.Tags, s.extraTags...) 
s.tlmProcessed.Inc("events", "ok", "") dogstatsdEventPackets.Add(1) return event, nil } -func (s *server) parseServiceCheckMessage(parser *parser, message []byte, origin string) (*servicecheck.ServiceCheck, error) { +func (s *server) parseServiceCheckMessage(parser *parser, message []byte, origin string, processID uint32) (*servicecheck.ServiceCheck, error) { sample, err := parser.parseServiceCheck(message) if err != nil { dogstatsdServiceCheckParseErrors.Add(1) s.tlmProcessed.Inc("service_checks", "error", "") return nil, err } - serviceCheck := enrichServiceCheck(sample, origin, s.enrichConfig) + serviceCheck := enrichServiceCheck(sample, origin, processID, s.enrichConfig) serviceCheck.Tags = append(serviceCheck.Tags, s.extraTags...) dogstatsdServiceCheckPackets.Add(1) s.tlmProcessed.Inc("service_checks", "ok", "") diff --git a/comp/dogstatsd/server/server_bench_test.go b/comp/dogstatsd/server/server_bench_test.go index 368541c5829f4..3b57b9f3aab3d 100644 --- a/comp/dogstatsd/server/server_bench_test.go +++ b/comp/dogstatsd/server/server_bench_test.go @@ -108,7 +108,7 @@ func BenchmarkPbarseMetricMessage(b *testing.B) { b.RunParallel(func(pb *testing.PB) { samplesBench = make([]metrics.MetricSample, 0, 512) for pb.Next() { - s.parseMetricMessage(samplesBench, parser, message, "", "", false) + s.parseMetricMessage(samplesBench, parser, message, "", 0, "", false) samplesBench = samplesBench[0:0] } }) diff --git a/comp/dogstatsd/server/server_integration_test.go b/comp/dogstatsd/server/server_integration_test.go index ab693b7462f13..12e4c7a19cf5f 100644 --- a/comp/dogstatsd/server/server_integration_test.go +++ b/comp/dogstatsd/server/server_integration_test.go @@ -136,7 +136,6 @@ func TestUDSConn(t *testing.T) { cfg["dogstatsd_socket"] = socketPath deps := fulfillDepsWithConfigOverride(t, cfg) - require.True(t, deps.Server.UdsListenerRunning()) conn, err := net.Dial("unixgram", socketPath) require.NoError(t, err, "cannot connect to DSD socket") @@ -195,9 +194,7 @@ func TestUDSReceiverNoDir(t *testing.T) { cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off cfg["dogstatsd_socket"] = socketPath - deps := fulfillDepsWithConfigOverride(t, cfg) - require.False(t, deps.Server.UdsListenerRunning()) - + _ = fulfillDepsWithConfigOverride(t, cfg) _, err := net.Dial("unixgram", socketPath) require.Error(t, err, "UDS listener should be closed") } diff --git a/comp/dogstatsd/server/server_mock.go b/comp/dogstatsd/server/server_mock.go index 20892752c44a7..cac2a9cc567d3 100644 --- a/comp/dogstatsd/server/server_mock.go +++ b/comp/dogstatsd/server/server_mock.go @@ -54,11 +54,6 @@ func (s *serverMock) Capture(_ string, _ time.Duration, _ bool) (string, error) return "", nil } -// UdsListenerRunning is a mocked function that returns false -func (s *serverMock) UdsListenerRunning() bool { - return false -} - // UDPLocalAddr is a mocked function but UDP isn't enabled on the mock func (s *serverMock) UDPLocalAddr() string { return "" diff --git a/comp/dogstatsd/server/server_test.go b/comp/dogstatsd/server/server_test.go index fdb92e02a42b1..fd89d9f102067 100644 --- a/comp/dogstatsd/server/server_test.go +++ b/comp/dogstatsd/server/server_test.go @@ -9,7 +9,6 @@ package server import ( "fmt" - "runtime" "testing" "github.com/stretchr/testify/assert" @@ -32,19 +31,6 @@ func TestNewServer(t *testing.T) { } -func TestUDSReceiverDisabled(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("UDS isn't supported on windows") - } - cfg := make(map[string]interface{}) - 
cfg["dogstatsd_port"] = listeners.RandomPortName - cfg["dogstatsd_no_aggregation_pipeline"] = true // another test may have turned it off - cfg["dogstatsd_socket"] = "" // disabled - - deps := fulfillDepsWithConfigOverride(t, cfg) - require.False(t, deps.Server.UdsListenerRunning()) -} - // This test is proving that no data race occurred on the `cachedTlmOriginIds` map. // It should not fail since `cachedTlmOriginIds` and `cachedOrder` should be // properly protected from multiple accesses by `cachedTlmLock`. @@ -89,7 +75,7 @@ func TestNoMappingsConfig(t *testing.T) { assert.Nil(t, s.mapper) parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry) - samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "", "", false) + samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "", 0, "", false) assert.NoError(t, err) assert.Len(t, samples, 1) } @@ -147,22 +133,22 @@ func testContainerIDParsing(t *testing.T, cfg map[string]interface{}) { parser.dsdOriginEnabled = true // Metric - metrics, err := s.parseMetricMessage(nil, parser, []byte("metric.name:123|g|c:metric-container"), "", "", false) + metrics, err := s.parseMetricMessage(nil, parser, []byte("metric.name:123|g|c:metric-container"), "", 0, "", false) assert.NoError(err) assert.Len(metrics, 1) - assert.Equal("metric-container", metrics[0].OriginInfo.ContainerID) + assert.Equal("metric-container", metrics[0].OriginInfo.LocalData.ContainerID) // Event - event, err := s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "") + event, err := s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "", 0) assert.NoError(err) assert.NotNil(event) - assert.Equal("event-container", event.OriginInfo.ContainerID) + assert.Equal("event-container", event.OriginInfo.LocalData.ContainerID) // Service check - serviceCheck, err := s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "") + serviceCheck, err := s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "", 0) assert.NoError(err) assert.NotNil(serviceCheck) - assert.Equal("service-check-container", serviceCheck.OriginInfo.ContainerID) + assert.Equal("service-check-container", serviceCheck.OriginInfo.LocalData.ContainerID) } func TestContainerIDParsing(t *testing.T) { @@ -191,22 +177,25 @@ func TestOrigin(t *testing.T) { parser.dsdOriginEnabled = true // Metric - metrics, err := s.parseMetricMessage(nil, parser, []byte("metric.name:123|g|c:metric-container|#dd.internal.card:none"), "", "", false) + metrics, err := s.parseMetricMessage(nil, parser, []byte("metric.name:123|g|c:metric-container|#dd.internal.card:none"), "", 1234, "", false) assert.NoError(err) assert.Len(metrics, 1) - assert.Equal("metric-container", metrics[0].OriginInfo.ContainerID) + assert.Equal("metric-container", metrics[0].OriginInfo.LocalData.ContainerID) + assert.Equal(uint32(1234), metrics[0].OriginInfo.LocalData.ProcessID) // Event - event, err := s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container|#dd.internal.card:none"), "") + event, err := s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container|#dd.internal.card:none"), "", 1234) assert.NoError(err) assert.NotNil(event) - assert.Equal("event-container", event.OriginInfo.ContainerID) + assert.Equal("event-container", 
event.OriginInfo.LocalData.ContainerID) + assert.Equal(uint32(1234), event.OriginInfo.LocalData.ProcessID) // Service check - serviceCheck, err := s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container|#dd.internal.card:none"), "") + serviceCheck, err := s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container|#dd.internal.card:none"), "", 1234) assert.NoError(err) assert.NotNil(serviceCheck) - assert.Equal("service-check-container", serviceCheck.OriginInfo.ContainerID) + assert.Equal("service-check-container", serviceCheck.OriginInfo.LocalData.ContainerID) + assert.Equal(uint32(1234), serviceCheck.OriginInfo.LocalData.ProcessID) }) } diff --git a/comp/dogstatsd/server/server_util_test.go b/comp/dogstatsd/server/server_util_test.go index f2c542f45c0e0..d6080b0f2cb47 100644 --- a/comp/dogstatsd/server/server_util_test.go +++ b/comp/dogstatsd/server/server_util_test.go @@ -35,10 +35,11 @@ import ( replaymock "github.com/DataDog/datadog-agent/comp/dogstatsd/replay/fx-mock" serverdebug "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug" "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // This is a copy of the serverDeps struct, but without the server field. @@ -52,7 +53,7 @@ type depsWithoutServer struct { Replay replay.Component PidMap pidmap.Component Debug serverdebug.Component - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] Telemetry telemetry.Component } @@ -65,7 +66,7 @@ type serverDeps struct { Replay replay.Component PidMap pidmap.Component Debug serverdebug.Component - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] Telemetry telemetry.Component Server Component } @@ -82,10 +83,12 @@ func fulfillDepsWithConfigOverride(t testing.TB, overrides map[string]interface{ Overrides: overrides, }), replaymock.MockModule(), - compressionmock.MockModule(), pidmapimpl.Module(), demultiplexerimpl.FakeSamplerMockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), + logscompression.MockModule(), + metricscompression.MockModule(), + Module(Params{Serverless: false}), )) } @@ -98,7 +101,8 @@ func fulfillDepsWithConfigYaml(t testing.TB, yaml string) serverDeps { hostnameimpl.MockModule(), serverdebugimpl.MockModule(), replaymock.MockModule(), - compressionmock.MockModule(), + metricscompression.MockModule(), + logscompression.MockModule(), pidmapimpl.Module(), demultiplexerimpl.FakeSamplerMockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), @@ -117,10 +121,11 @@ func fulfillDepsWithInactiveServer(t *testing.T, cfg map[string]interface{}) (de }), fx.Supply(Params{Serverless: false}), replaymock.MockModule(), - compressionmock.MockModule(), pidmapimpl.Module(), demultiplexerimpl.FakeSamplerMockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), + metricscompression.MockModule(), + logscompression.MockModule(), )) s := newServerCompat(deps.Config, deps.Log, deps.Replay, 
deps.Debug, false, deps.Demultiplexer, deps.WMeta, deps.PidMap, deps.Telemetry) diff --git a/comp/dogstatsd/server/server_worker.go b/comp/dogstatsd/server/server_worker.go index 3495e677ea521..33d64011f34cd 100644 --- a/comp/dogstatsd/server/server_worker.go +++ b/comp/dogstatsd/server/server_worker.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/packets" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/metrics" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) var ( @@ -36,7 +36,7 @@ type worker struct { packetsTelemetry *packets.TelemetryStore } -func newWorker(s *server, workerNum int, wmeta optional.Option[workloadmeta.Component], packetsTelemetry *packets.TelemetryStore, stringInternerTelemetry *stringInternerTelemetry) *worker { +func newWorker(s *server, workerNum int, wmeta option.Option[workloadmeta.Component], packetsTelemetry *packets.TelemetryStore, stringInternerTelemetry *stringInternerTelemetry) *worker { var batcher *batcher if s.ServerlessMode { batcher = newServerlessBatcher(s.demultiplexer, s.tlmChannel) diff --git a/comp/dogstatsd/server/server_worker_test.go b/comp/dogstatsd/server/server_worker_test.go index 5dbe409c9d863..dd5516b1f79cd 100644 --- a/comp/dogstatsd/server/server_worker_test.go +++ b/comp/dogstatsd/server/server_worker_test.go @@ -255,13 +255,13 @@ func TestParseMetricMessageTelemetry(t *testing.T) { parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry) assert.Equal(t, float64(0), s.tlmProcessedOk.Get()) - samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "", "", false) + samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "", 0, "", false) assert.NoError(t, err) assert.Len(t, samples, 1) assert.Equal(t, float64(1), s.tlmProcessedOk.Get()) assert.Equal(t, float64(0), s.tlmProcessedError.Get()) - samples, err = s.parseMetricMessage(samples, parser, nil, "", "", false) + samples, err = s.parseMetricMessage(samples, parser, nil, "", 0, "", false) assert.Error(t, err, "invalid dogstatsd message format") assert.Len(t, samples, 1) assert.Equal(t, float64(1), s.tlmProcessedError.Get()) @@ -387,11 +387,11 @@ func TestParseEventMessageTelemetry(t *testing.T) { assert.True(t, ok) // three successful events - s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "") - s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "") - s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "") + s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "", 0) + s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "", 0) + s.parseEventMessage(parser, []byte("_e{10,10}:event title|test\\ntext|c:event-container"), "", 0) // one error event - _, err := s.parseEventMessage(parser, nil, "") + _, err := s.parseEventMessage(parser, nil, "", 0) assert.Error(t, err) processedEvents, err := telemetryMock.GetCountMetric("dogstatsd", "processed") @@ -423,11 +423,11 @@ func TestParseServiceCheckMessageTelemetry(t *testing.T) { assert.True(t, ok) // three successful events - s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "") - s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "") - 
s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "") + s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "", 0) + s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "", 0) + s.parseServiceCheckMessage(parser, []byte("_sc|service-check.name|0|c:service-check-container"), "", 0) // one error event - _, err := s.parseServiceCheckMessage(parser, nil, "") + _, err := s.parseServiceCheckMessage(parser, nil, "", 0) assert.Error(t, err) processedEvents, err := telemetryMock.GetCountMetric("dogstatsd", "processed") @@ -463,12 +463,12 @@ func TestProcessedMetricsOrigin(t *testing.T) { parser := newParser(deps.Config, s.sharedFloat64List, 1, deps.WMeta, s.stringInternerTelemetry) samples := []metrics.MetricSample{} - samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "test_container", "1", false) + samples, err := s.parseMetricMessage(samples, parser, []byte("test.metric:666|g"), "test_container", 0, "1", false) assert.NoError(err) assert.Len(samples, 1) // one thing should have been stored when we parse a metric - samples, err = s.parseMetricMessage(samples, parser, []byte("test.metric:555|g"), "test_container", "1", true) + samples, err = s.parseMetricMessage(samples, parser, []byte("test.metric:555|g"), "test_container", 0, "1", true) assert.NoError(err) assert.Len(samples, 2) assert.Len(s.cachedOriginCounters, 1, "one entry should have been cached") @@ -476,7 +476,7 @@ func TestProcessedMetricsOrigin(t *testing.T) { assert.Equal(s.cachedOrder[0].origin, "test_container") // when we parse another metric (different value) with same origin, cache should contain only one entry - samples, err = s.parseMetricMessage(samples, parser, []byte("test.second_metric:525|g"), "test_container", "2", true) + samples, err = s.parseMetricMessage(samples, parser, []byte("test.second_metric:525|g"), "test_container", 0, "2", true) assert.NoError(err) assert.Len(samples, 3) assert.Len(s.cachedOriginCounters, 1, "one entry should have been cached") @@ -486,7 +486,7 @@ func TestProcessedMetricsOrigin(t *testing.T) { assert.Equal(s.cachedOrder[0].err, map[string]string{"message_type": "metrics", "state": "error", "origin": "test_container"}) // when we parse another metric (different value) but with a different origin, we should store a new entry - samples, err = s.parseMetricMessage(samples, parser, []byte("test.second_metric:525|g"), "another_container", "3", true) + samples, err = s.parseMetricMessage(samples, parser, []byte("test.second_metric:525|g"), "another_container", 0, "3", true) assert.NoError(err) assert.Len(samples, 4) assert.Len(s.cachedOriginCounters, 2, "two entries should have been cached") @@ -500,7 +500,7 @@ func TestProcessedMetricsOrigin(t *testing.T) { // oldest one should be removed once we reach the limit of the cache maxOriginCounters = 2 - samples, err = s.parseMetricMessage(samples, parser, []byte("yetanothermetric:525|g"), "third_origin", "3", true) + samples, err = s.parseMetricMessage(samples, parser, []byte("yetanothermetric:525|g"), "third_origin", 0, "3", true) assert.NoError(err) assert.Len(samples, 5) assert.Len(s.cachedOriginCounters, 2, "two entries should have been cached, one has been evicted already") @@ -514,7 +514,7 @@ func TestProcessedMetricsOrigin(t *testing.T) { // oldest one should be removed once we reach the limit of the cache maxOriginCounters = 2 - samples, err = s.parseMetricMessage(samples, 
parser, []byte("blablabla:555|g"), "fourth_origin", "4", true) + samples, err = s.parseMetricMessage(samples, parser, []byte("blablabla:555|g"), "fourth_origin", 0, "4", true) assert.NoError(err) assert.Len(samples, 6) assert.Len(s.cachedOriginCounters, 2, "two entries should have been cached, two have been evicted already") diff --git a/comp/dogstatsd/server/serverless.go b/comp/dogstatsd/server/serverless.go index 5426c4f2132c1..ba4bb5e9f6ad5 100644 --- a/comp/dogstatsd/server/serverless.go +++ b/comp/dogstatsd/server/serverless.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/comp/dogstatsd/serverDebug/serverdebugimpl" "github.com/DataDog/datadog-agent/pkg/aggregator" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // team: agent-metrics-logs @@ -30,7 +30,7 @@ type ServerlessDogstatsd interface { //nolint:revive // TODO(AML) Fix revive linter func NewServerlessServer(demux aggregator.Demultiplexer) (ServerlessDogstatsd, error) { - wmeta := optional.NewNoneOption[workloadmeta.Component]() + wmeta := option.None[workloadmeta.Component]() s := newServerCompat(pkgconfigsetup.Datadog(), logComponentImpl.NewTemporaryLoggerWithoutInit(), replay.NewNoopTrafficCapture(), serverdebugimpl.NewServerlessServerDebug(), true, demux, wmeta, pidmapimpl.NewServerlessPidMap(), telemetry.GetCompatComponent()) err := s.start(context.TODO()) diff --git a/comp/forwarder/defaultforwarder/default_forwarder.go b/comp/forwarder/defaultforwarder/default_forwarder.go index 0ee76612b5580..e0dc2a67d2f69 100644 --- a/comp/forwarder/defaultforwarder/default_forwarder.go +++ b/comp/forwarder/defaultforwarder/default_forwarder.go @@ -27,6 +27,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/filesystem" + "github.com/DataDog/datadog-agent/pkg/util/scrubber" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -360,6 +361,7 @@ func NewDefaultForwarder(config config.Component, log log.Component, options *Op oldAPIKey, ok1 := oldValue.(string) newAPIKey, ok2 := newValue.(string) if ok1 && ok2 { + log.Infof("Updating API key: %s -> %s", scrubber.HideKeyExceptLastFiveChars(oldAPIKey), scrubber.HideKeyExceptLastFiveChars(newAPIKey)) for _, dr := range f.domainResolvers { dr.UpdateAPIKey(oldAPIKey, newAPIKey) } diff --git a/comp/forwarder/defaultforwarder/forwarder.go b/comp/forwarder/defaultforwarder/forwarder.go index 64cc62cd636df..cf2f74e51411b 100644 --- a/comp/forwarder/defaultforwarder/forwarder.go +++ b/comp/forwarder/defaultforwarder/forwarder.go @@ -7,6 +7,7 @@ package defaultforwarder import ( "context" + "strings" "go.uber.org/fx" @@ -15,6 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/status" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/resolver" "github.com/DataDog/datadog-agent/pkg/config/utils" + "github.com/DataDog/datadog-agent/pkg/util/scrubber" ) type dependencies struct { @@ -46,7 +48,11 @@ func newForwarder(dep dependencies) provides { func createOptions(params Params, config config.Component, log log.Component) *Options { var options *Options - keysPerDomain := getMultipleEndpoints(config, log) + keysPerDomain, err := utils.GetMultipleEndpoints(config) + if err != nil { + log.Error("Misconfiguration of agent endpoints: ", err) + } + if !params.withResolver { options = NewOptions(config, log, keysPerDomain) } else { @@ 
-58,16 +64,15 @@ func createOptions(params Params, config config.Component, log log.Component) *O } options.SetEnabledFeatures(params.features) - return options -} - -func getMultipleEndpoints(config config.Component, log log.Component) map[string][]string { - // Inject the config to make sure we can call GetMultipleEndpoints. - keysPerDomain, err := utils.GetMultipleEndpoints(config) - if err != nil { - log.Error("Misconfiguration of agent endpoints: ", err) + log.Infof("starting forwarder with %d endpoints", len(options.DomainResolvers)) + for _, resolver := range options.DomainResolvers { + scrubbedKeys := []string{} + for _, k := range resolver.GetAPIKeys() { + scrubbedKeys = append(scrubbedKeys, scrubber.HideKeyExceptLastFiveChars(k)) + } + log.Infof("domain '%s' has %d keys: %s", resolver.GetBaseDomain(), len(scrubbedKeys), strings.Join(scrubbedKeys, ", ")) } - return keysPerDomain + return options } // NewForwarder returns a new forwarder component. diff --git a/comp/forwarder/defaultforwarder/go.mod b/comp/forwarder/defaultforwarder/go.mod index 26fee04e02f54..13b048c59f08a 100644 --- a/comp/forwarder/defaultforwarder/go.mod +++ b/comp/forwarder/defaultforwarder/go.mod @@ -42,7 +42,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/http => ../../../pkg/util/http github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system @@ -54,12 +54,12 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/config v0.57.1 - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/log/mock v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 + github.com/DataDog/datadog-agent/comp/core/log/mock v0.61.0 github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/api v0.57.1 github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 github.com/DataDog/datadog-agent/pkg/config/utils v0.57.1 github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.56.0-rc.3 @@ -67,11 +67,11 @@ require ( github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.57.1 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 github.com/DataDog/datadog-agent/pkg/version v0.59.1 github.com/golang/protobuf v1.5.4 github.com/hashicorp/go-multierror v1.1.1 @@ 
-88,17 +88,17 @@ require ( github.com/DataDog/datadog-agent/comp/def v0.57.1 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/log/setup v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -116,7 +116,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -126,14 +126,14 @@ require ( github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -144,11 +144,11 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net 
v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/forwarder/defaultforwarder/go.sum b/comp/forwarder/defaultforwarder/go.sum index 75d47f4e88e64..11d570be42d8d 100644 --- a/comp/forwarder/defaultforwarder/go.sum +++ b/comp/forwarder/defaultforwarder/go.sum @@ -77,7 +77,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -124,8 +123,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -159,8 +158,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -177,8 +176,8 @@ github.com/prometheus/common 
v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -192,8 +191,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -204,8 +203,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -262,8 +261,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp 
v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -281,8 +280,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -304,8 +303,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -333,8 +332,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/forwarder/defaultforwarder/params.go b/comp/forwarder/defaultforwarder/params.go index 1914e068b06ec..eb8cd783d4724 100644 --- a/comp/forwarder/defaultforwarder/params.go +++ b/comp/forwarder/defaultforwarder/params.go @@ -5,9 +5,7 @@ package defaultforwarder -import ( - "github.com/DataDog/datadog-agent/pkg/util/optional" -) +import "github.com/DataDog/datadog-agent/pkg/util/option" // Params contains the parameters to create a forwarder. type Params struct { @@ -15,14 +13,14 @@ type Params struct { withResolver bool // Use optional to override Options.DisableAPIKeyChecking only if WithFeatures was called - disableAPIKeyCheckingOverride optional.Option[bool] + disableAPIKeyCheckingOverride option.Option[bool] features []Features } -type option = func(*Params) +type optionParams = func(*Params) // NewParams initializes a new Params struct -func NewParams(options ...option) Params { +func NewParams(options ...optionParams) Params { p := Params{} for _, option := range options { option(&p) @@ -31,28 +29,28 @@ func NewParams(options ...option) Params { } // WithResolvers enables the forwarder to use resolvers -func WithResolvers() option { +func WithResolvers() optionParams { return func(p *Params) { p.withResolver = true } } // WithDisableAPIKeyChecking disables the API key checking -func WithDisableAPIKeyChecking() option { +func WithDisableAPIKeyChecking() optionParams { return func(p *Params) { p.disableAPIKeyCheckingOverride.Set(true) } } // WithFeatures sets a features to the forwarder -func WithFeatures(features ...Features) option { +func WithFeatures(features ...Features) optionParams { return func(p *Params) { p.features = features } } // WithNoopForwarder sets the forwarder to use the noop forwarder -func WithNoopForwarder() option { +func WithNoopForwarder() optionParams { return func(p *Params) { p.useNoopForwarder = true } diff --git a/comp/forwarder/defaultforwarder/worker.go b/comp/forwarder/defaultforwarder/worker.go index 73552e299a836..fd379cfd76db0 100644 --- a/comp/forwarder/defaultforwarder/worker.go +++ b/comp/forwarder/defaultforwarder/worker.go @@ -78,8 +78,6 @@ func NewWorker( } if isLocal { worker.Client = newBearerAuthHTTPClient() - } else { - worker.Client = NewHTTPClient(config) } return worker } diff --git a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go index 9c4fb0d451ed5..a06a3a55c7ef2 100644 --- a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go +++ b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder.go @@ -21,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatformreceiver/eventplatformreceiverimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/config/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" @@ -30,9 +31,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/metrics" "github.com/DataDog/datadog-agent/pkg/logs/sender" + compressioncommon "github.com/DataDog/datadog-agent/pkg/util/compression" "github.com/DataDog/datadog-agent/pkg/util/fxutil" 
"github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/startstop" ) @@ -373,7 +375,7 @@ type passthroughPipelineDesc struct { // newHTTPPassthroughPipeline creates a new HTTP-only event platform pipeline that sends messages directly to intake // without any of the processing that exists in regular logs pipelines. -func newHTTPPassthroughPipeline(coreConfig model.Reader, eventPlatformReceiver eventplatformreceiver.Component, desc passthroughPipelineDesc, destinationsContext *client.DestinationsContext, pipelineID int) (p *passthroughPipeline, err error) { +func newHTTPPassthroughPipeline(coreConfig model.Reader, eventPlatformReceiver eventplatformreceiver.Component, compressor logscompression.Component, desc passthroughPipelineDesc, destinationsContext *client.DestinationsContext, pipelineID int) (p *passthroughPipeline, err error) { configKeys := config.NewLogsConfigKeys(desc.endpointsConfigPrefix, pkgconfigsetup.Datadog()) endpoints, err := config.BuildHTTPEndpointsWithConfig(pkgconfigsetup.Datadog(), configKeys, desc.hostnameEndpointPrefix, desc.intakeTrackType, config.DefaultIntakeProtocol, config.DefaultIntakeOrigin) if err != nil { @@ -412,9 +414,10 @@ func newHTTPPassthroughPipeline(coreConfig model.Reader, eventPlatformReceiver e inputChan := make(chan *message.Message, endpoints.InputChanSize) senderInput := make(chan *message.Payload, 1) // Only buffer 1 message since payloads can be large - encoder := sender.IdentityContentType + var encoder compressioncommon.Compressor + encoder = compressor.NewCompressor("none", 0) if endpoints.Main.UseCompression { - encoder = sender.NewGzipContentEncoding(endpoints.Main.CompressionLevel) + encoder = compressor.NewCompressor(endpoints.Main.CompressionKind, endpoints.Main.CompressionLevel) } var strategy sender.Strategy @@ -471,12 +474,12 @@ func joinHosts(endpoints []config.Endpoint) string { return strings.Join(additionalHosts, ",") } -func newDefaultEventPlatformForwarder(config model.Reader, eventPlatformReceiver eventplatformreceiver.Component) *defaultEventPlatformForwarder { +func newDefaultEventPlatformForwarder(config model.Reader, eventPlatformReceiver eventplatformreceiver.Component, compression logscompression.Component) *defaultEventPlatformForwarder { destinationsCtx := client.NewDestinationsContext() destinationsCtx.Start() pipelines := make(map[string]*passthroughPipeline) for i, desc := range passthroughPipelineDescs { - p, err := newHTTPPassthroughPipeline(config, eventPlatformReceiver, desc, destinationsCtx, i) + p, err := newHTTPPassthroughPipeline(config, eventPlatformReceiver, compression, desc, destinationsCtx, i) if err != nil { log.Errorf("Failed to initialize event platform forwarder pipeline. 
eventType=%s, error=%s", desc.eventType, err.Error()) continue @@ -496,6 +499,7 @@ type dependencies struct { Lc fx.Lifecycle EventPlatformReceiver eventplatformreceiver.Component Hostname hostnameinterface.Component + Compression logscompression.Component } // newEventPlatformForwarder creates a new EventPlatformForwarder @@ -503,12 +507,12 @@ func newEventPlatformForwarder(deps dependencies) eventplatform.Component { var forwarder *defaultEventPlatformForwarder if deps.Params.UseNoopEventPlatformForwarder { - forwarder = newNoopEventPlatformForwarder(deps.Hostname) + forwarder = newNoopEventPlatformForwarder(deps.Hostname, deps.Compression) } else if deps.Params.UseEventPlatformForwarder { - forwarder = newDefaultEventPlatformForwarder(deps.Config, deps.EventPlatformReceiver) + forwarder = newDefaultEventPlatformForwarder(deps.Config, deps.EventPlatformReceiver, deps.Compression) } if forwarder == nil { - return optional.NewNoneOptionPtr[eventplatform.Forwarder]() + return option.NonePtr[eventplatform.Forwarder]() } deps.Lc.Append(fx.Hook{ OnStart: func(context.Context) error { @@ -520,17 +524,17 @@ func newEventPlatformForwarder(deps dependencies) eventplatform.Component { return nil }, }) - return optional.NewOptionPtr[eventplatform.Forwarder](forwarder) + return option.NewPtr[eventplatform.Forwarder](forwarder) } // NewNoopEventPlatformForwarder returns the standard event platform forwarder with sending disabled, meaning events // will build up in each pipeline channel without being forwarded to the intake -func NewNoopEventPlatformForwarder(hostname hostnameinterface.Component) eventplatform.Forwarder { - return newNoopEventPlatformForwarder(hostname) +func NewNoopEventPlatformForwarder(hostname hostnameinterface.Component, compression logscompression.Component) eventplatform.Forwarder { + return newNoopEventPlatformForwarder(hostname, compression) } -func newNoopEventPlatformForwarder(hostname hostnameinterface.Component) *defaultEventPlatformForwarder { - f := newDefaultEventPlatformForwarder(pkgconfigsetup.Datadog(), eventplatformreceiverimpl.NewReceiver(hostname).Comp) +func newNoopEventPlatformForwarder(hostname hostnameinterface.Component, compression logscompression.Component) *defaultEventPlatformForwarder { + f := newDefaultEventPlatformForwarder(pkgconfigsetup.Datadog(), eventplatformreceiverimpl.NewReceiver(hostname).Comp, compression) // remove the senders for _, p := range f.pipelines { p.strategy = nil diff --git a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder_mock.go b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder_mock.go index 2d4d6d296ca7e..a56c7b6a94ebd 100644 --- a/comp/forwarder/eventplatform/eventplatformimpl/epforwarder_mock.go +++ b/comp/forwarder/eventplatform/eventplatformimpl/epforwarder_mock.go @@ -10,8 +10,9 @@ package eventplatformimpl import ( "github.com/DataDog/datadog-agent/comp/core/hostname" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "go.uber.org/fx" ) @@ -22,6 +23,6 @@ func MockModule() fxutil.Module { ) } -func newMockComponent(hostname hostname.Component) eventplatform.Component { - return optional.NewOptionPtr[eventplatform.Forwarder](NewNoopEventPlatformForwarder(hostname)) +func newMockComponent(hostname hostname.Component, compression 
logscompression.Component) eventplatform.Component { + return option.NewPtr[eventplatform.Forwarder](NewNoopEventPlatformForwarder(hostname, compression)) } diff --git a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go index a656b5e4b04cc..f017e7c90e707 100644 --- a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go +++ b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_no_orchestrator.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Module defines the fx options for this component. @@ -31,9 +31,9 @@ func Module(params Params) fxutil.Module { // dependencies (k8s, several MBs) while building binaries not needing these. func newOrchestratorForwarder(_ log.Component, _ config.Component, params Params) orchestrator.Component { if params.useNoopOrchestratorForwarder { - forwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) + forwarder := option.New[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) return &forwarder } - forwarder := optional.NewNoneOption[defaultforwarder.Forwarder]() + forwarder := option.None[defaultforwarder.Forwarder]() return &forwarder } diff --git a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go index 7fedf5deeece4..dbc80d10b072f 100644 --- a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go +++ b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator.go @@ -21,7 +21,7 @@ import ( orchestratorconfig "github.com/DataDog/datadog-agent/pkg/orchestrator/config" apicfg "github.com/DataDog/datadog-agent/pkg/process/util/api/config" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Module defines the fx options for this component. @@ -39,7 +39,7 @@ func newOrchestratorForwarder(log log.Component, config config.Component, lc fx. } if params.useOrchestratorForwarder { if !config.GetBool(orchestratorconfig.OrchestratorNSKey("enabled")) { - forwarder := optional.NewNoneOption[defaultforwarder.Forwarder]() + forwarder := option.None[defaultforwarder.Forwarder]() return &forwarder } orchestratorCfg := orchestratorconfig.NewDefaultOrchestratorConfig() @@ -63,11 +63,11 @@ func newOrchestratorForwarder(log log.Component, config config.Component, lc fx. 
return createComponent(forwarder) } - forwarder := optional.NewNoneOption[defaultforwarder.Forwarder]() + forwarder := option.None[defaultforwarder.Forwarder]() return &forwarder } func createComponent(forwarder defaultforwarder.Forwarder) orchestrator.Component { - o := optional.NewOption(forwarder) + o := option.New(forwarder) return &o } diff --git a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator_mock.go b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator_mock.go index 4e3c1e698ec69..3555e68ded9f7 100644 --- a/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator_mock.go +++ b/comp/forwarder/orchestrator/orchestratorimpl/forwarder_orchestrator_mock.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // MockModule defines the fx options for this mock component. @@ -24,6 +24,6 @@ func MockModule() fxutil.Module { // NewMockOrchestratorForwarder returns an orchestratorForwarder func NewMockOrchestratorForwarder() orchestrator.Component { - forwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) + forwarder := option.New[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) return &forwarder } diff --git a/comp/forwarder/orchestrator/orchestratorinterface/go.mod b/comp/forwarder/orchestrator/orchestratorinterface/go.mod index 4fc6e333ce46b..5f2a98d54efba 100644 --- a/comp/forwarder/orchestrator/orchestratorinterface/go.mod +++ b/comp/forwarder/orchestrator/orchestratorinterface/go.mod @@ -43,7 +43,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/http => ../../../../pkg/util/http github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/sort => ../../../../pkg/util/sort @@ -60,16 +60,14 @@ require github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0 // Internal deps fix version replace ( github.com/cihub/seelog => github.com/cihub/seelog v0.0.0-20151216151435-d2c6e5aa9fbf // v2.6 - github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea github.com/spf13/cast => github.com/DataDog/cast v1.8.0 - github.com/ugorji/go => github.com/ugorji/go v1.1.7 ) require ( github.com/DataDog/datadog-agent/comp/core/config v0.57.1 // indirect github.com/DataDog/datadog-agent/comp/core/flare/builder v0.57.1 // indirect github.com/DataDog/datadog-agent/comp/core/flare/types v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect @@ -78,8 +76,8 @@ require ( 
github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect @@ -90,17 +88,17 @@ require ( github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.57.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -121,7 +119,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -131,14 +129,14 @@ require ( github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + 
github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -152,11 +150,11 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/forwarder/orchestrator/orchestratorinterface/go.sum b/comp/forwarder/orchestrator/orchestratorinterface/go.sum index a055847bfd129..9c875a9422f10 100644 --- a/comp/forwarder/orchestrator/orchestratorinterface/go.sum +++ b/comp/forwarder/orchestrator/orchestratorinterface/go.sum @@ -31,7 +31,7 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -127,8 +127,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -163,8 +163,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -181,8 +181,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -197,8 +197,8 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -233,6 +233,7 @@ github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYg github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy 
v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= @@ -263,8 +264,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -282,8 +283,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -305,8 +306,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod 
h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -334,8 +335,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/haagent/impl/haagent.go b/comp/haagent/impl/haagent.go index 40635092d42f7..a2c29b8ce1761 100644 --- a/comp/haagent/impl/haagent.go +++ b/comp/haagent/impl/haagent.go @@ -66,6 +66,10 @@ func (h *haAgentImpl) SetLeader(leaderAgentHostname string) { } } +func (h *haAgentImpl) resetAgentState() { + h.state.Store(string(haagent.Unknown)) +} + // ShouldRunIntegration return true if the agent integrations should to run. // When ha-agent is disabled, the agent behave as standalone agent (non HA) and will always run all integrations. func (h *haAgentImpl) ShouldRunIntegration(integrationName string) bool { @@ -78,6 +82,15 @@ func (h *haAgentImpl) ShouldRunIntegration(integrationName string) bool { func (h *haAgentImpl) onHaAgentUpdate(updates map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) { h.log.Debugf("Updates received: count=%d", len(updates)) + // New updates arrived, but if the list of updates is empty, + // it means we don't have any updates applying to this agent anymore. + // In this case, reset HA Agent setting to default states. + if len(updates) == 0 { + h.log.Warn("Empty update received. 
Resetting Agent State to Unknown.") + h.resetAgentState() + return + } + for configPath, rawConfig := range updates { h.log.Debugf("Received config %s: %s", configPath, string(rawConfig.Config)) haAgentMsg := haAgentConfig{} diff --git a/comp/haagent/impl/haagent_test.go b/comp/haagent/impl/haagent_test.go index 88b1397f70456..8cb66ce2ec0b2 100644 --- a/comp/haagent/impl/haagent_test.go +++ b/comp/haagent/impl/haagent_test.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "go.uber.org/fx" ) @@ -106,25 +107,41 @@ func Test_RCListener(t *testing.T) { } func Test_haAgentImpl_onHaAgentUpdate(t *testing.T) { - tests := []struct { name string + initialState haagent.State updates map[string]state.RawConfig expectedApplyID string expectedApplyStatus state.ApplyStatus + expectedAgentState haagent.State }{ { - name: "successful update", + name: "successful update with leader matching current agent", + initialState: haagent.Unknown, + updates: map[string]state.RawConfig{ + testConfigID: {Config: []byte(`{"group":"testGroup01","leader":"my-agent-hostname"}`)}, + }, + expectedApplyID: testConfigID, + expectedApplyStatus: state.ApplyStatus{ + State: state.ApplyStateAcknowledged, + }, + expectedAgentState: haagent.Active, + }, + { + name: "successful update with leader NOT matching current agent", + initialState: haagent.Unknown, updates: map[string]state.RawConfig{ - testConfigID: {Config: []byte(`{"group":"testGroup01","leader":"ha-agent1"}`)}, + testConfigID: {Config: []byte(`{"group":"testGroup01","leader":"another-agent-hostname"}`)}, }, expectedApplyID: testConfigID, expectedApplyStatus: state.ApplyStatus{ State: state.ApplyStateAcknowledged, }, + expectedAgentState: haagent.Standby, }, { - name: "invalid payload", + name: "invalid payload", + initialState: haagent.Unknown, updates: map[string]state.RawConfig{ testConfigID: {Config: []byte(`invalid-json`)}, }, @@ -133,17 +150,28 @@ func Test_haAgentImpl_onHaAgentUpdate(t *testing.T) { State: state.ApplyStateError, Error: "error unmarshalling payload", }, + expectedAgentState: haagent.Unknown, }, { - name: "invalid group", + name: "invalid group", + initialState: haagent.Unknown, updates: map[string]state.RawConfig{ - testConfigID: {Config: []byte(`{"group":"invalidGroup","leader":"ha-agent1"}`)}, + testConfigID: {Config: []byte(`{"group":"invalidGroup","leader":"another-agent-hostname"}`)}, }, expectedApplyID: testConfigID, expectedApplyStatus: state.ApplyStatus{ State: state.ApplyStateError, Error: "group does not match", }, + expectedAgentState: haagent.Unknown, + }, + { + name: "empty update", + initialState: haagent.Active, + updates: map[string]state.RawConfig{}, + expectedApplyID: "", + expectedApplyStatus: state.ApplyStatus{}, + expectedAgentState: haagent.Unknown, }, } for _, tt := range tests { @@ -160,6 +188,10 @@ func Test_haAgentImpl_onHaAgentUpdate(t *testing.T) { h := newHaAgentImpl(logmock.New(t), newHaAgentConfigs(agentConfigComponent)) + if tt.initialState != "" { + h.state.Store(string(tt.initialState)) + } + var applyID string var applyStatus state.ApplyStatus applyFunc := func(id string, status state.ApplyStatus) { @@ -169,6 +201,7 @@ func Test_haAgentImpl_onHaAgentUpdate(t *testing.T) { h.onHaAgentUpdate(tt.updates, applyFunc) assert.Equal(t, tt.expectedApplyID, applyID) assert.Equal(t, tt.expectedApplyStatus, applyStatus) + assert.Equal(t, tt.expectedAgentState, 
h.GetState()) }) } } @@ -249,3 +282,17 @@ func Test_haAgentImpl_ShouldRunIntegration(t *testing.T) { }) } } + +func Test_haAgentImpl_resetAgentState(t *testing.T) { + // GIVEN + haAgent := newTestHaAgentComponent(t, nil) + haAgentComp := haAgent.Comp.(*haAgentImpl) + haAgentComp.state.Store(string(haagent.Active)) + require.Equal(t, haagent.Active, haAgentComp.GetState()) + + // WHEN + haAgentComp.resetAgentState() + + // THEN + assert.Equal(t, haagent.Unknown, haAgentComp.GetState()) +} diff --git a/comp/languagedetection/client/clientimpl/client.go b/comp/languagedetection/client/clientimpl/client.go index becde06ae5206..590bd7260a3bb 100644 --- a/comp/languagedetection/client/clientimpl/client.go +++ b/comp/languagedetection/client/clientimpl/client.go @@ -21,8 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" - + "github.com/DataDog/datadog-agent/pkg/util/option" "go.uber.org/fx" ) @@ -103,7 +102,7 @@ func newClient( deps dependencies, ) clientComp.Component { if !deps.Config.GetBool("language_detection.reporting.enabled") || !deps.Config.GetBool("language_detection.enabled") || !deps.Config.GetBool("cluster_agent.enabled") { - return optional.NewNoneOption[clientComp.Component]() + return option.None[clientComp.Component]() } ctx := context.Background() @@ -129,7 +128,7 @@ func newClient( OnStop: cl.stop, }) - return optional.NewOption[clientComp.Component](cl) + return option.New[clientComp.Component](cl) } // start starts streaming languages to the Cluster-Agent diff --git a/comp/languagedetection/client/clientimpl/client_test.go b/comp/languagedetection/client/clientimpl/client_test.go index bc69601771407..a7436b4ced74c 100644 --- a/comp/languagedetection/client/clientimpl/client_test.go +++ b/comp/languagedetection/client/clientimpl/client_test.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/languagedetection/languagemodels" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" langUtil "github.com/DataDog/datadog-agent/pkg/languagedetection/util" ) @@ -61,7 +61,7 @@ func newTestClient(t *testing.T) (*client, chan *pbgo.ParentLanguageAnnotationRe workloadmetafxmock.MockModule(workloadmeta.NewParams()), )) - optComponent := newClient(deps).(optional.Option[clientComp.Component]) + optComponent := newClient(deps).(option.Option[clientComp.Component]) comp, _ := optComponent.Get() client := comp.(*client) client.langDetectionCl = mockDCAClient @@ -107,7 +107,7 @@ func TestClientEnabled(t *testing.T) { workloadmetafxmock.MockModule(workloadmeta.NewParams()), )) - optionalCl := newClient(deps).(optional.Option[clientComp.Component]) + optionalCl := newClient(deps).(option.Option[clientComp.Component]) _, ok := optionalCl.Get() assert.Equal(t, testCase.isSet, ok) }) diff --git a/comp/logs/agent/agentimpl/agent.go b/comp/logs/agent/agentimpl/agent.go index 3a7ada8e08a23..110fee063ae9c 100644 --- a/comp/logs/agent/agentimpl/agent.go +++ b/comp/logs/agent/agentimpl/agent.go @@ -31,6 +31,7 @@ import ( integrationsimpl "github.com/DataDog/datadog-agent/comp/logs/integrations/impl" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent" rctypes "github.com/DataDog/datadog-agent/comp/remote-config/rcclient/types" + 
logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" @@ -46,9 +47,9 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/tailers" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/status/health" - "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/goroutinesdump" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/startstop" ) @@ -79,19 +80,20 @@ type dependencies struct { Config configComponent.Component InventoryAgent inventoryagent.Component Hostname hostname.Component - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] SchedulerProviders []schedulers.Scheduler `group:"log-agent-scheduler"` Tagger tagger.Component + Compression logscompression.Component } type provides struct { fx.Out - Comp optional.Option[agent.Component] + Comp option.Option[agent.Component] FlareProvider flaretypes.Provider StatusProvider statusComponent.InformationProvider RCListener rctypes.ListenerProvider - LogsReciever optional.Option[integrations.Component] + LogsReciever option.Option[integrations.Component] } // logAgent represents the data pipeline that collects, decodes, @@ -116,9 +118,10 @@ type logAgent struct { health *health.Handle diagnosticMessageReceiver *diagnostic.BufferedMessageReceiver flarecontroller *flareController.FlareController - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] schedulerProviders []schedulers.Scheduler integrationsLogs integrations.Component + compression logscompression.Component // make sure this is done only once, when we're ready prepareSchedulers sync.Once @@ -150,6 +153,7 @@ func newLogsAgent(deps dependencies) provides { schedulerProviders: deps.SchedulerProviders, integrationsLogs: integrationsLogs, tagger: deps.Tagger, + compression: deps.Compression, } deps.Lc.Append(fx.Hook{ OnStart: logsAgent.start, @@ -165,19 +169,19 @@ func newLogsAgent(deps dependencies) provides { } return provides{ - Comp: optional.NewOption[agent.Component](logsAgent), + Comp: option.New[agent.Component](logsAgent), StatusProvider: statusComponent.NewInformationProvider(NewStatusProvider()), FlareProvider: flaretypes.NewProvider(logsAgent.flarecontroller.FillFlare), RCListener: rcListener, - LogsReciever: optional.NewOption[integrations.Component](integrationsLogs), + LogsReciever: option.New[integrations.Component](integrationsLogs), } } deps.Log.Info("logs-agent disabled") return provides{ - Comp: optional.NewNoneOption[agent.Component](), + Comp: option.None[agent.Component](), StatusProvider: statusComponent.NewInformationProvider(NewStatusProvider()), - LogsReciever: optional.NewNoneOption[integrations.Component](), + LogsReciever: option.None[integrations.Component](), } } @@ -318,7 +322,7 @@ func (a *logAgent) stop(context.Context) error { case <-c: case <-timeout.C: a.log.Warn("Force close of the Logs Agent, dumping the Go routines.") - if stack, err := util.GetGoRoutinesDump(); err != nil { + if stack, err := goroutinesdump.Get(); err != nil { a.log.Warnf("can't get the Go routines dump: %s\n", err) } else { a.log.Warn(stack) diff --git 
a/comp/logs/agent/agentimpl/agent_core_init.go b/comp/logs/agent/agentimpl/agent_core_init.go index fae9804bae9b8..87e2b00810b24 100644 --- a/comp/logs/agent/agentimpl/agent_core_init.go +++ b/comp/logs/agent/agentimpl/agent_core_init.go @@ -30,11 +30,11 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/pipeline" "github.com/DataDog/datadog-agent/pkg/logs/schedulers" "github.com/DataDog/datadog-agent/pkg/status/health" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // NewAgent returns a new Logs Agent -func (a *logAgent) SetupPipeline(processingRules []*config.ProcessingRule, wmeta optional.Option[workloadmeta.Component], integrationsLogs integrations.Component) { +func (a *logAgent) SetupPipeline(processingRules []*config.ProcessingRule, wmeta option.Option[workloadmeta.Component], integrationsLogs integrations.Component) { health := health.RegisterLiveness("logs-agent") // setup the auditor @@ -46,7 +46,7 @@ func (a *logAgent) SetupPipeline(processingRules []*config.ProcessingRule, wmeta diagnosticMessageReceiver := diagnostic.NewBufferedMessageReceiver(nil, a.hostname) // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(a.config.GetInt("logs_config.pipelines"), auditor, diagnosticMessageReceiver, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config) + pipelineProvider := pipeline.NewProvider(a.config.GetInt("logs_config.pipelines"), auditor, diagnosticMessageReceiver, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config, a.compression) // setup the launchers lnchrs := launchers.NewLaunchers(a.sources, pipelineProvider, auditor, a.tracker) diff --git a/comp/logs/agent/agentimpl/agent_serverless_init.go b/comp/logs/agent/agentimpl/agent_serverless_init.go index 67711def14029..dabe8ab060135 100644 --- a/comp/logs/agent/agentimpl/agent_serverless_init.go +++ b/comp/logs/agent/agentimpl/agent_serverless_init.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/schedulers" "github.com/DataDog/datadog-agent/pkg/serverless/streamlogs" "github.com/DataDog/datadog-agent/pkg/status/health" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Note: Building the logs-agent for serverless separately removes the @@ -37,7 +37,7 @@ import ( // It is using a NullAuditor because we've nothing to do after having sent the logs to the intake. 
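The hunks above replace the pkg/util/optional package with pkg/util/option throughout, swapping optional.NewOption and optional.NewNoneOption for option.New and option.None. The agent's actual generic wrapper is not shown in this diff; the following is only a minimal sketch of an Option type with the same New/None/Get shape, so the mechanical substitutions are easier to read.

```go
// Minimal sketch of an Option type with the New/None/Get calling pattern
// used above. This is NOT the agent's pkg/util/option implementation,
// only an illustration of the shape the renamed constructors imply.
package main

import "fmt"

// Option holds either a value of type T or nothing.
type Option[T any] struct {
	value T
	set   bool
}

// New returns an Option wrapping v.
func New[T any](v T) Option[T] { return Option[T]{value: v, set: true} }

// None returns an empty Option.
func None[T any]() Option[T] { return Option[T]{} }

// Get returns the wrapped value and whether it was set.
func (o Option[T]) Get() (T, bool) { return o.value, o.set }

func main() {
	enabled := false // stand-in for a "component enabled" config check
	comp := None[string]()
	if enabled {
		comp = New("logs-agent")
	}
	if v, ok := comp.Get(); ok {
		fmt.Println("component provided:", v)
	} else {
		fmt.Println("component disabled")
	}
}
```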
func (a *logAgent) SetupPipeline( processingRules []*config.ProcessingRule, - wmeta optional.Option[workloadmeta.Component], + wmeta option.Option[workloadmeta.Component], _ integrations.Component, ) { health := health.RegisterLiveness("logs-agent") @@ -49,7 +49,7 @@ func (a *logAgent) SetupPipeline( destinationsCtx := client.NewDestinationsContext() // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewServerlessProvider(a.config.GetInt("logs_config.pipelines"), a.auditor, diagnosticMessageReceiver, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config) + pipelineProvider := pipeline.NewServerlessProvider(a.config.GetInt("logs_config.pipelines"), a.auditor, diagnosticMessageReceiver, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config, a.compression) lnchrs := launchers.NewLaunchers(a.sources, pipelineProvider, a.auditor, a.tracker) lnchrs.AddLauncher(channel.NewLauncher()) diff --git a/comp/logs/agent/agentimpl/agent_test.go b/comp/logs/agent/agentimpl/agent_test.go index 6af1f1781419e..df067c7f2fc73 100644 --- a/comp/logs/agent/agentimpl/agent_test.go +++ b/comp/logs/agent/agentimpl/agent_test.go @@ -36,6 +36,7 @@ import ( flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent/inventoryagentimpl" + compressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/config/env" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/client/http" @@ -134,11 +135,12 @@ func createAgent(suite *AgentTestSuite, endpoints *config.Endpoints) (*logAgent, started: atomic.NewUint32(0), integrationsLogs: integrationsimpl.NewLogsIntegration(), - sources: sources, - services: services, - tracker: tailers.NewTailerTracker(), - endpoints: endpoints, - tagger: fakeTagger, + sources: sources, + services: services, + tracker: tailers.NewTailerTracker(), + endpoints: endpoints, + tagger: fakeTagger, + compression: compressionfx.NewMockCompressor(), } agent.setupAgent() @@ -405,6 +407,7 @@ func (suite *AgentTestSuite) createDeps() dependencies { fx.Replace(configComponent.MockParams{Overrides: suite.configOverrides}), inventoryagentimpl.MockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), + compressionfx.MockModule(), fx.Provide(func() tagger.Component { return suite.tagger }), diff --git a/comp/logs/agent/agentimpl/mock.go b/comp/logs/agent/agentimpl/mock.go index 8d58828ff9f13..4c8a1e538a82d 100644 --- a/comp/logs/agent/agentimpl/mock.go +++ b/comp/logs/agent/agentimpl/mock.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/schedulers" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "go.uber.org/fx" ) @@ -36,7 +36,7 @@ type mockLogsAgent struct { logSources *sources.LogSources } -func newMock(deps dependencies) optional.Option[agent.Mock] { +func newMock(deps dependencies) option.Option[agent.Mock] { logsAgent := &mockLogsAgent{ hasFlushed: false, addedSchedulers: make([]schedulers.Scheduler, 0), @@ -47,7 +47,7 @@ func newMock(deps dependencies) optional.Option[agent.Mock] { OnStart: logsAgent.start, OnStop: logsAgent.stop, }) - return optional.NewOption[agent.Mock](logsAgent) + return 
option.New[agent.Mock](logsAgent) } func (a *mockLogsAgent) start(context.Context) error { diff --git a/comp/logs/agent/agentimpl/serverless.go b/comp/logs/agent/agentimpl/serverless.go index 44752f65726cc..32d725da3fa0c 100644 --- a/comp/logs/agent/agentimpl/serverless.go +++ b/comp/logs/agent/agentimpl/serverless.go @@ -14,6 +14,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/logs/agent" flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/service" "github.com/DataDog/datadog-agent/pkg/logs/sources" @@ -21,7 +22,8 @@ import ( ) // NewServerlessLogsAgent creates a new instance of the logs agent for serverless -func NewServerlessLogsAgent(tagger tagger.Component) agent.ServerlessLogsAgent { +func NewServerlessLogsAgent(tagger tagger.Component, compression logscompression.Component) agent.ServerlessLogsAgent { + logsAgent := &logAgent{ log: logComponent.NewTemporaryLoggerWithoutInit(), config: pkgconfigsetup.Datadog(), @@ -32,6 +34,7 @@ func NewServerlessLogsAgent(tagger tagger.Component) agent.ServerlessLogsAgent { tracker: tailers.NewTailerTracker(), flarecontroller: flareController.NewFlareController(), tagger: tagger, + compression: compression, } return logsAgent } diff --git a/comp/logs/agent/config/config_keys.go b/comp/logs/agent/config/config_keys.go index 0f9dbbf2b36de..ed27929cd5be0 100644 --- a/comp/logs/agent/config/config_keys.go +++ b/comp/logs/agent/config/config_keys.go @@ -109,7 +109,15 @@ func (l *LogsConfigKeys) devModeUseProto() bool { return l.getConfig().GetBool(l.getConfigKey("dev_mode_use_proto")) } +func (l *LogsConfigKeys) compressionKind() string { + return l.getConfig().GetString(l.getConfigKey("compression_kind")) +} + func (l *LogsConfigKeys) compressionLevel() int { + if l.compressionKind() == "zstd" { + return l.getConfig().GetInt(l.getConfigKey("zstd_compression_level")) + } + return l.getConfig().GetInt(l.getConfigKey("compression_level")) } diff --git a/comp/logs/agent/config/endpoints.go b/comp/logs/agent/config/endpoints.go index 58ce82d31255c..192c5c0bf5277 100644 --- a/comp/logs/agent/config/endpoints.go +++ b/comp/logs/agent/config/endpoints.go @@ -41,8 +41,9 @@ type Endpoint struct { Host string `mapstructure:"host" json:"host"` Port int - UseCompression bool `mapstructure:"use_compression" json:"use_compression"` - CompressionLevel int `mapstructure:"compression_level" json:"compression_level"` + UseCompression bool `mapstructure:"use_compression" json:"use_compression"` + CompressionKind string `mapstructure:"compression_kind" json:"compression_kind"` + CompressionLevel int `mapstructure:"compression_level" json:"compression_level"` ProxyAddress string IsMRF bool `mapstructure:"-" json:"-"` ConnectionResetInterval time.Duration @@ -99,6 +100,7 @@ func NewHTTPEndpoint(logsConfig *LogsConfigKeys) Endpoint { return Endpoint{ apiKeyGetter: logsConfig.getAPIKeyGetter(), UseCompression: logsConfig.useCompression(), + CompressionKind: logsConfig.compressionKind(), CompressionLevel: logsConfig.compressionLevel(), ConnectionResetInterval: logsConfig.connectionResetInterval(), BackoffBase: logsConfig.senderBackoffBase(), diff --git a/comp/logs/agent/config/go.mod b/comp/logs/agent/config/go.mod index 1ae81a7be7da6..a1e3c5ec84516 100644 --- a/comp/logs/agent/config/go.mod +++ 
b/comp/logs/agent/config/go.mod @@ -26,7 +26,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system @@ -38,13 +38,13 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 github.com/DataDog/viper v1.14.0 github.com/stretchr/testify v1.10.0 go.uber.org/fx v1.23.0 @@ -58,16 +58,16 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect @@ -79,16 +79,16 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect 
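In comp/logs/agent/config/config_keys.go above, compressionLevel now consults the new compression_kind key and returns zstd_compression_level when the kind is "zstd", falling back to the pre-existing compression_level key otherwise. A small sketch of that selection rule, with the agent's LogsConfigKeys lookups replaced by plain arguments (an assumption for illustration only):

```go
// Sketch of the compression-level selection added in config_keys.go.
// The real code reads these values through LogsConfigKeys; plain
// arguments stand in for the config lookups here.
package main

import "fmt"

// pickCompressionLevel returns the level to use for the configured codec:
// with kind "zstd" the dedicated zstd level wins, otherwise the legacy
// compression_level value is kept.
func pickCompressionLevel(kind string, legacyLevel, zstdLevel int) int {
	if kind == "zstd" {
		return zstdLevel
	}
	return legacyLevel
}

func main() {
	fmt.Println(pickCompressionLevel("zstd", 6, 1)) // 1
	fmt.Println(pickCompressionLevel("", 6, 1))     // 6
}
```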
github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -99,8 +99,8 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/logs/agent/config/go.sum b/comp/logs/agent/config/go.sum index dd16364891695..77eac717c35df 100644 --- a/comp/logs/agent/config/go.sum +++ b/comp/logs/agent/config/go.sum @@ -72,7 +72,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -110,8 +109,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -138,8 +137,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -156,8 +155,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -171,8 +170,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -183,8 +182,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast 
v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -239,8 +238,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -277,8 +276,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -306,8 +305,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/logs/integrations/impl/integrations.go b/comp/logs/integrations/impl/integrations.go index 7f286becf1184..654cf7cdcb0dd 100644 --- a/comp/logs/integrations/impl/integrations.go +++ b/comp/logs/integrations/impl/integrations.go @@ -27,6 +27,10 @@ func NewLogsIntegration() *Logsintegration { // RegisterIntegration registers an integration with the integrations component func (li *Logsintegration) RegisterIntegration(id string, config integration.Config) { + if len(config.LogsConfig) == 0 { + return + } + integrationConfig := integrations.IntegrationConfig{ IntegrationID: id, Config: config, diff --git a/comp/logs/integrations/impl/integrations_test.go b/comp/logs/integrations/impl/integrations_test.go index 52146c58961c8..8c101d03fdb92 100644 --- a/comp/logs/integrations/impl/integrations_test.go +++ b/comp/logs/integrations/impl/integrations_test.go @@ -7,8 +7,11 @@ package integrationsimpl import ( "testing" + "time" "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" ) func TestNewComponent(t *testing.T) { @@ -29,3 +32,25 @@ func TestSendandSubscribe(t *testing.T) { assert.Equal(t, "test log", log.Log) assert.Equal(t, "integration1", log.IntegrationID) } + +// TestReceiveEmptyConfig ensures that ReceiveIntegration doesn't send an empty +// configuration to subscribers +func TestReceiveEmptyConfig(t *testing.T) { + logsIntegration := NewLogsIntegration() + integrationChan := logsIntegration.SubscribeIntegration() + + mockConf := &integration.Config{} + mockConf.Provider = "container" + mockConf.LogsConfig = integration.Data(``) + + go func() { + logsIntegration.RegisterIntegration("12345", *mockConf) + }() + + select { + case msg := <-integrationChan: + assert.Fail(t, "Expected channel to not receive logs, instead got:", msg) + case <-time.After(100 * time.Millisecond): + assert.True(t, true, "Channel remained empty.") + } +} diff --git a/comp/metadata/bundle_test.go b/comp/metadata/bundle_test.go index 61861f356b775..9d848f436bcb5 100644 --- a/comp/metadata/bundle_test.go +++ b/comp/metadata/bundle_test.go @@ -17,16 +17,16 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/runner/runnerimpl" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) func TestBundleDependencies(t *testing.T) { fxutil.TestBundle(t, Bundle(), core.MockBundle(), - fx.Supply(optional.NewNoneOption[runnerimpl.MetadataProvider]()), + fx.Supply(option.None[runnerimpl.MetadataProvider]()), fx.Provide(func() serializer.MetricSerializer { return nil }), collectorimpl.MockModule(), - fx.Provide(func() optional.Option[agent.Component] { - return optional.NewNoneOption[agent.Component]() + fx.Provide(func() option.Option[agent.Component] { + return option.None[agent.Component]() }), authtokenimpl.Module(), ) diff --git a/comp/metadata/host/hostimpl/hosttags/tags.go b/comp/metadata/host/hostimpl/hosttags/tags.go index 92d610d49e079..64d06bfd83555 100644 --- a/comp/metadata/host/hostimpl/hosttags/tags.go +++ b/comp/metadata/host/hostimpl/hosttags/tags.go @@ -16,7 +16,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/config/model" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" - "github.com/DataDog/datadog-agent/pkg/util" 
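The comp/logs/integrations change above adds an early return so RegisterIntegration drops configurations with an empty LogsConfig instead of publishing them, and the new TestReceiveEmptyConfig verifies the subscriber channel stays quiet within a short timeout. The same guard-and-select pattern, reduced to a hypothetical publisher that is not the integrations component's real API:

```go
// Hypothetical publisher illustrating the empty-payload guard and the
// select-with-timeout check used by TestReceiveEmptyConfig above.
package main

import (
	"fmt"
	"time"
)

type publisher struct{ ch chan string }

// register forwards cfg to subscribers unless it is empty, mirroring the
// early return added to RegisterIntegration.
func (p *publisher) register(cfg string) {
	if len(cfg) == 0 {
		return // nothing to publish
	}
	p.ch <- cfg
}

func main() {
	p := &publisher{ch: make(chan string, 1)}
	go p.register("") // empty config: should never reach the channel

	select {
	case msg := <-p.ch:
		fmt.Println("unexpected message:", msg)
	case <-time.After(100 * time.Millisecond):
		fmt.Println("channel stayed empty, as expected")
	}
}
```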
"github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders/gce" "github.com/DataDog/datadog-agent/pkg/util/docker" @@ -25,6 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" k8s "github.com/DataDog/datadog-agent/pkg/util/kubernetes/hostinfo" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/sort" ) var ( @@ -184,7 +184,7 @@ func Get(ctx context.Context, cached bool, conf model.Reader) *Tags { } t := &Tags{ - System: util.SortUniqInPlace(hostTags), + System: sort.UniqInPlace(hostTags), GoogleCloudPlatform: gceTags, } diff --git a/comp/metadata/host/hostimpl/utils/meta.go b/comp/metadata/host/hostimpl/utils/meta.go index 7e96e8f4efdef..911b910483b0f 100644 --- a/comp/metadata/host/hostimpl/utils/meta.go +++ b/comp/metadata/host/hostimpl/utils/meta.go @@ -11,12 +11,12 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/config/model" - "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/cache" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" "github.com/DataDog/datadog-agent/pkg/util/ec2" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/kubelet" + netutil "github.com/DataDog/datadog-agent/pkg/util/net" ) var ( @@ -67,7 +67,7 @@ func getMeta(ctx context.Context, conf model.Reader) *Meta { m := &Meta{ SocketHostname: osHostname, Timezones: []string{tzname}, - SocketFqdn: util.Fqdn(osHostname), + SocketFqdn: netutil.Fqdn(osHostname), EC2Hostname: ec2Hostname, HostAliases: cloudproviders.GetHostAliases(ctx), InstanceID: instanceID, diff --git a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go index 4af65ceb7dd83..44310c9ba98d3 100644 --- a/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go +++ b/comp/metadata/inventoryagent/inventoryagentimpl/inventoryagent.go @@ -43,7 +43,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/hostname" httputils "github.com/DataDog/datadog-agent/pkg/util/http" "github.com/DataDog/datadog-agent/pkg/util/installinfo" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/scrubber" "github.com/DataDog/datadog-agent/pkg/util/uuid" "github.com/DataDog/datadog-agent/pkg/version" @@ -92,7 +92,7 @@ type inventoryagent struct { log log.Component conf config.Component - sysprobeConf optional.Option[sysprobeconfig.Component] + sysprobeConf option.Option[sysprobeconfig.Component] m sync.Mutex data agentMetadata hostname string @@ -104,7 +104,7 @@ type dependencies struct { Log log.Component Config config.Component - SysProbeConfig optional.Option[sysprobeconfig.Component] + SysProbeConfig option.Option[sysprobeconfig.Component] Serializer serializer.MetricSerializer AuthToken authtoken.Component } diff --git a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go index 6390ff1e08a3a..e3da9ab20fd74 100644 --- a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go +++ b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks.go @@ -35,7 +35,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/hostname" httputils "github.com/DataDog/datadog-agent/pkg/util/http" - 
"github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/uuid" ) @@ -86,8 +86,8 @@ type inventorychecksImpl struct { log log.Component conf config.Component - coll optional.Option[collector.Component] - sources optional.Option[*sources.LogSources] + coll option.Option[collector.Component] + sources option.Option[*sources.LogSources] hostname string } @@ -97,8 +97,8 @@ type dependencies struct { Log log.Component Config config.Component Serializer serializer.MetricSerializer - Coll optional.Option[collector.Component] - LogAgent optional.Option[logagent.Component] + Coll option.Option[collector.Component] + LogAgent option.Option[logagent.Component] } type provides struct { @@ -116,7 +116,7 @@ func newInventoryChecksProvider(deps dependencies) provides { conf: deps.Config, log: deps.Log, coll: deps.Coll, - sources: optional.NewNoneOption[*sources.LogSources](), + sources: option.None[*sources.LogSources](), hostname: hname, data: map[string]instanceMetadata{}, } diff --git a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go index 91033327fbd5a..535833cd53e6c 100644 --- a/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go +++ b/comp/metadata/inventorychecks/inventorychecksimpl/inventorychecks_test.go @@ -27,16 +27,17 @@ import ( logagent "github.com/DataDog/datadog-agent/comp/logs/agent" logConfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" "github.com/DataDog/datadog-agent/comp/metadata/inventoryagent/inventoryagentimpl" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/collector/check" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/serializer" serializermock "github.com/DataDog/datadog-agent/pkg/serializer/mocks" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) -func getTestInventoryChecks(t *testing.T, coll optional.Option[collector.Component], logAgent optional.Option[logagent.Component], overrides map[string]any) *inventorychecksImpl { +func getTestInventoryChecks(t *testing.T, coll option.Option[collector.Component], logAgent option.Option[logagent.Component], overrides map[string]any) *inventorychecksImpl { p := newInventoryChecksProvider( fxutil.Test[dependencies]( t, @@ -44,10 +45,10 @@ func getTestInventoryChecks(t *testing.T, coll optional.Option[collector.Compone config.MockModule(), fx.Replace(config.MockParams{Overrides: overrides}), fx.Provide(func() serializer.MetricSerializer { return serializermock.NewMetricSerializer(t) }), - fx.Provide(func() optional.Option[collector.Component] { + fx.Provide(func() option.Option[collector.Component] { return coll }), - fx.Provide(func() optional.Option[logagent.Component] { + fx.Provide(func() option.Option[logagent.Component] { return logAgent }), ), @@ -57,7 +58,7 @@ func getTestInventoryChecks(t *testing.T, coll optional.Option[collector.Compone func TestSet(t *testing.T) { ic := getTestInventoryChecks( - t, optional.NewNoneOption[collector.Component](), optional.Option[logagent.Component]{}, nil, + t, option.None[collector.Component](), option.Option[logagent.Component]{}, nil, ) ic.Set("instance_1", "key", 
"value") @@ -76,7 +77,7 @@ func TestSet(t *testing.T) { func TestSetEmptyInstance(t *testing.T) { ic := getTestInventoryChecks( - t, optional.NewNoneOption[collector.Component](), optional.Option[logagent.Component]{}, nil, + t, option.None[collector.Component](), option.Option[logagent.Component]{}, nil, ) ic.Set("", "key", "value") @@ -86,7 +87,7 @@ func TestSetEmptyInstance(t *testing.T) { func TestGetInstanceMetadata(t *testing.T) { ic := getTestInventoryChecks( - t, optional.NewNoneOption[collector.Component](), optional.Option[logagent.Component]{}, nil, + t, option.None[collector.Component](), option.Option[logagent.Component]{}, nil, ) ic.Set("instance_1", "key1", "value1") @@ -154,11 +155,12 @@ func TestGetPayload(t *testing.T) { logSources.AddSource(src) fakeTagger := mock.SetupFakeTagger(t) - mockLogAgent := fxutil.Test[optional.Option[logagent.Mock]]( + mockLogAgent := fxutil.Test[option.Option[logagent.Mock]]( t, logsBundle.MockBundle(), core.MockBundle(), inventoryagentimpl.MockModule(), + logscompression.MockModule(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), fx.Provide(func() tagger.Component { return fakeTagger @@ -175,8 +177,8 @@ func TestGetPayload(t *testing.T) { } ic := getTestInventoryChecks(t, - optional.NewOption[collector.Component](mockColl), - optional.NewOption[logagent.Component](logsAgent), + option.New[collector.Component](mockColl), + option.New[logagent.Component](logsAgent), overrides, ) @@ -257,7 +259,7 @@ func TestGetPayload(t *testing.T) { func TestFlareProviderFilename(t *testing.T) { ic := getTestInventoryChecks( - t, optional.NewNoneOption[collector.Component](), optional.Option[logagent.Component]{}, nil, + t, option.None[collector.Component](), option.Option[logagent.Component]{}, nil, ) assert.Equal(t, "checks.json", ic.FlareFileName) } @@ -265,7 +267,7 @@ func TestFlareProviderFilename(t *testing.T) { // TODO (Component): This test will be removed when the inventorychecks component will be move into the collector component func TestExpvarExist(t *testing.T) { getTestInventoryChecks( - t, optional.NewNoneOption[collector.Component](), optional.Option[logagent.Component]{}, nil, + t, option.None[collector.Component](), option.Option[logagent.Component]{}, nil, ) assert.NotNil(t, expvar.Get("inventories")) } diff --git a/comp/metadata/systemprobe/impl/system_probe.go b/comp/metadata/systemprobe/impl/system_probe.go index 04b33cc38d687..aadda34dcd98d 100644 --- a/comp/metadata/systemprobe/impl/system_probe.go +++ b/comp/metadata/systemprobe/impl/system_probe.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" "github.com/DataDog/datadog-agent/pkg/util/hostname" httputils "github.com/DataDog/datadog-agent/pkg/util/http" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -65,7 +65,7 @@ type systemprobe struct { log log.Component conf config.Component - sysprobeConf optional.Option[sysprobeconfig.Component] + sysprobeConf option.Option[sysprobeconfig.Component] hostname string } @@ -76,7 +76,7 @@ type Requires struct { Serializer serializer.MetricSerializer // We need the authtoken to be created so we requires the comp. It will be used by configFetcher. 
AuthToken authtoken.Component - SysProbeConfig optional.Option[sysprobeconfig.Component] + SysProbeConfig option.Option[sysprobeconfig.Component] } // Provides defines the output of the systemprobe metadatacomponent diff --git a/comp/metadata/systemprobe/impl/system_probe_test.go b/comp/metadata/systemprobe/impl/system_probe_test.go index 40bad07c0a505..32dd9d98fbe86 100644 --- a/comp/metadata/systemprobe/impl/system_probe_test.go +++ b/comp/metadata/systemprobe/impl/system_probe_test.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/model" serializermock "github.com/DataDog/datadog-agent/pkg/serializer/mocks" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -68,7 +68,7 @@ func getSystemProbeComp(t *testing.T, enableConfig bool) *systemprobe { fx.Provide(func() log.Component { return l }), fx.Provide(func() config.Component { return cfg }), ), - SysProbeConfig: fxutil.Test[optional.Option[sysprobeconfig.Component]](t, sysprobeconfigimpl.MockModule()), + SysProbeConfig: fxutil.Test[option.Option[sysprobeconfig.Component]](t, sysprobeconfigimpl.MockModule()), } comp := NewComponent(r).Comp diff --git a/comp/networkpath/bundle_test.go b/comp/networkpath/bundle_test.go index 6eb6868e8acba..1e26b5677a53c 100644 --- a/comp/networkpath/bundle_test.go +++ b/comp/networkpath/bundle_test.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" rdnsquerier "github.com/DataDog/datadog-agent/comp/rdnsquerier/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -19,5 +20,6 @@ func TestBundleDependencies(t *testing.T) { core.MockBundle(), eventplatformimpl.MockModule(), rdnsquerier.MockModule(), + logscompression.MockModule(), ) } diff --git a/comp/networkpath/npcollector/npcollectorimpl/npcollector_testutils.go b/comp/networkpath/npcollector/npcollectorimpl/npcollector_testutils.go index 16d777c11fd79..a553069842674 100644 --- a/comp/networkpath/npcollector/npcollectorimpl/npcollector_testutils.go +++ b/comp/networkpath/npcollector/npcollectorimpl/npcollector_testutils.go @@ -23,6 +23,7 @@ import ( "github.com/DataDog/datadog-agent/comp/ndmtmp/forwarder/forwarderimpl" "github.com/DataDog/datadog-agent/comp/networkpath/npcollector" rdnsqueriermock "github.com/DataDog/datadog-agent/comp/rdnsquerier/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" "github.com/stretchr/testify/require" "go.uber.org/fx" "go.uber.org/fx/fxtest" @@ -45,6 +46,7 @@ var testOptions = fx.Options( core.MockBundle(), eventplatformimpl.MockModule(), rdnsqueriermock.MockModule(), + logscompression.MockModule(), ) func newTestNpCollector(t fxtest.TB, agentConfigs map[string]any) (*fxtest.App, *npCollectorImpl) { diff --git a/comp/otelcol/collector-contrib/def/go.mod b/comp/otelcol/collector-contrib/def/go.mod index de371feafc0c9..296ea0cbbf9f2 100644 --- a/comp/otelcol/collector-contrib/def/go.mod +++ b/comp/otelcol/collector-contrib/def/go.mod @@ -1,22 +1,27 @@ module github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def -go 1.22.0 +go 1.23.0 -require go.opentelemetry.io/collector/otelcol v0.115.0 +require go.opentelemetry.io/collector/otelcol v0.118.0 require ( - 
go.opentelemetry.io/collector/component/componenttest v0.115.0 // indirect - go.opentelemetry.io/collector/extension/extensiontest v0.115.0 // indirect + go.opentelemetry.io/collector/component/componenttest v0.118.0 // indirect + go.opentelemetry.io/collector/connector/xconnector v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.118.0 // indirect + go.opentelemetry.io/collector/extension/extensiontest v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 // indirect + go.opentelemetry.io/collector/processor/xprocessor v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 // indirect ) require ( - go.opentelemetry.io/collector/connector/connectortest v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exportertest v0.115.0 // indirect - go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processortest v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receivertest v0.115.0 // indirect + go.opentelemetry.io/collector/connector/connectortest v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/exportertest v0.118.0 // indirect + go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/processor/processortest v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/receivertest v0.118.0 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 // indirect ) @@ -51,39 +56,34 @@ require ( github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.61.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/testify v1.10.0 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/collector/component v0.115.0 // indirect - go.opentelemetry.io/collector/component/componentstatus v0.115.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/confmap v1.21.0 // indirect - go.opentelemetry.io/collector/connector v0.115.0 // indirect - go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer v1.21.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.115.0 // indirect - go.opentelemetry.io/collector/exporter v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/extension v0.115.0 // indirect - go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 // indirect - go.opentelemetry.io/collector/featuregate v1.21.0 // 
indirect - go.opentelemetry.io/collector/pdata v1.21.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/testdata v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/processor v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/receiver v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/semconv v0.115.0 // indirect - go.opentelemetry.io/collector/service v0.115.0 // indirect + go.opentelemetry.io/collector/component v0.118.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.118.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/confmap v1.24.0 // indirect + go.opentelemetry.io/collector/connector v0.118.0 // indirect + go.opentelemetry.io/collector/consumer v1.24.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 // indirect + go.opentelemetry.io/collector/exporter v0.118.0 // indirect + go.opentelemetry.io/collector/extension v0.118.0 // indirect + go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 // indirect + go.opentelemetry.io/collector/featuregate v1.24.0 // indirect + go.opentelemetry.io/collector/pdata v1.24.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/collector/processor v0.118.0 // indirect + go.opentelemetry.io/collector/receiver v0.118.0 // indirect + go.opentelemetry.io/collector/semconv v0.118.0 // indirect + go.opentelemetry.io/collector/service v0.118.0 // indirect go.opentelemetry.io/contrib/config v0.10.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect go.opentelemetry.io/otel v1.32.0 // indirect @@ -107,13 +107,13 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gonum.org/v1/gonum v0.15.1 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/otelcol/collector-contrib/def/go.sum b/comp/otelcol/collector-contrib/def/go.sum index 258b336c23443..e06d55ff71b94 100644 --- a/comp/otelcol/collector-contrib/def/go.sum +++ b/comp/otelcol/collector-contrib/def/go.sum @@ -27,6 +27,8 @@ github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIx github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 
h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -72,8 +74,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -83,8 +85,8 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+ github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -92,8 +94,8 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -110,98 +112,96 @@ 
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opentelemetry.io/collector v0.115.0 h1:qUZ0bTeNBudMxNQ7FJKS//TxTjeJ7tfU/z22mcFavWU= -go.opentelemetry.io/collector v0.115.0/go.mod h1:66qx0xKnVvdwq60e1DEfb4e+zmM9szhPsv2hxZ/Mpj4= -go.opentelemetry.io/collector/client v1.21.0 h1:3Kes8lOFMYVxoxeAmX+DTEAkuS1iTA3NkSfqzGmygJA= -go.opentelemetry.io/collector/client v1.21.0/go.mod h1:jYJGiL0UA975OOyHmjbQSokNWt1OiviI5KjPOMUMGwc= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= -go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configauth v0.115.0 h1:xa+ALdyPgva3rZnLBh1H2oS5MsHP6JxSqMtQmcELnys= -go.opentelemetry.io/collector/config/configauth v0.115.0/go.mod h1:C7anpb3Rf4KswMT+dgOzkW9UX0z/65PLORpUw3p0VYc= -go.opentelemetry.io/collector/config/configcompression v1.21.0 h1:0zbPdZAgPFMAarwJEC4gaR6f/JBP686A3TYSgb3oa+E= -go.opentelemetry.io/collector/config/configcompression v1.21.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= -go.opentelemetry.io/collector/config/confighttp v0.115.0 h1:BIy394oNXnqySJwrCqgAJu4gWgAV5aQUDD6k1hy6C8o= -go.opentelemetry.io/collector/config/confighttp v0.115.0/go.mod h1:Wr50ut12NmCEAl4bWLJryw2EjUmJTtYRg89560Q51wc= -go.opentelemetry.io/collector/config/configopaque v1.21.0 h1:PcvRGkBk4Px8BQM7tX+kw4i3jBsfAHGoGQbtZg6Ox7U= -go.opentelemetry.io/collector/config/configopaque v1.21.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= -go.opentelemetry.io/collector/config/configretry v1.21.0 h1:ZHoOvAkEcv5BBeaJn8IQ6rQ4GMPZWW4S+W7R4QTEbZU= -go.opentelemetry.io/collector/config/configretry v1.21.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/config/configtls v1.21.0 h1:ZfrlAYgBD8lzp04W0GxwiDmUbrvKsvDYJi+wkyiXlpA= -go.opentelemetry.io/collector/config/configtls v1.21.0/go.mod h1:5EsNefPfVCMOTlOrr3wyj7LrsOgY7V8iqRl8oFZEqtw= -go.opentelemetry.io/collector/config/internal v0.115.0 h1:eVk57iufZpUXyPJFKTb1Ebx5tmcCyroIlt427r5pxS8= -go.opentelemetry.io/collector/config/internal v0.115.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/connector v0.115.0 h1:4Kkm3HQFzNT1eliMOB8FbIn+PLMRJ2qQku5Vmy3V8Ko= -go.opentelemetry.io/collector/connector v0.115.0/go.mod 
h1:+ByuAmYLrYHoKh9B+LGqUc0N2kXcN2l8Dea8Mp6brZ8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 h1:aW1f4Az0I+QJyImFccNWAXqik80bnNu27aQqi2hFfD8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0/go.mod h1:lmynB1CucydOsHa8RSSBh5roUZPfuiv65imXhtNzClM= -go.opentelemetry.io/collector/connector/connectortest v0.115.0 h1:GjtourFr0MJmlbtEPAZ/1BZCxkNAeJ0aMTlrxwftJ0k= -go.opentelemetry.io/collector/connector/connectortest v0.115.0/go.mod h1:f3KQXXNlh/XuV8elmnuVVyfY92dJCAovz10gD72OH0k= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= -go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= -go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= -go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= -go.opentelemetry.io/collector/extension/auth v0.115.0 h1:TTMokbBsSHZRFH48PvGSJmgSS8F3Rkr9MWGHZn8eJDk= -go.opentelemetry.io/collector/extension/auth v0.115.0/go.mod h1:3w+2mzeb2OYNOO4Bi41TUo4jr32ap2y7AOq64IDpxQo= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 h1:/g25Hp5aoCNKdDjIb3Fc7XRglO8yaBRFLO/IUNPnqNI= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0/go.mod h1:EQx7ETiy330O6q05S2KRZsRNDg0aQEeJmVl7Ipx+Fcw= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= -go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 h1:zYrZZocc7n0ZuDyXNkIaX0P0qk2fjMQj7NegwBJZA4k= -go.opentelemetry.io/collector/extension/zpagesextension v0.115.0/go.mod h1:OaXwNHF3MAcInBzCXrhXbTNHfIi9b7YGhXjtCFZqxNY= -go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= 
-go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 h1:6DRiSECeApFq6Jj5ug77rG53R6FzJEZBfygkyMEXdpg= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0/go.mod h1:vgQf5HQdmLQqpDHpDq2S3nTRoUuKtRcZpRTsy+UiwYw= -go.opentelemetry.io/collector/otelcol v0.115.0 h1:wZhFGrSCZcTQ4qw4ePjI2PaSrOCejoQKAjprKD/xavs= -go.opentelemetry.io/collector/otelcol v0.115.0/go.mod h1:iK8DPvaizirIYKDl1zZG7DDYUj6GkkH4KHifVVM88vk= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 h1:3l9ruCAOrssTUDnyChKNzHWOdTtfThnYaoPZ1/+5sD0= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0/go.mod h1:2Myg+law/5lcezo9PhhZ0wjCaLYdGK24s1jDWbSW9VY= -go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= -go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= -go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= -go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= -go.opentelemetry.io/collector/receiver v0.115.0 h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= -go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/collector/service v0.115.0 h1:k4GAOiI5tZgB2QKgwA6c3TeAVr7QL/ft5cOQbzUr8Iw= -go.opentelemetry.io/collector/service v0.115.0/go.mod h1:DKde9LMhNebdREecDSsqiTFLI2wRc+IoV4/wGxU6goY= +go.opentelemetry.io/collector v0.118.0 h1:OBqxppK9Ul6bzEabcHsx11pXwgp05sBpqYxIxiOkyFo= +go.opentelemetry.io/collector v0.118.0/go.mod h1:yxfijW5k9dwd9sifTBAEoItE+ahFEtOlyvex1B99uno= +go.opentelemetry.io/collector/client v1.24.0 
h1:eH7ctqDnRWNH5QVVbAvdYYdkvr8QWLkEm8FUPaaYbWE= +go.opentelemetry.io/collector/client v1.24.0/go.mod h1:C/38SYPa0tTL6ikPz/glYz6f3GVzEuT4nlEml6IBDMw= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configauth v0.118.0 h1:uBH/s9kRw/m7VWuibrkCzbXSCVLf9ElKq9NuKb0wAwk= +go.opentelemetry.io/collector/config/configauth v0.118.0/go.mod h1:uAmSGkihIENoIah6mEQ8S/HX4oiFOHZu3EoZLZwi9OI= +go.opentelemetry.io/collector/config/configcompression v1.24.0 h1:jyM6BX7wYcrh+eVSC0FMbWgy/zb9iP58SerOrvisccE= +go.opentelemetry.io/collector/config/configcompression v1.24.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/confighttp v0.118.0 h1:ey50dfySOCPgUPJ1x8Kq6CmNcv/TpZHt6cYmPhZItj0= +go.opentelemetry.io/collector/config/confighttp v0.118.0/go.mod h1:4frheVFiIfKUHuD/KAPn+u+d+EUx5GlQTNmoI1ftReA= +go.opentelemetry.io/collector/config/configopaque v1.24.0 h1:EPOprMDreZPKyIgT0/eVBvEGQVvq7ncvBCBVnWerj54= +go.opentelemetry.io/collector/config/configopaque v1.24.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry v1.24.0 h1:sIPHhNNY2YlHMIJ//63iMxIqlgDeGczId0uUb1njsPM= +go.opentelemetry.io/collector/config/configretry v1.24.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.24.0 h1:rOhl8qjIlUVVRHnwQj6/vZe6cuCYImyx7aVDBR35bqI= +go.opentelemetry.io/collector/config/configtls v1.24.0/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/connector v0.118.0 h1:amay4UriWrtydaAxjQ8/MTTaVYERlZcYLCAGcjoBejw= +go.opentelemetry.io/collector/connector v0.118.0/go.mod h1:R6jbMrHZYg21pZ0nsoo4cSHIn7Lrdpi5R3OWcDEQwhE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0 h1:hLMSTqtFWveXa3b1qJMEaWuaX3PHx7dfl8G/bsac2fE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0/go.mod h1:hm6TNLiQLe65NpENCFsFoiO8fOf3BbN4UF1heUsT73Q= +go.opentelemetry.io/collector/connector/xconnector v0.118.0 h1:0s6rwZmt8va6xd3BEZs7s2QBNFNjLv0kzYi6l44dKqc= +go.opentelemetry.io/collector/connector/xconnector v0.118.0/go.mod h1:12mJPGWo90iZrrpgOkmSd5TkejweL34V/R6AqwqJnMA= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0 h1:Cx//ZFDa6wUEoRDRYRZ/Rkb52dWNoHj2e9FdlcM9jCA= 
+go.opentelemetry.io/collector/consumer/consumererror v0.118.0/go.mod h1:2mhnzzLYR5zS2Zz4h9ZnRM8Uogu9qatcfQwGNenhing= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/exporter v0.118.0 h1:PE0vF2U+znOB8OVLPWNw40bGCoT/5QquQ8Xbz4i9Rb0= +go.opentelemetry.io/collector/exporter v0.118.0/go.mod h1:5ST3gxT/RzE/vg2bcGDtWJxlQF1ypwk50UpmdK1kUqY= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0 h1:8gWky42BcJsxoaqWbnqCDUjP3Y84hjC6RD/UWHwR7sI= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0/go.mod h1:UbpQBZvznA8YPqqcKlafVIhB6Qa4fPf2+I67MUGyNqo= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0 h1:PZAo1CFhZHfQwtzUNj+Fwcv/21pWHJHTsrIddD096fw= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0/go.mod h1:x4J+qyrRcp4DfWKqK3DLZomFTIUhedsqCQWqq6Gqps4= +go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA= +go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg= +go.opentelemetry.io/collector/extension/auth v0.118.0 h1:+eMNUBUK1JK9A3mr95BasbWE90Lxu+WlR9sqS36sJms= +go.opentelemetry.io/collector/extension/auth v0.118.0/go.mod h1:MJpYcRGSERkgOhczqTKoAhkHmcugr+YTlRhc/SpYYYI= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 h1:I/SjuacUXdBOxa6ZnVMuMKkZX+m40tUm+5YKqWnNv/c= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0/go.mod h1:IxDALY0rMvsENrVui7Y5tvvL/xHNgMKuhfiQiSHMiTQ= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4= +go.opentelemetry.io/collector/extension/xextension v0.118.0 h1:P6gvJzqnH9ma2QfnWde/E6Xu9bAzuefzIwm5iupiVPE= +go.opentelemetry.io/collector/extension/xextension v0.118.0/go.mod h1:ne4Q8ZtRlbC0Etr2hTcVkjOpVM2bE2xy1u+R80LUkDw= +go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 h1:XkaLvST4p1/i/dsk5yCwFG4HJUUr6joCbegJc2MEOrE= +go.opentelemetry.io/collector/extension/zpagesextension v0.118.0/go.mod h1:alaAK7I7UeM1Hcs/eNqIjTLIZpqrk3mD1Ua42mJ7JnU= +go.opentelemetry.io/collector/featuregate v1.24.0 h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY= +go.opentelemetry.io/collector/featuregate v1.24.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 h1:affTj1Qxjbg9dZ1x2tbV9Rs9/otZQ1lHA++L8qB5KiQ= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0/go.mod h1:9mbE68mYdtTyozr3jTtNMB1RA5F8/dt2aWVYSu6bsQ4= +go.opentelemetry.io/collector/otelcol v0.118.0 h1:uSD3wU0sO4vsw5VvWI2yUFLggLdq1BWN/nC1LJXIhMg= +go.opentelemetry.io/collector/otelcol v0.118.0/go.mod h1:OdKz/AXj+ewCwXp/acZCBIoMIYiIxeNRNkbqUXvWi+o= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile 
v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 h1:ZUVF1MYNQYZvmuL30KfP+QbVGSbFZvldBM9hgCe4J4k= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0/go.mod h1:XgG1ktGO9J1f6fasMYPWSXL9Raan/VYB9vddKKWp5hQ= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/receiver v0.118.0 h1:X4mspHmbbtwdCQZ7o370kNmdWfxRnK1FrsvEShCCKEc= +go.opentelemetry.io/collector/receiver v0.118.0/go.mod h1:wFyfu6sgrkDPLQoGOGMuChGZzkZnYcI/tPJWV4CRTzs= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0 h1:XlMr2mPsyXJsMUOqCpEoY3uCPsLZQbNA5fmVNDGB7Bw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0/go.mod h1:dtu/H1RNjhy11hTVf/XUfc02uGufMhYYdhhYBbglcUg= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 h1:dzECve9e0H3ot0JWnWPuQr9Y84RhOYSd0+CjvJskx7Y= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0/go.mod h1:Lv1nD/mSYSP64iV8k+C+mWWZZOMLRubv9d1SUory3/E= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/service v0.118.0 h1:acZ9LzUbEF5M3G7o5FgenPJVuuM2y8c4HW5JVm648L4= +go.opentelemetry.io/collector/service v0.118.0/go.mod h1:uw3cl3UtkAOrEr8UQV2lXKjyTIbhWxURaQec8kE+Pic= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw= go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= @@ -265,8 +265,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -278,8 +278,8 @@ golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -298,10 +298,10 @@ google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1: google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/comp/otelcol/collector-contrib/impl/components.go b/comp/otelcol/collector-contrib/impl/components.go index 82de6f29c43a8..f5bb3dbf72337 100644 --- a/comp/otelcol/collector-contrib/impl/components.go +++ b/comp/otelcol/collector-contrib/impl/components.go @@ -71,14 +71,14 @@ func components() (otelcol.Factories, error) { return otelcol.Factories{}, err } factories.ExtensionModules = make(map[component.Type]string, len(factories.Extensions)) - factories.ExtensionModules[zpagesextension.NewFactory().Type()] = "go.opentelemetry.io/collector/extension/zpagesextension v0.115.0" - factories.ExtensionModules[healthcheckextension.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0" - factories.ExtensionModules[pprofextension.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0" - 
factories.ExtensionModules[dockerobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.115.0" - factories.ExtensionModules[ecsobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0" - factories.ExtensionModules[ecstaskobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.115.0" - factories.ExtensionModules[hostobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.115.0" - factories.ExtensionModules[k8sobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.115.0" + factories.ExtensionModules[zpagesextension.NewFactory().Type()] = "go.opentelemetry.io/collector/extension/zpagesextension v0.117.0" + factories.ExtensionModules[healthcheckextension.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.117.0" + factories.ExtensionModules[pprofextension.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.117.0" + factories.ExtensionModules[dockerobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.117.0" + factories.ExtensionModules[ecsobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.117.0" + factories.ExtensionModules[ecstaskobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.117.0" + factories.ExtensionModules[hostobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.117.0" + factories.ExtensionModules[k8sobserver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.117.0" factories.Receivers, err = receiver.MakeFactoryMap( nopreceiver.NewFactory(), @@ -95,15 +95,15 @@ func components() (otelcol.Factories, error) { return otelcol.Factories{}, err } factories.ReceiverModules = make(map[component.Type]string, len(factories.Receivers)) - factories.ReceiverModules[nopreceiver.NewFactory().Type()] = "go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0" - factories.ReceiverModules[otlpreceiver.NewFactory().Type()] = "go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0" - factories.ReceiverModules[filelogreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0" - factories.ReceiverModules[fluentforwardreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.115.0" - factories.ReceiverModules[hostmetricsreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.115.0" - factories.ReceiverModules[jaegerreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0" - factories.ReceiverModules[prometheusreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0" - factories.ReceiverModules[receivercreator.NewFactory().Type()] = 
"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.115.0" - factories.ReceiverModules[zipkinreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0" + factories.ReceiverModules[nopreceiver.NewFactory().Type()] = "go.opentelemetry.io/collector/receiver/nopreceiver v0.117.0" + factories.ReceiverModules[otlpreceiver.NewFactory().Type()] = "go.opentelemetry.io/collector/receiver/otlpreceiver v0.117.0" + factories.ReceiverModules[filelogreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.117.0" + factories.ReceiverModules[fluentforwardreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.117.0" + factories.ReceiverModules[hostmetricsreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.117.0" + factories.ReceiverModules[jaegerreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.117.0" + factories.ReceiverModules[prometheusreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.117.0" + factories.ReceiverModules[receivercreator.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.117.0" + factories.ReceiverModules[zipkinreceiver.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.117.0" factories.Exporters, err = exporter.MakeFactoryMap( debugexporter.NewFactory(), @@ -116,11 +116,11 @@ func components() (otelcol.Factories, error) { return otelcol.Factories{}, err } factories.ExporterModules = make(map[component.Type]string, len(factories.Exporters)) - factories.ExporterModules[debugexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/debugexporter v0.115.0" - factories.ExporterModules[nopexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/nopexporter v0.115.0" - factories.ExporterModules[otlpexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0" - factories.ExporterModules[otlphttpexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0" - factories.ExporterModules[sapmexporter.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.115.0" + factories.ExporterModules[debugexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/debugexporter v0.117.0" + factories.ExporterModules[nopexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/nopexporter v0.117.0" + factories.ExporterModules[otlpexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/otlpexporter v0.117.0" + factories.ExporterModules[otlphttpexporter.NewFactory().Type()] = "go.opentelemetry.io/collector/exporter/otlphttpexporter v0.117.0" + factories.ExporterModules[sapmexporter.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.117.0" factories.Processors, err = processor.MakeFactoryMap( batchprocessor.NewFactory(), @@ -141,19 +141,19 @@ func components() (otelcol.Factories, error) { return otelcol.Factories{}, err } factories.ProcessorModules = make(map[component.Type]string, len(factories.Processors)) - 
factories.ProcessorModules[batchprocessor.NewFactory().Type()] = "go.opentelemetry.io/collector/processor/batchprocessor v0.115.0" - factories.ProcessorModules[memorylimiterprocessor.NewFactory().Type()] = "go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0" - factories.ProcessorModules[attributesprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0" - factories.ProcessorModules[cumulativetodeltaprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0" - factories.ProcessorModules[filterprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0" - factories.ProcessorModules[groupbyattrsprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.115.0" - factories.ProcessorModules[k8sattributesprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0" - factories.ProcessorModules[probabilisticsamplerprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0" - factories.ProcessorModules[resourcedetectionprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.115.0" - factories.ProcessorModules[resourceprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0" - factories.ProcessorModules[routingprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.115.0" - factories.ProcessorModules[tailsamplingprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0" - factories.ProcessorModules[transformprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0" + factories.ProcessorModules[batchprocessor.NewFactory().Type()] = "go.opentelemetry.io/collector/processor/batchprocessor v0.117.0" + factories.ProcessorModules[memorylimiterprocessor.NewFactory().Type()] = "go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.117.0" + factories.ProcessorModules[attributesprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.117.0" + factories.ProcessorModules[cumulativetodeltaprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.117.0" + factories.ProcessorModules[filterprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.117.0" + factories.ProcessorModules[groupbyattrsprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.117.0" + factories.ProcessorModules[k8sattributesprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.117.0" + factories.ProcessorModules[probabilisticsamplerprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.117.0" + 
factories.ProcessorModules[resourcedetectionprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.117.0" + factories.ProcessorModules[resourceprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.117.0" + factories.ProcessorModules[routingprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.117.0" + factories.ProcessorModules[tailsamplingprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.117.0" + factories.ProcessorModules[transformprocessor.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.117.0" factories.Connectors, err = connector.MakeFactoryMap( spanmetricsconnector.NewFactory(), @@ -162,7 +162,7 @@ func components() (otelcol.Factories, error) { return otelcol.Factories{}, err } factories.ConnectorModules = make(map[component.Type]string, len(factories.Connectors)) - factories.ConnectorModules[spanmetricsconnector.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0" + factories.ConnectorModules[spanmetricsconnector.NewFactory().Type()] = "github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.117.0" return factories, nil } diff --git a/comp/otelcol/collector-contrib/impl/go.mod b/comp/otelcol/collector-contrib/impl/go.mod index 9b14be986534c..1e93c876b230f 100644 --- a/comp/otelcol/collector-contrib/impl/go.mod +++ b/comp/otelcol/collector-contrib/impl/go.mod @@ -2,61 +2,79 @@ module github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl -go 1.22.0 +go 1.23.0 toolchain go1.23.3 require ( - github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.0.0-00010101000000-000000000000 - github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 - 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0 - go.opentelemetry.io/collector/component v0.115.0 - go.opentelemetry.io/collector/connector v0.115.0 - go.opentelemetry.io/collector/exporter v0.115.0 - go.opentelemetry.io/collector/exporter/debugexporter v0.115.0 - go.opentelemetry.io/collector/exporter/nopexporter v0.115.0 - go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0 - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0 - go.opentelemetry.io/collector/extension v0.115.0 - go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - go.opentelemetry.io/collector/otelcol v0.115.0 - go.opentelemetry.io/collector/processor v0.115.0 - go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 - go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 - go.opentelemetry.io/collector/receiver v0.115.0 - go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 + github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.61.0 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.118.0 + 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0 + go.opentelemetry.io/collector/component v0.118.0 + go.opentelemetry.io/collector/connector v0.118.0 + go.opentelemetry.io/collector/exporter v0.118.0 + go.opentelemetry.io/collector/exporter/debugexporter v0.118.0 + go.opentelemetry.io/collector/exporter/nopexporter v0.118.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 + go.opentelemetry.io/collector/extension v0.118.0 + go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + go.opentelemetry.io/collector/otelcol v0.118.0 + go.opentelemetry.io/collector/processor v0.118.0 + go.opentelemetry.io/collector/processor/batchprocessor v0.118.0 + go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.118.0 + go.opentelemetry.io/collector/receiver v0.118.0 + go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 +) + +require ( + github.com/aws/aws-sdk-go-v2 v1.33.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.29.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.54 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.33.9 // indirect + github.com/aws/smithy-go v1.22.1 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect ) require ( cloud.google.com/go/auth v0.7.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect - cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect 
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect @@ -69,28 +87,28 @@ require ( github.com/Showmax/go-fqdn v1.0.0 // indirect github.com/alecthomas/participle/v2 v2.1.1 // indirect github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect - github.com/antchfx/xmlquery v1.4.2 // indirect - github.com/antchfx/xpath v1.3.2 // indirect + github.com/antchfx/xmlquery v1.4.3 // indirect + github.com/antchfx/xpath v1.3.3 // indirect github.com/apache/thrift v0.21.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect - github.com/aws/aws-sdk-go v1.55.5 // indirect + github.com/aws/aws-sdk-go v1.55.6 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect + github.com/bmatcuk/doublestar/v4 v4.8.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 // indirect + github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect github.com/digitalocean/godo v1.118.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v27.3.1+incompatible // indirect + github.com/docker/docker v27.5.0+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/ebitengine/purego v0.8.1 // indirect github.com/elastic/go-grok v0.3.1 // indirect github.com/elastic/lunes v0.1.0 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/expr-lang/expr v1.16.9 // indirect github.com/fatih/color v1.16.0 // indirect @@ -109,7 +127,7 @@ require ( github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/gogo/googleapis v1.4.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect @@ -128,8 +146,8 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect - github.com/hashicorp/consul/api v1.30.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/hashicorp/consul/api v1.31.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect @@ -148,7 +166,7 @@ require ( github.com/imdario/mergo v0.3.16 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ionos-cloud/sdk-go/v6 v6.1.11 // indirect - github.com/jaegertracing/jaeger v1.62.0 // indirect + github.com/jaegertracing/jaeger v1.65.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jonboulle/clockwork v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -180,26 +198,26 @@ require ( 
github.com/mostynb/go-grpc-compression v1.2.3 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.118.0 // indirect + 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/openshift/api v3.9.0+incompatible // indirect @@ -207,7 +225,7 @@ require ( github.com/openzipkin/zipkin-go v0.4.3 // indirect github.com/ovh/go-ovh v1.6.0 // indirect github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect @@ -216,20 +234,20 @@ require ( github.com/prometheus-community/windows_exporter v0.27.2 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/prometheus v0.54.1 // indirect github.com/rs/cors v1.11.1 // indirect github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/signalfx/sapm-proto v0.17.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/stretchr/testify v1.10.0 // indirect - github.com/tinylib/msgp v1.2.4 // indirect + github.com/tinylib/msgp v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.12 // indirect github.com/tklauser/numcpus v0.6.1 // indirect github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6 // indirect @@ -238,97 +256,97 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.115.0 // indirect - go.opentelemetry.io/collector/client v1.21.0 // indirect - go.opentelemetry.io/collector/component/componentstatus v0.115.0 // indirect - go.opentelemetry.io/collector/component/componenttest v0.115.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.115.0 // indirect - go.opentelemetry.io/collector/config/configcompression v1.21.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.115.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.115.0 // indirect - go.opentelemetry.io/collector/config/confignet v1.21.0 // indirect - go.opentelemetry.io/collector/config/configopaque v1.21.0 // indirect - go.opentelemetry.io/collector/config/configretry v1.21.0 // indirect - 
go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/config/configtls v1.21.0 // indirect - go.opentelemetry.io/collector/config/internal v0.115.0 // indirect - go.opentelemetry.io/collector/confmap v1.21.0 // indirect - go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/connector/connectortest v0.115.0 // indirect - go.opentelemetry.io/collector/consumer v1.21.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exportertest v0.115.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.115.0 // indirect - go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 // indirect - go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 // indirect - go.opentelemetry.io/collector/extension/extensiontest v0.115.0 // indirect - go.opentelemetry.io/collector/featuregate v1.21.0 // indirect - go.opentelemetry.io/collector/filter v0.115.0 // indirect - go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 // indirect - go.opentelemetry.io/collector/internal/memorylimiter v0.115.0 // indirect - go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0 // indirect - go.opentelemetry.io/collector/pdata v1.21.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/testdata v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processortest v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receivertest v0.115.0 // indirect - go.opentelemetry.io/collector/scraper v0.115.0 // indirect - go.opentelemetry.io/collector/semconv v0.115.0 // indirect - go.opentelemetry.io/collector/service v0.115.0 // indirect + go.opentelemetry.io/collector v0.118.0 // indirect + go.opentelemetry.io/collector/client v1.24.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.118.0 // indirect + go.opentelemetry.io/collector/component/componenttest v0.118.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.118.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.24.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.118.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.118.0 // indirect + go.opentelemetry.io/collector/config/confignet v1.24.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.24.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.24.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + 
go.opentelemetry.io/collector/config/configtls v1.24.0 // indirect + go.opentelemetry.io/collector/confmap v1.24.0 // indirect + go.opentelemetry.io/collector/connector/connectortest v0.118.0 // indirect + go.opentelemetry.io/collector/connector/xconnector v0.118.0 // indirect + go.opentelemetry.io/collector/consumer v1.24.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/exportertest v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.118.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.118.0 // indirect + go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 // indirect + go.opentelemetry.io/collector/extension/extensiontest v0.118.0 // indirect + go.opentelemetry.io/collector/extension/xextension v0.118.0 // indirect + go.opentelemetry.io/collector/featuregate v1.24.0 // indirect + go.opentelemetry.io/collector/filter v0.118.0 // indirect + go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/internal/memorylimiter v0.118.0 // indirect + go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0 // indirect + go.opentelemetry.io/collector/pdata v1.24.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 // indirect + go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.118.0 // indirect + go.opentelemetry.io/collector/processor/processortest v0.118.0 // indirect + go.opentelemetry.io/collector/processor/xprocessor v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/receivertest v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 // indirect + go.opentelemetry.io/collector/scraper v0.118.0 // indirect + go.opentelemetry.io/collector/scraper/scraperhelper v0.118.0 // indirect + go.opentelemetry.io/collector/semconv v0.118.0 // indirect + go.opentelemetry.io/collector/service v0.118.0 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 // indirect go.opentelemetry.io/contrib/config v0.10.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect go.opentelemetry.io/contrib/zpages v0.56.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 // indirect - 
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect - go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/prometheus v0.55.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect go.opentelemetry.io/otel/sdk/log v0.7.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.31.0 // indirect + golang.org/x/crypto v0.32.0 // indirect golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/oauth2 v0.24.0 // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect golang.org/x/tools v0.27.0 // indirect gonum.org/v1/gonum v0.15.1 // indirect google.golang.org/api v0.188.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/comp/otelcol/collector-contrib/impl/go.sum b/comp/otelcol/collector-contrib/impl/go.sum index c97ea3c785b72..7189e9291c6e8 100644 --- a/comp/otelcol/collector-contrib/impl/go.sum +++ b/comp/otelcol/collector-contrib/impl/go.sum @@ -23,8 +23,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery 
v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -93,10 +93,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/antchfx/xmlquery v1.4.2 h1:MZKd9+wblwxfQ1zd1AdrTsqVaMjMCwow3IqkCSe00KA= -github.com/antchfx/xmlquery v1.4.2/go.mod h1:QXhvf5ldTuGqhd1SHNvvtlhhdQLks4dD0awIVhXIDTA= -github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U= -github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antchfx/xmlquery v1.4.3 h1:f6jhxCzANrWfa93O+NmRWvieVyLs+R2Szfpy+YrZaww= +github.com/antchfx/xmlquery v1.4.3/go.mod h1:AEPEEPYE9GnA2mj5Ur2L5Q5/2PycJ0N9Fusrx9b12fc= +github.com/antchfx/xpath v1.3.3 h1:tmuPQa1Uye0Ym1Zn65vxPgfltWb/Lxu2jeqIGteJSRs= +github.com/antchfx/xpath v1.3.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -108,8 +108,36 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= -github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= +github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.33.0 h1:Evgm4DI9imD81V0WwD+TN4DCwjUMdc94TrduMLbgZJs= +github.com/aws/aws-sdk-go-v2 v1.33.0/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.29.1 h1:JZhGawAyZ/EuJeBtbQYnaoftczcb2drR2Iq36Wgz4sQ= +github.com/aws/aws-sdk-go-v2/config v1.29.1/go.mod h1:7bR2YD5euaxBhzt2y/oDkt3uNRb6tjFp98GlTFueRwk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.54 h1:4UmqeOqJPvdvASZWrKlhzpRahAulBfyTJQUaYy4+hEI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.54/go.mod h1:RTdfo0P0hbbTxIhmQrOsC/PquBZGabEPnCaxxKRPSnI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 
h1:5grmdTdMsovn9kPZPI23Hhvp0ZyNm5cRO+IZFIYiAfw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24/go.mod h1:zqi7TVKTswH3Ozq28PkmBmgzG1tona7mo9G2IJg4Cis= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 h1:igORFSiH3bfq4lxKFkTSYDhJEUCYo6C8VKiWJjYwQuQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28/go.mod h1:3So8EA/aAYm36L7XIvCVwLa0s5N0P7o2b1oqnx/2R4g= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 h1:1mOW9zAUMhTSrMDssEHS/ajx8JcAj/IcftzcmNlmVLI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28/go.mod h1:kGlXVIWDfvt2Ox5zEaNglmq0hXPHgQFNMix33Tw22jA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 h1:3hH6o7Z2WeE1twvz44Aitn6Qz8DZN3Dh5IB4Eh2xq7s= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0/go.mod h1:I76S7jN0nfsYTBtuTgTsJtK2Q8yJVDgrLr5eLN64wMA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 h1:TQmKDyETFGiXVhZfQ/I0cCFziqqX58pi4tKJGYGFSz0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9/go.mod h1:HVLPK2iHQBUx7HfZeOQSEu3v2ubZaAY2YPbAm5/WUyY= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 h1:kuIyu4fTT38Kj7YCC7ouNbVZSSpqkZ+LzIfhCr6Dg+I= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.11/go.mod h1:Ro744S4fKiCCuZECXgOi760TiYylUM8ZBf6OGiZzJtY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 h1:l+dgv/64iVlQ3WsBbnn+JSbkj01jIi+SM0wYsj3y/hY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10/go.mod h1:Fzsj6lZEb8AkTE5S68OhcbBqeWPsR8RnGuKPr8Todl8= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.9 h1:BRVDbewN6VZcwr+FBOszDKvYeXY1kJ+GGMCcpghlw0U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.9/go.mod h1:f6vjfZER1M17Fokn0IzssOTMT2N8ZSq+7jnNF0tArvw= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -117,8 +145,8 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= -github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.8.0 h1:DSXtrypQddoug1459viM9X9D3dp1Z7993fw36I2kNcQ= +github.com/bmatcuk/doublestar/v4 v4.8.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -132,8 +160,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= +github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= @@ -154,8 +182,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/docker v27.3.1+incompatible h1:KttF0XoteNTicmUtBO0L2tP+J7FGRFTjaEF4k6WdhfI= -github.com/docker/docker v27.3.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.5.0+incompatible h1:um++2NcQtGRTz5eEgO6aJimo6/JxrTXC941hd05JO6U= +github.com/docker/docker v27.5.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -176,8 +204,8 @@ github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= @@ -257,8 +285,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/go-zookeeper/zk v1.0.3/go.mod 
h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= @@ -368,12 +396,12 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc h1:GN2Lv3MGO7AS6PrRoT6yV5+wkrOpcszoIsO4+4ds248= github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc/go.mod h1:+JKpmjMGhpgPL+rXZ5nsZieVzvarn86asRlBg4uNGnk= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= -github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= -github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= +github.com/hashicorp/consul/api v1.31.0 h1:32BUNLembeSRek0G/ZAM6WNfdEwYdYo8oQ4+JoqGkNQ= +github.com/hashicorp/consul/api v1.31.0/go.mod h1:2ZGIiXM3A610NmDULmCHd/aqBJj8CkMfOhswhOafxRg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= @@ -444,8 +472,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/ionos-cloud/sdk-go/v6 v6.1.11 h1:J/uRN4UWO3wCyGOeDdMKv8LWRzKu6UIkLEaes38Kzh8= github.com/ionos-cloud/sdk-go/v6 v6.1.11/go.mod h1:EzEgRIDxBELvfoa/uBN0kOQaqovLjUWEB7iW4/Q+t4k= -github.com/jaegertracing/jaeger v1.62.0 h1:YoaJ2e8oVz5sqGGlVAKSUCED8DzJ1q7PojBmZFNKoJA= -github.com/jaegertracing/jaeger v1.62.0/go.mod h1:jhEIHazwyb+a6xlRBi+p96BAvTYTSmGkghcwdQfV7FM= +github.com/jaegertracing/jaeger v1.65.0 h1:phDrZzaPUbomlN8VfxGWuPwkipYh7cU6V9q6Obf+7Fc= +github.com/jaegertracing/jaeger v1.65.0/go.mod h1:EkEqyIzI0xCjexVHURWJmZZxjswTUKSriW57eVG44yo= github.com/jarcoal/httpmock v1.3.1 
h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -621,114 +649,114 @@ github.com/onsi/gomega v1.27.3/go.mod h1:5vG284IBtfDAmDyrK+eGyZmUgUlmi+Wngqo557c github.com/onsi/gomega v1.27.4/go.mod h1:riYq/GJKh8hhoM01HN6Vmuy93AarCXCBGpvFDK3q3fQ= github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0 h1:sO4fPw0NRUibgBVvQVTqPBCBRFh0I+ODIr3HAwcWezI= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0/go.mod h1:HqzCXJ4rxXzWNYaUtCqJzXyTsCGEKSa/d+tHcyeRDY0= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.115.0 h1:u7Ht+E1ghQESffcjyaxWrXGsfSWa1VE9LKC4f2PPx84= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.115.0/go.mod h1:r3iS2mDYu+cnGjgNc8TgvuUUAN6A6/1BvR1e1YJBrqM= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.115.0 h1:RXYLbv2uTJlJTJcEa5H8/fLdX419XUlbn6mjzEgTWxc= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.115.0/go.mod h1:ngeyITKu+koaagA/sFpnuT+x0nFVBNdWq60/h5buSr4= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 h1:51D/x3xIAnWgVrY0lgdU+b+yb2aWd72uDqu9GhjRcNI= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0/go.mod h1:nLau1YUdjhtLrk4jXLPb2l9riQ1Ap4xytTLl7MBedBg= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.115.0 h1:eJk/gbfWpGKTIGLUN+EWpqM52Zf4LFTfIeMnDji+dqM= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.115.0/go.mod h1:+GPzqBFeqV90U4/bntDRPMxo/i/12lxH7GyPJmqz4ls= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.115.0 h1:790+/iSYt6bMs/OA3AfLlZl9E/Zpb0pm5X628TCncE4= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.115.0/go.mod h1:LtsKKBDZyn02DiqvuOZapGg75P/FqGQNelTI6fO12o0= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0 h1:BtYrSkQSYGJufsmbqqrpzb+BJXH2S4CKL14i1bxOFCU= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0/go.mod h1:4LQ1S3eBu+MyCNaCkBk0hIoAhvJJS851i/tY45FtDf4= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.115.0 h1:zi0LLZp26hAycIKNbmOIMGc0ZnkikrciTHl1tiJuo4Y= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.115.0/go.mod h1:a/UMjV9mrFJ5WIlpaDQ/S5KgCrg0H3kD8nlhfQRxfBI= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.115.0 h1:5PiDmieivpExBd2LchzSIvEls+cjUeJtPLXvvHxLZoI= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.115.0/go.mod h1:FIFNtgEoqcI/evvgSL+5qO/cdRUK+6ixFKKUdKpmMeA= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.115.0 h1:sMHHN4HrakORqrpsTLQQVGiDjKg4QreBJ+UCx/1OI+I= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.115.0/go.mod 
h1:q1950sX5QqCGDurVOkwatDSc5de4gpGfuPGVtFgNo3I= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 h1:HVGG31WeB6Fn2+il2/ycWj9tDP0fxOeOqD1rKCjsBSc= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0/go.mod h1:2hYojHs5daPVWECuZsPViKwty0ojuHUEmk8GEuaFqO0= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.115.0 h1:4Ycg73pYVdiF+oq+BmUq7Dkg0WKeKvBSk9AOKvBe4LU= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.115.0/go.mod h1:l2Q+MmYk2ZRDSbhX9GlJYvBXC51AqhDJAj2ne290Xik= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0 h1:SF3gOOEkfntE3zEhY80yO7BVQ5CkaK8ecic2U2AZPHE= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0/go.mod h1:jeBzX5m8O9X0LQxiryV9sJUIrn+QAwOnCBE2wZWIltQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 h1:vRQQFD4YpasQFUAdF030UWtaflSYFXK542bfWMGhOK0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0/go.mod h1:BZ7DT+0VkKR7P3I9PGEDfVa0GdB0ty41eEcejIUXF9A= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 h1:a36EJz/mb83f6ieX0v4fNDJ1jXqpeaM6DVQXeFDvdhw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0/go.mod h1:r5/40YO1eSP5ZreOmRzVOUtDr7YG39ZIUcVjHd+9Izc= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.115.0 h1:xITYM8BkEgs2Wf+PczOrVv0b1Fk4N929/xR9YtxLpkw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.115.0/go.mod h1:m+5tYnZKfNDtnZKknOfssYSXBEL5Yqse4CJMpaY5kMk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0 h1:h6zEsBtuZalQu7lKYf6ZCcj8fTocT+zxdmuOou9515Q= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0/go.mod h1:6QU/K0dGCGYorkOvJmhbDFCspy4RPxRkFjf9I64y6I0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.115.0 h1:f/HrZgTf6TF97v67uEZB3v2UtBT9aQojBvnloD3LOm4= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.115.0/go.mod h1:Hp9uSq3qNJqdxu24u7RWyuPT9x1GgEUSx9US1LLeLi0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.115.0 h1:vXDJE8YHfAoYIAlPRtODchlqb6lWnGhJxPaT2ljvN7I= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.115.0/go.mod h1:f3IgMFHIjEUEI/I+5e3KWMPq9h2PSMy9WovmvPdmlb0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.115.0 h1:4RoU3SlcNe6Dxyxfv8JVsrN8QgjBQ44Pkt9FLKK095I= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.115.0/go.mod h1:jfPlBpZT+hvp52Ldcx+srxaqyYuKxBkxOd3KtxbveCU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0 h1:8A+iBT5G23zvBPqYx32Qh4800jHFo4X9T1fpQKVQ+4E= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0/go.mod h1:AhdPvwYKu7G8LKRWzHTNQYBq27RinsMm5qSanwSA/rU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.115.0 h1:MuyDWyVoCty8HyP2CAYoRZXwINiThHovcC1Bj3+H8lk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.115.0/go.mod h1:asekVnrdzYsMJBaJtIyXOt8p07l1x0xs8X3h00sZyf0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.115.0 h1:6GIJOSEIWBt9bprARMtTjRlENrwNsJl2UzbtjOBk7A0= 
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.115.0/go.mod h1:/Fg/itwlAzDjyM0Sjenup9TbdOT+aVNPSqXsF80M8hw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.115.0 h1:l4NBxl2AELPlyqupLu1IVAjtbGOEovaKEyt0UGMsuq8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.115.0/go.mod h1:j1qF1hE/Qcy2I655yXbf2ItezXok61OW+9AAxbH2ORw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.115.0 h1:l9AsnVHr3Sp4lAGFlBJ6Ochl7mlPE0d5MNd70o4qKEM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.115.0/go.mod h1:kARk81QZpcX6L8x4fLo4Nr/z/+jpo5PxXtugBxF2DyE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0 h1:Z9p78zj9Qblw472mGkPieuX7mqduAp47rzMbFfq5evI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0/go.mod h1:mtxUxJEIQy27MaGR1yzcn/OK8NoddEgb7fumpEbKYss= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0 h1:qdZ9EqmdM19pWhPoFA7VivBTdzP2HvNwXa3CCMHYoDQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0/go.mod h1:mrL1MNrcg0zYAJ+aK9WtOH062dl2wN9DDG7mZk9H8v4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 h1:MerLKMrkM4YoGF6Di0D9yMXO02yCX8mrZAi/+jJVVeI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0/go.mod h1:R8AkVWe9G5Q0oMOapvm9HNS076E3Min8SVlmhBL3QD0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 h1:WEqcnWSy9dNSlGb8pYRBX7zhaz2ReyaeImlenbzNTB4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0/go.mod h1:6Mk71CakHUA3I6oM9hARDiyQypYyOolvb+4PFYyVEFg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 h1:eoapW0JBablApkdv4C1RUuOKfz0U6SwuKMYYSAJH6fE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0/go.mod h1:hW2AaybTRcwxJySGLC3Fh1vd2VDaQhRBfa7O7w30NS8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 h1:R9MRrO+dSkAHBQLZjuwjv2RHXHQqF2Wtm1Ki0VKD5cs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0/go.mod h1:rKXLXmwdUVcUHwTilroKSejbg3KSwLeYzNPSpkIEnv4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.115.0 h1:7tQ+WjojXhtWDFTJlwCvkjpvdTed5YkVKVQKVAu1alg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.115.0/go.mod h1:iqgJP7+N03pOIOqYaKjVWYoIKweNdFivsvWJfFw6MTQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.115.0 h1:rrIm0dyEdaHmQo6udPK1V3opkzEKa0PrZzSdY5oGqmQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.115.0/go.mod h1:AMeisxL/9gs0bzozaymUqI1/EJ9GPvtnLh/BtqtjSF8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0 h1:KghgAubxdDqP4eUQ+d2GzHXUAwtFxpSDToqFVnax0XA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0/go.mod h1:cW/BaYE6Uo7ZYHbmT0wVBktHP0SfeLqGHMf0qks7rOE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.115.0 h1:ioGiKiO0WqT3PxkzanuJsPVA24FItH6nTJeDeSMFpYA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.115.0/go.mod h1:x1W4J+pzK/Bi9jjYBYESTsPq0nRJJLZoN7cPNd0vYSU= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.115.0 h1:A9zqBtUJZ5J/0VI+B1dxuQhc2iVYpD9c54SgaKtFIN8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.115.0/go.mod h1:hG7GOrBBux/cg1fAUzvSlzYY02ekxjF9IvH4ls/nGXA= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0 h1:hAsK9I081ShnSDSKPVEHB3TLawyOmbR6bPDiQEkgo2Y= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0/go.mod h1:z8XdvlhXSYVboxS3TPGembE9kfxLAYH2PxPLMvf8wTk= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0 h1:t3BGnPpmeuxW51vISSu51PrAs49ACBCa1Yl1NfZGE5Y= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0/go.mod h1:jQLYyroEYEV1kWJApmGBgVuGUd73v+Q6EUJ6Wy7N508= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0 h1:ficXJmB6l6kfiu+R6CmggtnlQWMHUNzu2csDYA4CFSs= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0/go.mod h1:ykraxSeEVCuA43oqlMWnex78+vNQ+1dBTJUeInkqIpA= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.115.0 h1:LVe/Oh2un9CFKFYtepB9oZ6j38whFPVYl01RAVsdxHg= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.115.0/go.mod h1:mGSGQCX5dT5KUxBkuCO15CNqB+8Cb+qj0edt/oKmA34= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0 h1:6RGhDlZkekmp12EvK6JV9fiIwrdZBOJID6/Ts9tXzL4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0/go.mod h1:qZRQtGr/DAjuBqAuKJMN2cWvc9RI94lB0Oq8UyGAduo= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 h1:vwZQ7k8oqlK0bdZYTsjP/59zjQQfjSD4fNsWIWsTu2w= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0/go.mod h1:5ObSa9amrbzbYTdAK1Qhv3D/YqCxxnQhP0sk2eWB7Oo= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.115.0 h1:jQ6mIXhWqXhl8MPun9soNynsQ0lpOpOYQyAnQ28F014= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.115.0/go.mod h1:oRxNwm6HN7ckp4aJOAFC8BVBPa0UDhB8vNGTFL3QBJg= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0 h1:KbfjEsr2d/5TGWHvcaBC3lOpYAnquEraLXcis4IamAs= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0/go.mod h1:fmtZPK5RIz+2Lcm9xQZuwiM+M8/juSSeJufSxUT+J9w= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.115.0 h1:Ea5v0Q6VNIMRbXVJjHUsSbdOSkB+80sCOH7Y9yhStnY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.115.0/go.mod h1:IkiZL9vOU8qNCkrnJP0GOWPoFTED+yhB94wJbcLYcGA= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0 h1:olyiml73slGYORDjZNViW3nKiysC+K+h5yPsSBjUxQ4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0/go.mod h1:N00k1mTxzfS2clqxSP4Dxk7iX8GWbbuCq6LF8/ECk/M= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 h1:sLRTfXUFiqJ5Qe/NN5MUJxTaFt46E0Y/xjSY+KesCQc= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor 
v0.115.0/go.mod h1:361IqXD4jnfs6G+Yn7978uv1UNozhZo4yBYy4p6Nqzc= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0 h1:JSFnfWwlVGLul8p9DE6Sk6E0zaqCvbys7CqvJQD4MIs= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0/go.mod h1:cw0qzwXzKKxM7QyDcNSp9OSDLySVXyaSrgdqWPqlDk8= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.115.0 h1:2xlgF/vCUsZx9HDqhDi0XyR1QXBM67YFRyWrEq5Ydos= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.115.0/go.mod h1:vWTdohkLm9S+3Ekz4aq1jW0xt8wD2jrdOOSOJNllppo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.115.0 h1:XDlXWa6pdAp02kdfZdzZ0cjeZMNHjI7dj2dNgKdzOfo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.115.0/go.mod h1:Zo6YARAWAMCdlUmyKBq0EcuKmLjxfC2hUNd3jIAFsWE= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0 h1:hYNlyUj3F43cuv1ap19NlEEchQfs91vYeNoQ1+nswLo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0/go.mod h1:1o6wF5HJdpb2hd2eGMoQhGuTKb4F2+j/IHBJJSPdM2w= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 h1:GIyMUiud3T8nyCJP9KVhxVKvfcNQRBCde5uTCl6K/i0= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0/go.mod h1:x4hCznyUolxGt5cE/uXWRCckdIDrUYqH5hJddvdKZd4= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.115.0 h1:Di0uc2QvwEVrq1PEReZ34FpPuo1z5QhHmT0bvdTe0DU= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.115.0/go.mod h1:ODvjmz18PDQnX/BruQ8IFOpiz/HdGOpUWMEKq7f3nhA= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0 h1:h/HAHLIZnIyu85l8wOeggOyiI8z8citNAqxQktVKUpk= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0/go.mod h1:iEU0NA/i2sUREqD19JYmjKwrjMUTcddad/h1LGdSMHw= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0 h1:X0RNsPCvo+VCQNaxFL+3Zj+13/It8aY6yRmBSLcGy1c= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0/go.mod h1:ZZzyaYuuQVUA/STahm8GOJqXRPFrB9KxT7jY7EakDXA= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.118.0 h1:zHA9n518dSAz2VKqqn30upcZQL6ll9lrK1jCRnBHmhc= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.118.0/go.mod h1:9KW4qWtwCvpWmZYczNkwCwT7nI2Nat6IemDX5w/fTdI= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.118.0 h1:3ppMguebAQUpaf7vy8fbgnPNBTXRMUPzMy1qvzkG8lw= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.118.0/go.mod h1:zhFt+3GJXpvmSlNp8XnnR4kIIgsKfTBIlLXoH1SPMHY= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 h1:HKPTwhA+GNlsBpIR77DH1gwbzL2thOv/+rZzEZ27Sdk= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0/go.mod h1:MchgZvCCgNc9sXj52bvrjQQxS71UaZ0HcikwUDuGwFg= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.118.0 h1:wvirZ1Q8AgtkuJcOJMsloo3F59hYYQstUAmjRkCVcLg= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.118.0/go.mod 
h1:+GPzqBFeqV90U4/bntDRPMxo/i/12lxH7GyPJmqz4ls= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.118.0 h1:hYDOKToj0lY6FeE5lTZKznpSGVHFD/4Cfi2lPaRivjw= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.118.0/go.mod h1:PjNA+kVULMLNDKtEgRysEa49wIO6k4tn8iLZWp9gbqs= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.118.0 h1:jy3jqQbSZr6zlZefoGOgGWOsALaU4iMPK3vFF7IYM/A= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.118.0/go.mod h1:aNnCfejJO8lURzs3xgff8kCMf/X9OvIxzQotaqqMEy8= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.118.0 h1:4kumnH6249A8TIlz47mDvkJQGg5iHw/p53wTjf9R7G0= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.118.0/go.mod h1:z/AkF6hKF31PSGZ0al59SXAi0Bb+xY/l5arsL0CDHvM= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.118.0 h1:aUfAf5iF/oncctB1T54rCqN1Mq+4EXH/ODhnIZj9U70= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.118.0/go.mod h1:r7IO5NDCqknOmYV+UP6zw4Pmwgr27WwLqOktPXRHinY= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.118.0 h1:N4uUP2W9anaeClRpvyo3Voj5PdjU8juSIRKNaTW0BzA= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.118.0/go.mod h1:PBxPdHzVa6Vi5L1PONYaELTD0eJZmTN6C808r4P4O0Q= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 h1:KlIEiJprSJYUvc2XxXCu0uXM0/T/IbTKcyugEcjmnm4= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0/go.mod h1:oE1OPZITVJobOfQBHokvUlCm4BILngcmba1jkKhBcKs= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.118.0 h1:uWMYM1UrkVGBlWDZP5DxrjVvGfKM3RUaEwSeBNaW8aU= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.118.0/go.mod h1:cBXc0E/8KWMwd5CZfg2PrOeSRzeE9+uL/P02ZV86fV0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.118.0 h1:xRe7n6OGxrjAkSycWEHSRXlSO9gN8dHoRHC8mrNEqsU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.118.0/go.mod h1:6Lrr+9tQ1/cBVpMhccQ43CgUmy9YYbsu/yssNIZJxjM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0 h1:W6maz9dZiAYO3WWFMy41GoX2tzx7EPiLGiykNkiAOMI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0/go.mod h1:WmS8hAnghKAR5UGYC/sww46mKkBO4RxAqkn8K0i+ZU4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 h1:Xnwd0QEyBg6iNPUbc3CnHIb0hVjfTc+jHdFbA9VSa7k= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0/go.mod h1:rmqCuNFMNBUxgyufeU8rpVYOWau8ubr0gmSO1u+R5Hk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.118.0 h1:ID+tXs48HrBgG8FqRbBxTBTssybnBc7M7+dcY4dD5Bg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.118.0/go.mod h1:VHzkkLUJmRxbIYdbIv/8ZkaDmpMNbtJydMgbEp61GrE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 h1:cRDOmJfEOm7G369Lw47k03NIg1qY6HtO9XTwfYRLBw4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0/go.mod h1:KPphlnKqOx44kbEks3VjqQstD/892osXDVN1kn53wWE= 
+github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.118.0 h1:94Xf/jV2ewqnVRA/CUKvNKZ5p3+mEtrMcPE1Xw9lk18= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.118.0/go.mod h1:GhC+Pk3PbAIq52vmYr+d6PN4Hnxyp4lGQMbomI7Bom8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.118.0 h1:zzv0uQqa3UZ7Axiad2yVDCdPCzUMKDWLbKjRzkq7KXY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.118.0/go.mod h1:TgVgtImN3q4BNxLMWz6xLwk//UKShVerrZ4R2rGxOPo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.118.0 h1:OnZwsQGs3DKeZbyLWNZY1J2xKthKkg4Myb0OP9YN7/U= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.118.0/go.mod h1:6wbKIFyIVjtzzHEFUSvk6bKBLPEwwdVqY86N6MYVsPw= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.118.0 h1:nzm0/xJEzIWKydgsubNipphuYabJPF3dXc4I6g0dR2M= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.118.0/go.mod h1:jORSreOnfMNkLI3KgHVRCFaj/D8gMvgUAQXzXnPf858= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0 h1:dPYcq0NyUpXeJGejLvNAMZ+iaQGx0UCmNwnnn60D/Oc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0/go.mod h1:RYz6Pcxqia18V98XqWXWqXB/Qejn7vgK5PoWgMv7DwM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.118.0 h1:p/DhBHfynUpu6jO4G2zsKlPaeXnWcqdMHMZTc0JY7PQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.118.0/go.mod h1:uAVNa10cWbfJsWpf73NyVi93AIR9Kk/+ygXHKKXoWt8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.118.0 h1:jShgD4zzFxDAWXuk+5kiDuNxLc9222s4qUSISIybJo4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.118.0/go.mod h1:QV0JiEz23p+0jDeAA4IfmX9/nAhGPrn9ZEnFKqS8r7w= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.118.0 h1:vuPvyNTWyqJVp4hJ/Gr1i5Gqd89lFaaOjXtsVlLUlfs= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.118.0/go.mod h1:rv8ynKZtox4Lahm+1eG8zyyAsARoKiM0TZNqlMwDfE8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.118.0 h1:Pho1MwH+cvosN6pOinGhunBwAJyyAwFnbIW5x7N/37A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.118.0/go.mod h1:IMy3f4XjwIu+PZF9Qq5T6WZ/+mOL9l+SFjPYEQuWZh8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0 h1:8pBuMvrFhU7YLJn1uhuuv5uLz0cJUyzusFtNA//fvFI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0/go.mod h1:pPjJ7ITPSA68VT7cGK9UIJkGsvfjIJV8cjB8cnnsKr8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0 h1:DSoYrOjLv23HXpx72hl61br4ZZTj6dqtwZSGoypKWIA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0/go.mod h1:nR+r7aAbsktscJk4fGmzljblbZBMaiZcIWeKbXV+HmY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 h1:aUTSkzJExtrlHN32g8hX/cRNEo2ZmucPg+vwPqOYvhg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0/go.mod h1:a3sewj4nEozMwcNwZTHPzddS+1BnA6BaAkO/CRIGHVU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0 h1:WnOBLIbdKDdtLCmpedY35QIkCOb2yW+BxydQMEIv2Xc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry 
v0.118.0/go.mod h1:QNv8LB5TzLUHB4p413mrtLryozBRNHKwIlY2R6UirrQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 h1:zEdd1JoVEBX7Lmf/wjs+45p4rR5+HvT2iF5VcoOgK1g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0/go.mod h1:WE5ientZ87x3cySOh4D/uVUwxK82DMyCkLBJ43+ehDU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.118.0 h1:iuQWJbTtl3A/wgG7Zl/mWpcBQASXeJiWWblSfu1qSQ8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.118.0/go.mod h1:JKBSWs4Wo3B2172g6/Hcar31GM8EvlJK2lbAqElpkT0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0 h1:cNxDWIo5FNwVCEJ0OkYZG7L2FSiIoH7ASUnhjw5+yaA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0/go.mod h1:wGuwhjwdA3Sqw0gLBebku6vJ8NHqWhv8mDEOaxFsKTQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.118.0 h1:ycH2OpswYo9KWsZv7i7zaI8QQUTVZZssAC48cwThZ88= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.118.0/go.mod h1:VkFMDbe3yp1xEzLyyHhQ5SZzWFXxgzuw38SdLzEet+A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.118.0 h1:ZellmKscolOE6l5R8Cf4ndjSvXzA6sx4ItmbviMBWSQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.118.0/go.mod h1:jQKwQo7XgAUXnibEA4bq+RngO43owGFBXRqbbP50i+Y= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0 h1:Ef0H9eY8EtZ6yqZvbyEFiE5ElQNLiADYo2KVdR7a3jU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0/go.mod h1:VqUc4LGE97Qh8RddrA7+fkd4OAzhhkQ59/oE0q3TfqI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.118.0 h1:Hj5+sK/NK5lKY6aq+d19GrFE0upk22NCWoJFPQSGA8M= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.118.0/go.mod h1:6TXkJ9mQArydxXiL6Da2VM4iEyhpcGAGI43BW/SCGgo= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.118.0 h1:rcF1K6gDvX8lSXYnglnSYIlyW9wL98A95XABxvHWoaY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.118.0/go.mod h1:JjCx8GAMR29DytUD0osPum9bXyf5iobMBTmjUlo/JCk= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0 h1:D/67TEByWyRExhiV0Ihr5DZCh6WsCpaFMUaaPeyP6c8= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0/go.mod h1:qYuRkOOo0OXWAFb2YGyL+UQkyrypds9cMW+q7+dTUJM= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.118.0 h1:hoX1aUlZdrC5Y4AVWONPAFhq/UOMLL4tGGOrMDANrbw= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.118.0/go.mod h1:D3hu4pM6NK9Ouot8cPtsDxh6EcA/g1qOFEIOy8iOnKI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.118.0 h1:5ElmjGrphFCpidyucBTINYX5lZXCpJiFo0csZBGJS/k= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.118.0/go.mod h1:uUIOPbmhZNUXPo8tWn++5f/LR70hx4deGtVUkWlkBMM= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 h1:pC1e5BvBf8rjwGb56MiTUFEDHU2LSclaqRNUs3z9Snw= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0/go.mod 
h1:wZTrQ0XWb1A9XBhl1WmUKLPfqNjERKFYWT5WER70gLg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.118.0 h1:oNVf6dfJAy46JPwogw98YSGrQm30qdtrdQVoJswgLAE= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.118.0/go.mod h1:TXA8gBGYuK9NJeAJVdFaxQ/3DElUExT7kMQHiNqKWfY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.118.0 h1:p0WId+SoJIm3RMidEqsXqZ86u6+815I8AnCXQHgV27Q= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.118.0/go.mod h1:J103TDmU+aY8mMbYMuvAZWRugt4crQlmvVOg97eZckY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.118.0 h1:aTWOuC42eWr6Z/unoHiV1oQwZ27F5sszygoLE5p45CI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.118.0/go.mod h1:YerB7SYBjfS0j1zpfg7EHezUHKn9o5L5YlWG8U2uYdg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0 h1:vOVsKrrRjfOYSvOu3Qv7MIHUZSVL93tnHETBU+GGxsI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0/go.mod h1:NxqPda5zVnG8RiCgff0L2EfdIflsC/wkRTLNdlYgN/E= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0 h1:ZeOm/Hf/zCcpqIa6zbZ80uy1W0/HR/ib18rTj7cuQ4I= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0/go.mod h1:0WO9Sxt9rPjfe88wnP4SL/M09nohh3H9NX634fem0b0= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.118.0 h1:j/961n8IAbqdw6NoWrnJLTADnLzH3txAZhMocaUi+3o= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.118.0/go.mod h1:6LGm+uv2Hv0D9OCJ/7d5+2h+he/8YbQT7lV3rFrUskA= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.118.0 h1:E7R1x5fBhWKbG4F0c7vLfIYoL7a5XB9BZDagq2XDPp4= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.118.0/go.mod h1:D4vpT7Xo8mwmq7b0YFBwV5LO8dKQAtPkTVgfkRjBwaU= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.118.0 h1:zHRXkCwg0/Mz0tnb15T3sltANwMzQyJyDDnYor2is2c= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.118.0/go.mod h1:zu2HyeyHz5WG1ssJSRCsHggM06IAaEDsm0eGFFedKpQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0 h1:ABsdtuXGh1YjOkiVr19ZsaHAAfM+c7QiccF0yinhb4s= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0/go.mod h1:bpfe1oPTuiP6ot4tkPvSVYPMkYshLGjNPrJvoDk1ZCg= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 h1:ZKedpw3/P2iAW1mkPij/AP0q4bSY/3BjH5k6K50wgmk= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0/go.mod h1:Vx5ZkbyLKL01R44rHNn6FwdVrY7x4LxLMi8f1Zmxk1g= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.118.0 h1:Y5MeQVPRosTBzw5U6HSmhpB9NIYkuQxUUH2f7hScug8= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.118.0/go.mod h1:Q7BhKWXfa5IxpDwbD64mjrQVImo70WsUwcP8vIEgadw= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0 h1:hdq0EDq6gCjOWl0RfXhAcSepB52QHx7us+UcUYTbWpg= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0/go.mod 
h1:9kAczl5meDgn9zlLJJre8Q/4U43cqh9aAy3kCm/rJlk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -749,8 +777,8 @@ github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -786,8 +814,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -818,8 +846,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= @@ -836,8 +864,8 @@ github.com/sourcegraph/conc v0.3.0/go.mod 
h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIK github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -869,8 +897,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/testcontainers/testcontainers-go v0.34.0 h1:5fbgF0vIN5u+nD3IWabQwRybuB4GY8G2HHgCkbMzMHo= -github.com/testcontainers/testcontainers-go v0.34.0/go.mod h1:6P/kMkQe8yqPHfPWNulFGdFHTD8HB2vLq/231xY2iPQ= +github.com/testcontainers/testcontainers-go v0.35.0 h1:uADsZpTKFAtp8SLK+hMwSaa+X+JiERHtd4sQAFmXeMo= +github.com/testcontainers/testcontainers-go v0.35.0/go.mod h1:oEVBj5zrfJTrgjwONs1SsRbnBtH9OKl+IGl3UMcr2B4= github.com/tidwall/gjson v1.10.2 h1:APbLGOM0rrEkd8WBw9C24nllro4ajFuJu0Sc9hRz8Bo= github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= @@ -881,8 +909,8 @@ github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= github.com/tidwall/wal v1.1.8 h1:2qDSGdAdjaY3PEvHRva+9UFqgk+ef7cOiW1Qn5JH1y0= github.com/tidwall/wal v1.1.8/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= @@ -911,192 +939,198 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.115.0 h1:qUZ0bTeNBudMxNQ7FJKS//TxTjeJ7tfU/z22mcFavWU= -go.opentelemetry.io/collector v0.115.0/go.mod h1:66qx0xKnVvdwq60e1DEfb4e+zmM9szhPsv2hxZ/Mpj4= -go.opentelemetry.io/collector/client v1.21.0 h1:3Kes8lOFMYVxoxeAmX+DTEAkuS1iTA3NkSfqzGmygJA= -go.opentelemetry.io/collector/client v1.21.0/go.mod 
h1:jYJGiL0UA975OOyHmjbQSokNWt1OiviI5KjPOMUMGwc= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= -go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configauth v0.115.0 h1:xa+ALdyPgva3rZnLBh1H2oS5MsHP6JxSqMtQmcELnys= -go.opentelemetry.io/collector/config/configauth v0.115.0/go.mod h1:C7anpb3Rf4KswMT+dgOzkW9UX0z/65PLORpUw3p0VYc= -go.opentelemetry.io/collector/config/configcompression v1.21.0 h1:0zbPdZAgPFMAarwJEC4gaR6f/JBP686A3TYSgb3oa+E= -go.opentelemetry.io/collector/config/configcompression v1.21.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= -go.opentelemetry.io/collector/config/configgrpc v0.115.0 h1:gZzXSFe6hB3RUcEeAYqk1yT+TBa+X9tp6/1x29Yg2yk= -go.opentelemetry.io/collector/config/configgrpc v0.115.0/go.mod h1:107lRZ5LdQPMdGJGd4m1GhyKxyH0az2cUOqrJgTEN8E= -go.opentelemetry.io/collector/config/confighttp v0.115.0 h1:BIy394oNXnqySJwrCqgAJu4gWgAV5aQUDD6k1hy6C8o= -go.opentelemetry.io/collector/config/confighttp v0.115.0/go.mod h1:Wr50ut12NmCEAl4bWLJryw2EjUmJTtYRg89560Q51wc= -go.opentelemetry.io/collector/config/confignet v1.21.0 h1:PeQ5YrMnfftysFL/WVaSrjPOWjD6DfeABY50pf9CZxU= -go.opentelemetry.io/collector/config/confignet v1.21.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= -go.opentelemetry.io/collector/config/configopaque v1.21.0 h1:PcvRGkBk4Px8BQM7tX+kw4i3jBsfAHGoGQbtZg6Ox7U= -go.opentelemetry.io/collector/config/configopaque v1.21.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= -go.opentelemetry.io/collector/config/configretry v1.21.0 h1:ZHoOvAkEcv5BBeaJn8IQ6rQ4GMPZWW4S+W7R4QTEbZU= -go.opentelemetry.io/collector/config/configretry v1.21.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/config/configtls v1.21.0 h1:ZfrlAYgBD8lzp04W0GxwiDmUbrvKsvDYJi+wkyiXlpA= -go.opentelemetry.io/collector/config/configtls v1.21.0/go.mod h1:5EsNefPfVCMOTlOrr3wyj7LrsOgY7V8iqRl8oFZEqtw= -go.opentelemetry.io/collector/config/internal v0.115.0 h1:eVk57iufZpUXyPJFKTb1Ebx5tmcCyroIlt427r5pxS8= -go.opentelemetry.io/collector/config/internal v0.115.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 h1:YLf++Z8CMp86AanfOCWUiE7vKbb1kSjgC3a9VJoxbD4= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0/go.mod h1:aSWLYcmgZZJDNtWN1M8JKQuehoGgOxibl1KuvKTar4M= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 h1:+zukkM+3l426iGoJkXTpLB2Z8QnZFu26TkGPjh5Rn/4= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0/go.mod 
h1:BXBpQhF3n4CNLYO2n/mWZPd2U9ekpbLXLRGZrun1VfI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 h1:NYYGM+SgIlTuNGjd8eGzDr8DkvOe4q7cXon8djF9yyI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0/go.mod h1:XRYbuwqq1awFuNhLDUv4aSvn6MzqX+abcevx1O+APJI= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 h1:P3Q9RytCMY76ORPCnkkjOa4fkuFqmZiQRor+F/nPlYE= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0/go.mod h1:xhYhHK3yLQ78tsoaKPIGUfFulgy961ImOe2gATH3RQc= -go.opentelemetry.io/collector/connector v0.115.0 h1:4Kkm3HQFzNT1eliMOB8FbIn+PLMRJ2qQku5Vmy3V8Ko= -go.opentelemetry.io/collector/connector v0.115.0/go.mod h1:+ByuAmYLrYHoKh9B+LGqUc0N2kXcN2l8Dea8Mp6brZ8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 h1:aW1f4Az0I+QJyImFccNWAXqik80bnNu27aQqi2hFfD8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0/go.mod h1:lmynB1CucydOsHa8RSSBh5roUZPfuiv65imXhtNzClM= -go.opentelemetry.io/collector/connector/connectortest v0.115.0 h1:GjtourFr0MJmlbtEPAZ/1BZCxkNAeJ0aMTlrxwftJ0k= -go.opentelemetry.io/collector/connector/connectortest v0.115.0/go.mod h1:f3KQXXNlh/XuV8elmnuVVyfY92dJCAovz10gD72OH0k= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= -go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 h1:gaIhzpaGFWauiyznrQ3f++TbcdXxA5rpsX3L9uGjMM8= -go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0/go.mod h1:7oXvuGBSawS5bc413lh1KEMcXkqBcrCqZQahOdnE24U= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= -go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= -go.opentelemetry.io/collector/exporter/debugexporter v0.115.0 h1:gb9VMQhcbvYqp0SJ4Hp8R9XqOLNLsoTgNJCPKpNEaVc= -go.opentelemetry.io/collector/exporter/debugexporter v0.115.0/go.mod h1:H/HS1UJlcZPNBbOcrsGZc2sPdQDHtbOjHOxMtJkmlcU= -go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 h1:fetbc740pODH6JW+H49SW0hiAJwQE+/B0SbuIlaY2rg= -go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0/go.mod h1:oEKZ/d5BeaCK6Made9iwaeqmlT4lRbJSlW9nhIn/TwM= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= 
-go.opentelemetry.io/collector/exporter/nopexporter v0.115.0 h1:ufwLbNp7mfoSxWJcoded3D9f/nIVvCwNa/0+ZqxzkzU= -go.opentelemetry.io/collector/exporter/nopexporter v0.115.0/go.mod h1:iIJgru1t+VJVVCE5KMAKjXbq9RkK4/5FCClnWnAlGtc= -go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0 h1:Kqr31VFrQvgEMzeg8T1JSXWacjUQoZph39efKN8jBpY= -go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0/go.mod h1:5uy/gduFx2mH0GxJ84sY75NfzQJb9xYmgiL9Pf0dKF8= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0 h1:I0qzSWGbgph+iva5/jU8tkeUTkkqqcj8+UzMxg5ubF8= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0/go.mod h1:cUrv5EG12iOs5MXaecfi9K+ZATEELefpyZY6Hj4NlUo= -go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= -go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= -go.opentelemetry.io/collector/extension/auth v0.115.0 h1:TTMokbBsSHZRFH48PvGSJmgSS8F3Rkr9MWGHZn8eJDk= -go.opentelemetry.io/collector/extension/auth v0.115.0/go.mod h1:3w+2mzeb2OYNOO4Bi41TUo4jr32ap2y7AOq64IDpxQo= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0 h1:OZe7dKbZ01qodSpZU0ZYzI6zpmmzJ3UvfdBSFAbSgDw= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0/go.mod h1:fk9WCXP0x91Q64Z8HZKWTHh9PWtgoWE1KXe3n2Bff3U= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 h1:/g25Hp5aoCNKdDjIb3Fc7XRglO8yaBRFLO/IUNPnqNI= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0/go.mod h1:EQx7ETiy330O6q05S2KRZsRNDg0aQEeJmVl7Ipx+Fcw= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= -go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 h1:zYrZZocc7n0ZuDyXNkIaX0P0qk2fjMQj7NegwBJZA4k= -go.opentelemetry.io/collector/extension/zpagesextension v0.115.0/go.mod h1:OaXwNHF3MAcInBzCXrhXbTNHfIi9b7YGhXjtCFZqxNY= -go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= -go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= -go.opentelemetry.io/collector/filter v0.115.0 h1:pYnHUFDSHSjEIFZit+CU09itVkDXgV+WcV2HOkjvQcE= -go.opentelemetry.io/collector/filter v0.115.0/go.mod h1:aewQ+jmvpH88gPVWpNXiWSm+wwJVxTK4f23ex2NMd2c= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 h1:6DRiSECeApFq6Jj5ug77rG53R6FzJEZBfygkyMEXdpg= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0/go.mod h1:vgQf5HQdmLQqpDHpDq2S3nTRoUuKtRcZpRTsy+UiwYw= -go.opentelemetry.io/collector/internal/memorylimiter v0.115.0 h1:U07IJxyHZXM6eLn8cOq/Lycx6DhQZhpDOuYtIRw/d6I= -go.opentelemetry.io/collector/internal/memorylimiter v0.115.0/go.mod h1:KNcU8WVpW5y7Ij6CGnsefb7q1UZT7VvrTDhe5FKNOA4= -go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0 h1:9TL6T6ALqDpumUJ0tYIuPIg5LGo4r6eoqlNArYX116o= -go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0/go.mod h1:SgBLKMh11bOTPR1bdDZbi5MlqsoDBBFI3uBIwnei+0k= -go.opentelemetry.io/collector/otelcol v0.115.0 h1:wZhFGrSCZcTQ4qw4ePjI2PaSrOCejoQKAjprKD/xavs= -go.opentelemetry.io/collector/otelcol v0.115.0/go.mod 
h1:iK8DPvaizirIYKDl1zZG7DDYUj6GkkH4KHifVVM88vk= -go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0 h1:HNlFpQujlnvawBk8nvMGxzjDHWDCfSprxem/EpQn4u8= -go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0/go.mod h1:WsMbqYl2rm3nPFbdxQqyLXf4iu97nYLeuQ1seZIpV3Y= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 h1:3l9ruCAOrssTUDnyChKNzHWOdTtfThnYaoPZ1/+5sD0= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0/go.mod h1:2Myg+law/5lcezo9PhhZ0wjCaLYdGK24s1jDWbSW9VY= -go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= -go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= -go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 h1:dgw1jcE/YVFTs41b3Y7SerU3BBSyMEE93AYV+BAxR8E= -go.opentelemetry.io/collector/processor/batchprocessor v0.115.0/go.mod h1:imG1kDEq14UGlxyCjSCf1TUEFdSWRvF7tLoYX9nixEQ= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 h1:LCA2jwxy1PRc7X/AtRJfMdOANh5rVLdwo5PAM+gAuyo= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0/go.mod h1:gPcHyza7Rek3jfrQFxw99fcWBDkkRqBaMHcUz9yYv5I= -go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0 h1:r1UF8LPICTRXBL0685zV/CC8J4sWg/qm1g+sHOYMq2Y= -go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0/go.mod h1:3erq5umu5a7DKXo4PBm4I5yJjc6r0aJNvBV2nVSPDuE= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= -go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= -go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= -go.opentelemetry.io/collector/receiver v0.115.0 h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= -go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= -go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 h1:87dxAcHekbXqLtjcQjnK1An2PWkWAhTly+EXzPEgYOE= -go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0/go.mod h1:Llu88KNSNwvmYPRr2PMDDbVY9zHfHEbPPB4yTjjQQe0= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 h1:NqMWsGuVy6y6VKTaPeJS7NZ9KAxhE/xyGUC7GaLYm/o= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0/go.mod h1:9ituzngnjsh/YvO+Phayq9BTk/nw0rgK5ZVvX1oxULk= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= 
-go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= -go.opentelemetry.io/collector/scraper v0.115.0 h1:hbfebO7x1Xm96OwqeuLz5w7QAaB3ZMlwOkUo0XzPadc= -go.opentelemetry.io/collector/scraper v0.115.0/go.mod h1:7YoCO6/4PeExLiX1FokcydJGCQUa7lUqZsqXokJ5VZ4= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/collector/service v0.115.0 h1:k4GAOiI5tZgB2QKgwA6c3TeAVr7QL/ft5cOQbzUr8Iw= -go.opentelemetry.io/collector/service v0.115.0/go.mod h1:DKde9LMhNebdREecDSsqiTFLI2wRc+IoV4/wGxU6goY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector v0.118.0 h1:OBqxppK9Ul6bzEabcHsx11pXwgp05sBpqYxIxiOkyFo= +go.opentelemetry.io/collector v0.118.0/go.mod h1:yxfijW5k9dwd9sifTBAEoItE+ahFEtOlyvex1B99uno= +go.opentelemetry.io/collector/client v1.24.0 h1:eH7ctqDnRWNH5QVVbAvdYYdkvr8QWLkEm8FUPaaYbWE= +go.opentelemetry.io/collector/client v1.24.0/go.mod h1:C/38SYPa0tTL6ikPz/glYz6f3GVzEuT4nlEml6IBDMw= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configauth v0.118.0 h1:uBH/s9kRw/m7VWuibrkCzbXSCVLf9ElKq9NuKb0wAwk= +go.opentelemetry.io/collector/config/configauth v0.118.0/go.mod h1:uAmSGkihIENoIah6mEQ8S/HX4oiFOHZu3EoZLZwi9OI= +go.opentelemetry.io/collector/config/configcompression v1.24.0 h1:jyM6BX7wYcrh+eVSC0FMbWgy/zb9iP58SerOrvisccE= +go.opentelemetry.io/collector/config/configcompression v1.24.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/configgrpc v0.118.0 h1:if8VfsnnHwVX/E+GgehVXKh85YtAtVci+c4A/M5gPh0= +go.opentelemetry.io/collector/config/configgrpc v0.118.0/go.mod h1:TZqpu5s/iEW5XmhSnzrhXCUQ3W5qaICNvlllBf3GGcw= +go.opentelemetry.io/collector/config/confighttp v0.118.0 h1:ey50dfySOCPgUPJ1x8Kq6CmNcv/TpZHt6cYmPhZItj0= +go.opentelemetry.io/collector/config/confighttp v0.118.0/go.mod h1:4frheVFiIfKUHuD/KAPn+u+d+EUx5GlQTNmoI1ftReA= +go.opentelemetry.io/collector/config/confignet v1.24.0 h1:Je1oO3qCUI4etX9ZVyav/NkeD+sfzZQRmwMGy51Oei4= +go.opentelemetry.io/collector/config/confignet v1.24.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.24.0 h1:EPOprMDreZPKyIgT0/eVBvEGQVvq7ncvBCBVnWerj54= +go.opentelemetry.io/collector/config/configopaque v1.24.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry v1.24.0 
h1:sIPHhNNY2YlHMIJ//63iMxIqlgDeGczId0uUb1njsPM= +go.opentelemetry.io/collector/config/configretry v1.24.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.24.0 h1:rOhl8qjIlUVVRHnwQj6/vZe6cuCYImyx7aVDBR35bqI= +go.opentelemetry.io/collector/config/configtls v1.24.0/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0 h1:jAtaNR4b5gnddNzyfcpIhURSDq4rai667yV1Ngmku2Y= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0/go.mod h1:X0BuIYyscilkwApnmxlrdz0kTVWgKXq2ih8sTWm8Zio= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0 h1:QoQulv9L20MhD1TFWH1scbRoo0bxbZqF2quh1VRNMh4= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0/go.mod h1:ljIH/rWIUHJeWIDEKMRU/ufol/bcgC7ufamchtuTAwM= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0 h1:1mbj6HlVZ4LNVBYrxM5jQEJKxinpe0LtNZwI7i8pQNY= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0/go.mod h1:xM2qJmW6mB1lzFpLWIoxX/h4tUnoYTICZoqPND9YWi0= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0 h1:Ncr7a3HbVpmjAvPHd0yQM/MV2p7HqJe+zvDPmHdjSCI= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0/go.mod h1:i7omVh3uK8efpr7/fSAcOh8Xiv3FLYL26wUuON9i1WI= +go.opentelemetry.io/collector/connector v0.118.0 h1:amay4UriWrtydaAxjQ8/MTTaVYERlZcYLCAGcjoBejw= +go.opentelemetry.io/collector/connector v0.118.0/go.mod h1:R6jbMrHZYg21pZ0nsoo4cSHIn7Lrdpi5R3OWcDEQwhE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0 h1:hLMSTqtFWveXa3b1qJMEaWuaX3PHx7dfl8G/bsac2fE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0/go.mod h1:hm6TNLiQLe65NpENCFsFoiO8fOf3BbN4UF1heUsT73Q= +go.opentelemetry.io/collector/connector/xconnector v0.118.0 h1:0s6rwZmt8va6xd3BEZs7s2QBNFNjLv0kzYi6l44dKqc= +go.opentelemetry.io/collector/connector/xconnector v0.118.0/go.mod h1:12mJPGWo90iZrrpgOkmSd5TkejweL34V/R6AqwqJnMA= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0 h1:Cx//ZFDa6wUEoRDRYRZ/Rkb52dWNoHj2e9FdlcM9jCA= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0/go.mod h1:2mhnzzLYR5zS2Zz4h9ZnRM8Uogu9qatcfQwGNenhing= +go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0 h1:/kkWdw1PQtPb1noZMTt6tbgP1ntWdJ835u1o45nYhTg= +go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0/go.mod h1:2mdXnTT0nPd/KTG9w29cc1OGKBLzL2HW+x/o7QVpCpI= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod 
h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/exporter v0.118.0 h1:PE0vF2U+znOB8OVLPWNw40bGCoT/5QquQ8Xbz4i9Rb0= +go.opentelemetry.io/collector/exporter v0.118.0/go.mod h1:5ST3gxT/RzE/vg2bcGDtWJxlQF1ypwk50UpmdK1kUqY= +go.opentelemetry.io/collector/exporter/debugexporter v0.118.0 h1:MUZl270SJSU/fDpIr5cJ+JEPrK6OEsHllmKauWYhxxQ= +go.opentelemetry.io/collector/exporter/debugexporter v0.118.0/go.mod h1:SW3j4Bl3uB/nbTC1D0hog9TcelVot9RXQnScCwx8azw= +go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0 h1:wC4IyE98DR4eXVyT7EnA4iJ6s+sbUTZVq/5KoVWSKDw= +go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0/go.mod h1:spjZv9QX+pCcx/ECSqlo/UKCYJzp2rR5NsvIgpfdUxQ= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0 h1:8gWky42BcJsxoaqWbnqCDUjP3Y84hjC6RD/UWHwR7sI= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0/go.mod h1:UbpQBZvznA8YPqqcKlafVIhB6Qa4fPf2+I67MUGyNqo= +go.opentelemetry.io/collector/exporter/nopexporter v0.118.0 h1:YX+pX1bVv3IJtBmrAN0waJnFWe9ynCfIRhvmVMTg4Cs= +go.opentelemetry.io/collector/exporter/nopexporter v0.118.0/go.mod h1:vWBRmNyRqN7nzu7sXjrSuVZPnpKZnKAG4ct01jL8xrg= +go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 h1:kfVfskZEroh3zs8HmdCLeo9weAJT5oedd+04McXEBSU= +go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0/go.mod h1:iyvbf05lZdh+KObvNF0uEpaaV9YoQNofm1RRamWbq78= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 h1:8ShK60uf6nY6TlSYBZ2Y7eh3sv0WwNkUKgmh3P1U/2U= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0/go.mod h1:UJXry//sSRs04eg35nZkT1wxP43tPxz/3wbf26eLRkc= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0 h1:PZAo1CFhZHfQwtzUNj+Fwcv/21pWHJHTsrIddD096fw= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0/go.mod h1:x4J+qyrRcp4DfWKqK3DLZomFTIUhedsqCQWqq6Gqps4= +go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA= +go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg= +go.opentelemetry.io/collector/extension/auth v0.118.0 h1:+eMNUBUK1JK9A3mr95BasbWE90Lxu+WlR9sqS36sJms= +go.opentelemetry.io/collector/extension/auth v0.118.0/go.mod h1:MJpYcRGSERkgOhczqTKoAhkHmcugr+YTlRhc/SpYYYI= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0 h1:KIORXNc71vfpQrrZOntiZesRCZtQ8alrASWVT/zZkyo= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0/go.mod h1:0ZlSP9NPAfTRQd6Tx4mOH0IWrp6ufHaVN//L9Mb87gM= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 h1:I/SjuacUXdBOxa6ZnVMuMKkZX+m40tUm+5YKqWnNv/c= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0/go.mod h1:IxDALY0rMvsENrVui7Y5tvvL/xHNgMKuhfiQiSHMiTQ= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4= +go.opentelemetry.io/collector/extension/xextension v0.118.0 h1:P6gvJzqnH9ma2QfnWde/E6Xu9bAzuefzIwm5iupiVPE= +go.opentelemetry.io/collector/extension/xextension v0.118.0/go.mod h1:ne4Q8ZtRlbC0Etr2hTcVkjOpVM2bE2xy1u+R80LUkDw= +go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 h1:XkaLvST4p1/i/dsk5yCwFG4HJUUr6joCbegJc2MEOrE= +go.opentelemetry.io/collector/extension/zpagesextension v0.118.0/go.mod h1:alaAK7I7UeM1Hcs/eNqIjTLIZpqrk3mD1Ua42mJ7JnU= +go.opentelemetry.io/collector/featuregate v1.24.0 
h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY= +go.opentelemetry.io/collector/featuregate v1.24.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/filter v0.118.0 h1:mvf08g5VHUcyhqobqId2bVGhgcs1RNR69INGlT0LEsA= +go.opentelemetry.io/collector/filter v0.118.0/go.mod h1:Pgii0Ad2PXdxYSYYqki6Mr4gZdueJG9rDOiaB3fXf3Q= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 h1:affTj1Qxjbg9dZ1x2tbV9Rs9/otZQ1lHA++L8qB5KiQ= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0/go.mod h1:9mbE68mYdtTyozr3jTtNMB1RA5F8/dt2aWVYSu6bsQ4= +go.opentelemetry.io/collector/internal/memorylimiter v0.118.0 h1:F2FgIe7N4UBQKybKEmpcpFjFBfVLR7ogQHTGAPQ04rc= +go.opentelemetry.io/collector/internal/memorylimiter v0.118.0/go.mod h1:1UXOl4BMaJl9hOlORAJvXNt1jc0GJazCRy9ieDdMkxw= +go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0 h1:aCiwkzBL4VyPEUBmEcTnoPyld5EClJGbwyUNJhHNgEo= +go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0/go.mod h1:drV6vD4acelEUOjM9cgxV5ILs8q2AYUh3EV+Pljdorg= +go.opentelemetry.io/collector/otelcol v0.118.0 h1:uSD3wU0sO4vsw5VvWI2yUFLggLdq1BWN/nC1LJXIhMg= +go.opentelemetry.io/collector/otelcol v0.118.0/go.mod h1:OdKz/AXj+ewCwXp/acZCBIoMIYiIxeNRNkbqUXvWi+o= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.118.0 h1:s4yLzDUPzzPElvcOqth7iOuKe+eBo8iXy6bzAy57sXA= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.118.0/go.mod h1:nNDwBOLXNHVnALpcBzkWQ/770WB3IFvEVgLjgujt3Eo= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.116.0 h1:vRdnwIU40bYtxntVOmxg4Bhrh9QVKtx5wwlxK21rc1s= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.116.0/go.mod h1:KxDMUOfbVy8lzZ85CZEG3gCJEYMyWiBKdN+HWUwQWTM= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 h1:ZUVF1MYNQYZvmuL30KfP+QbVGSbFZvldBM9hgCe4J4k= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0/go.mod h1:XgG1ktGO9J1f6fasMYPWSXL9Raan/VYB9vddKKWp5hQ= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/batchprocessor v0.118.0 h1:odyJ9l5eakr+TS8sr6U9rz53QD5ZwewL/6pLUtFTJBs= +go.opentelemetry.io/collector/processor/batchprocessor v0.118.0/go.mod h1:fcHRefknjoLMpCRQ9LKEEzrrmSFUejEaTSxCqj5lHhI= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.118.0 h1:1v9VB9lJdo5kNT448Ba1jk9psS4+iv8clooiDU0/5WM= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.118.0/go.mod h1:UjlRdaLezSHt+5vX9erJu24HmTMw9mefQSQLatcSwG4= +go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.118.0 
h1:FAuXTUGtk82XDeNC2EIsK8Ad2I0GrbK9zLT6piwjNeA= +go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.118.0/go.mod h1:F/sHViojq+fH2rEAJcPC/o71EE5aFAideVkVBu59e9k= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/receiver v0.118.0 h1:X4mspHmbbtwdCQZ7o370kNmdWfxRnK1FrsvEShCCKEc= +go.opentelemetry.io/collector/receiver v0.118.0/go.mod h1:wFyfu6sgrkDPLQoGOGMuChGZzkZnYcI/tPJWV4CRTzs= +go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0 h1:JeOZxB26tIIBshKgzhWoLsC90TLF1ftyL0JSVyFtOBk= +go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0/go.mod h1:cxUUVD5rXqBIK1ynSuR0cyJ1B8s1VWx4xZunZ31+EAM= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 h1:Nud8aaRDb86K2kBeqMTjqAKDUV00JDn+G4wUZ3hDlAk= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0/go.mod h1:MJvDEzWJnm1FMoIoTKmhlT3pPmwJP+65GKWy0lAzd30= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0 h1:XlMr2mPsyXJsMUOqCpEoY3uCPsLZQbNA5fmVNDGB7Bw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0/go.mod h1:dtu/H1RNjhy11hTVf/XUfc02uGufMhYYdhhYBbglcUg= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 h1:dzECve9e0H3ot0JWnWPuQr9Y84RhOYSd0+CjvJskx7Y= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0/go.mod h1:Lv1nD/mSYSP64iV8k+C+mWWZZOMLRubv9d1SUory3/E= +go.opentelemetry.io/collector/scraper v0.118.0 h1:944QgQVZ7PM0L9WIwgRPY0LbbHX5qsk2x4uxDO1IOAQ= +go.opentelemetry.io/collector/scraper v0.118.0/go.mod h1:wIa4bIqiU9bkeg3v5QQybwz1+K5DjrP1Afc13Kt22Cw= +go.opentelemetry.io/collector/scraper/scraperhelper v0.118.0 h1:kZu4TgGGSWlNP9ogVr3pVQGX6J/P8ooPj8wMH5+aWyQ= +go.opentelemetry.io/collector/scraper/scraperhelper v0.118.0/go.mod h1:NKOcwL580ycua1HQ9K3OUucBsMsVL5DbvOJxGtg4chs= +go.opentelemetry.io/collector/scraper/scrapertest v0.118.0 h1:1zqF7Rs/RuvUITsxGJSDsvVZEqyDFW5xe1nvlNs2+HE= +go.opentelemetry.io/collector/scraper/scrapertest v0.118.0/go.mod h1:lUUX279TfqMQ63VdAdf/cpX4AUuMLPHS0hJcjfyzKkg= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/service v0.118.0 h1:acZ9LzUbEF5M3G7o5FgenPJVuuM2y8c4HW5JVm648L4= +go.opentelemetry.io/collector/service v0.118.0/go.mod h1:uw3cl3UtkAOrEr8UQV2lXKjyTIbhWxURaQec8kE+Pic= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw= go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 
h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j8Bx/OTV86laEl8uqo= go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ= go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko= go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod 
h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= +go.opentelemetry.io/otel/exporters/prometheus v0.55.0 h1:sSPw658Lk2NWAv74lkD3B/RSDb+xRFx46GjkrL3VUZo= +go.opentelemetry.io/otel/exporters/prometheus v0.55.0/go.mod h1:nC00vyCmQixoeaxF6KNyP42II/RHa9UdruK02qBmHvI= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 h1:TwmL3O3fRR80m8EshBrd8YydEZMcUCsZXzOUlnFohwM= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0/go.mod h1:tH98dDv5KPmPThswbXA0fr0Lwfs+OhK8HgaCo7PjRrk= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0 h1:W5AWUn/IVe8RFb5pZx1Uh9Laf/4+Qmm4kJL5zPuvR+0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0/go.mod h1:mzKxJywMNBdEX8TSJais3NnsVZUaJ+bAy6UxPTng2vk= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ= go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= 
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -1118,10 +1152,12 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1161,6 +1197,9 @@ golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1216,18 +1255,20 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1241,6 +1282,9 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1317,10 +1361,13 @@ golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1332,10 +1379,12 @@ golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= 
golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1350,6 +1399,7 @@ golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -1414,6 +1464,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1477,10 +1529,10 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 
h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1494,8 +1546,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1511,8 +1563,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/otelcol/collector-contrib/impl/manifest.yaml b/comp/otelcol/collector-contrib/impl/manifest.yaml index 952a5f97e483f..5e9b85d49d4d8 100644 --- a/comp/otelcol/collector-contrib/impl/manifest.yaml +++ b/comp/otelcol/collector-contrib/impl/manifest.yaml @@ -1,83 +1,83 @@ connectors: - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector - v0.115.0 + v0.118.0 dist: description: Datadog OpenTelemetry Collector module: github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl name: otelcol-contrib output_path: ./comp/otelcol/collector-contrib/impl - version: 0.115.0 + version: 0.118.0 exporters: -- gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.115.0 -- gomod: 
go.opentelemetry.io/collector/exporter/nopexporter v0.115.0 -- gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0 -- gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0 +- gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.118.0 +- gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.118.0 +- gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 +- gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter - v0.115.0 + v0.118.0 extensions: -- gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 +- gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver - v0.115.0 + v0.118.0 processors: -- gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 -- gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 +- gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.118.0 +- gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor - v0.115.0 + v0.118.0 providers: -- gomod: go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 -- gomod: go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 -- gomod: go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 -- gomod: 
go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0 -- gomod: go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 +- gomod: go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0 +- gomod: go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0 +- gomod: go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0 +- gomod: go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0 +- gomod: go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0 receivers: -- gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 -- gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 +- gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0 +- gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator - v0.115.0 + v0.118.0 - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver - v0.115.0 + v0.118.0 replaces: - github.com/googleapis/gnostic v0.5.6 => github.com/googleapis/gnostic v0.5.5 - github.com/docker/go-connections v0.4.1-0.20210727194412-58542c764a11 => github.com/docker/go-connections diff --git a/comp/otelcol/collector/impl-pipeline/pipeline.go b/comp/otelcol/collector/impl-pipeline/pipeline.go index 34cd92d133ed8..ec0da3bb1e72e 100644 --- a/comp/otelcol/collector/impl-pipeline/pipeline.go +++ b/comp/otelcol/collector/impl-pipeline/pipeline.go @@ -29,7 +29,7 @@ import ( apiutil "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/serializer" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -57,7 +57,7 @@ type Requires struct { Serializer serializer.MetricSerializer // LogsAgent specifies a logs agent - LogsAgent optional.Option[logsagentpipeline.Component] + LogsAgent option.Option[logsagentpipeline.Component] // InventoryAgent require the inventory metadata payload, allowing otelcol to add data to it. 
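Editor's note on the hunk above: the pipeline wiring swaps `pkg/util/optional` for the renamed `pkg/util/option` package, so `LogsAgent` becomes an `option.Option[logsagentpipeline.Component]`. Below is a minimal, self-contained sketch of the optional-dependency pattern this field expresses; the `Option` type and `logsAgent` struct here are stand-ins for illustration, not the real `pkg/util/option` API or the logs agent component.

```go
// Illustrative stand-in for the optional-dependency pattern behind
// option.Option[logsagentpipeline.Component]; types are hypothetical.
package main

import "fmt"

// Option holds a value that may or may not be present.
type Option[T any] struct {
	value T
	set   bool
}

// Some wraps a present value.
func Some[T any](v T) Option[T] { return Option[T]{value: v, set: true} }

// None returns an empty Option.
func None[T any]() Option[T] { return Option[T]{} }

// Get returns the value and whether it was set.
func (o Option[T]) Get() (T, bool) { return o.value, o.set }

// logsAgent is a placeholder for the logs agent pipeline component.
type logsAgent struct{ name string }

func main() {
	// Mirrors the Requires struct: the collector may or may not be
	// handed a logs agent at construction time.
	deps := struct{ LogsAgent Option[logsAgent] }{
		LogsAgent: Some(logsAgent{name: "pipeline"}),
	}

	if agent, ok := deps.LogsAgent.Get(); ok {
		fmt.Println("logs agent wired in:", agent.name)
	} else {
		fmt.Println("running without a logs agent")
	}
}
```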
InventoryAgent inventoryagent.Component @@ -80,7 +80,7 @@ type collectorImpl struct { config config.Component log log.Component serializer serializer.MetricSerializer - logsAgent optional.Option[logsagentpipeline.Component] + logsAgent option.Option[logsagentpipeline.Component] inventoryAgent inventoryagent.Component tagger tagger.Component client *http.Client diff --git a/comp/otelcol/collector/impl/collector.go b/comp/otelcol/collector/impl/collector.go index 5bc453ed970ef..5590fb179815c 100644 --- a/comp/otelcol/collector/impl/collector.go +++ b/comp/otelcol/collector/impl/collector.go @@ -44,7 +44,7 @@ import ( traceagent "github.com/DataDog/datadog-agent/comp/trace/agent/def" "github.com/DataDog/datadog-agent/pkg/serializer" zapAgent "github.com/DataDog/datadog-agent/pkg/util/log/zap" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type collectorImpl struct { @@ -68,7 +68,7 @@ type Requires struct { Config config.Component Serializer serializer.MetricSerializer TraceAgent traceagent.Component - LogsAgent optional.Option[logsagentpipeline.Component] + LogsAgent option.Option[logsagentpipeline.Component] SourceProvider serializerexporter.SourceProviderFunc Tagger tagger.Component StatsdClientWrapper *metricsclient.StatsdClientWrapper @@ -119,6 +119,7 @@ func newConfigProviderSettings(uris []string, converter confmap.Converter, enhan httpsprovider.NewFactory(), }, ConverterFactories: converterFactories, + DefaultScheme: "env", }, } } @@ -139,7 +140,7 @@ func addFactories(reqs Requires, factories otelcol.Factories) { } var buildInfo = component.BuildInfo{ - Version: "v0.115.0", + Version: "v0.118.0", Command: filepath.Base(os.Args[0]), Description: "Datadog Agent OpenTelemetry Collector", } diff --git a/comp/otelcol/converter/def/go.mod b/comp/otelcol/converter/def/go.mod index eea27b7e5bbd3..ef377832937a5 100644 --- a/comp/otelcol/converter/def/go.mod +++ b/comp/otelcol/converter/def/go.mod @@ -1,8 +1,8 @@ module github.com/DataDog/datadog-agent/comp/otelcol/converter/def -go 1.22.0 +go 1.23.0 -require go.opentelemetry.io/collector/confmap v1.21.0 +require go.opentelemetry.io/collector/confmap v1.24.0 require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect diff --git a/comp/otelcol/converter/def/go.sum b/comp/otelcol/converter/def/go.sum index 42f05aac3c99a..8e9e67d51e730 100644 --- a/comp/otelcol/converter/def/go.sum +++ b/comp/otelcol/converter/def/go.sum @@ -22,8 +22,8 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= diff --git a/comp/otelcol/converter/impl/autoconfigure.go 
b/comp/otelcol/converter/impl/autoconfigure.go index b2c5a287c51bc..adfa9f72bb2cb 100644 --- a/comp/otelcol/converter/impl/autoconfigure.go +++ b/comp/otelcol/converter/impl/autoconfigure.go @@ -165,6 +165,11 @@ func addCoreAgentConfig(conf *confmap.Conf, coreCfg config.Component) { apiSite := apiMap["site"] if (apiSite == nil || apiSite == "") && coreCfg.Get("site") != nil { apiMap["site"] = coreCfg.Get("site") + } else if (apiSite == nil || apiSite == "") && coreCfg.Get("site") == nil { + // if site is nil or empty string, and core config site is unset, set default + // site. Site defaults to an empty string in helm chart: + // https://github.com/DataDog/helm-charts/blob/datadog-3.86.0/charts/datadog/templates/_otel_agent_config.yaml#L24. + apiMap["site"] = "datadoghq.com" } // api::key diff --git a/comp/otelcol/converter/impl/converter_test.go b/comp/otelcol/converter/impl/converter_test.go index 8ef0926120ae6..ae5814004a684 100644 --- a/comp/otelcol/converter/impl/converter_test.go +++ b/comp/otelcol/converter/impl/converter_test.go @@ -38,6 +38,7 @@ func newResolver(uris []string) (*confmap.Resolver, error) { httpsprovider.NewFactory(), }, ConverterFactories: []confmap.ConverterFactory{}, + DefaultScheme: "env", }) } @@ -148,6 +149,16 @@ func TestConvert(t *testing.T) { provided: "receivers/no-receivers-defined/config.yaml", expectedResult: "receivers/no-receivers-defined/config-result.yaml", }, + { + name: "receivers/empty-staticconfigs", + provided: "receivers/empty-staticconfigs/config.yaml", + expectedResult: "receivers/empty-staticconfigs/config-result.yaml", + }, + { + name: "receivers/missing-staticconfigs-section", + provided: "receivers/missing-staticconfigs-section/config.yaml", + expectedResult: "receivers/missing-staticconfigs-section/config-result.yaml", + }, { name: "processors/dd-connector", provided: "processors/dd-connector/config.yaml", @@ -218,6 +229,12 @@ func TestConvert(t *testing.T) { expectedResult: "dd-core-cfg/site/unset/config-result.yaml", agentConfig: "dd-core-cfg/site/unset/acfg.yaml", }, + { + name: "dd-core-cfg/site/unset-core-mptystr-col", + provided: "dd-core-cfg/site/unset-core-mptystr-col/config.yaml", + expectedResult: "dd-core-cfg/site/unset-core-mptystr-col/config-result.yaml", + agentConfig: "dd-core-cfg/site/unset-core-mptystr-col/acfg.yaml", + }, { name: "dd-core-cfg/site/api-set-no-site", provided: "dd-core-cfg/site/api-set-no-site/config.yaml", diff --git a/comp/otelcol/converter/impl/datadogconnector.go b/comp/otelcol/converter/impl/datadogconnector.go index 75c023237bc82..d04b7454715bb 100644 --- a/comp/otelcol/converter/impl/datadogconnector.go +++ b/comp/otelcol/converter/impl/datadogconnector.go @@ -7,10 +7,14 @@ package converterimpl import ( + pkgdatadog "github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog" "go.opentelemetry.io/collector/confmap" ) func changeDefaultConfigsForDatadogConnector(conf *confmap.Conf) { + if pkgdatadog.OperationAndResourceNameV2FeatureGate.IsEnabled() { + return + } stringMapConf := conf.ToStringMap() connectors, ok := stringMapConf["connectors"] if !ok { diff --git a/comp/otelcol/converter/impl/go.mod b/comp/otelcol/converter/impl/go.mod index ec9c75f1a3c68..12e9071d215f5 100644 --- a/comp/otelcol/converter/impl/go.mod +++ b/comp/otelcol/converter/impl/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/comp/otelcol/converter/impl -go 1.22.0 +go 1.23.0 replace ( github.com/DataDog/datadog-agent/comp/api/api/def => ../../../../comp/api/api/def @@ -28,7 +28,7 @@ replace ( 
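Editor's note on the converter changes above: `addCoreAgentConfig` gains a fallback branch so that when the exporter's `api::site` is nil or an empty string (as rendered by the Helm chart) and the core agent config has no `site` either, the converter writes `datadoghq.com`; the datadog connector tweak also exits early when the `OperationAndResourceNameV2FeatureGate` is enabled. The sketch below models only the site-defaulting branch, using plain maps in place of `*confmap.Conf` and the core config component, so it is a simplified stand-in rather than the converter's real code path.

```go
// Simplified sketch of the site-defaulting logic added to addCoreAgentConfig.
// apiMap stands in for the datadog exporter's "api" section; coreSite stands
// in for coreCfg.Get("site").
package main

import "fmt"

func applySiteDefault(apiMap map[string]any, coreSite any) {
	site, _ := apiMap["site"].(string)
	if site != "" {
		return // user already set a site on the exporter
	}
	if coreStr, ok := coreSite.(string); ok && coreStr != "" {
		apiMap["site"] = coreStr // existing behavior: inherit from core config
		return
	}
	apiMap["site"] = "datadoghq.com" // new behavior: hard default
}

func main() {
	api := map[string]any{"key": "abcde12345", "site": ""}
	applySiteDefault(api, nil) // core config has no site set
	fmt.Println(api["site"])   // datadoghq.com
}
```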
github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system @@ -41,13 +41,14 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/config v0.56.2 github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/collector/confmap v1.21.0 - go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 - go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 - go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 - go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0 - go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 + go.opentelemetry.io/collector/confmap v1.24.0 + go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0 + go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0 + go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0 + go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0 + go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0 go.uber.org/zap v1.27.0 ) @@ -60,22 +61,22 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.2 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer 
v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -85,6 +86,7 @@ require ( github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -92,7 +94,7 @@ require ( github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.1.2 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect @@ -100,22 +102,23 @@ require ( github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/collector/featuregate v1.24.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/comp/otelcol/converter/impl/go.sum b/comp/otelcol/converter/impl/go.sum index 4239a8ba70c9d..09f898196d855 100644 --- a/comp/otelcol/converter/impl/go.sum +++ b/comp/otelcol/converter/impl/go.sum @@ -74,7 +74,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -86,6 +85,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9G github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= @@ -118,8 +119,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -141,6 +142,8 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0 h1:4IvL4o5uOf1PspPgjgcrxfPkyZQbgJP6VsyUi5KuSdM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0/go.mod h1:9cP+bHuftqoYmNDd8LrJ3YTzQl8S1T+qQxSeOIdLM+g= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= @@ -150,8 +153,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -168,8 +171,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -183,8 +186,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -195,8 +198,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 
h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -228,18 +231,20 @@ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1: github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 h1:YLf++Z8CMp86AanfOCWUiE7vKbb1kSjgC3a9VJoxbD4= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0/go.mod h1:aSWLYcmgZZJDNtWN1M8JKQuehoGgOxibl1KuvKTar4M= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 h1:+zukkM+3l426iGoJkXTpLB2Z8QnZFu26TkGPjh5Rn/4= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0/go.mod h1:BXBpQhF3n4CNLYO2n/mWZPd2U9ekpbLXLRGZrun1VfI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 h1:NYYGM+SgIlTuNGjd8eGzDr8DkvOe4q7cXon8djF9yyI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0/go.mod h1:XRYbuwqq1awFuNhLDUv4aSvn6MzqX+abcevx1O+APJI= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0 h1:2EEUI2DzA2DvrvCImMWRSNqIHdRJ6+qbgvZL44Zb2ac= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0/go.mod h1:axezjjQWY4kZc5pr/+wOKAuqSYMhea/tWzP5S30h+dc= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 h1:P3Q9RytCMY76ORPCnkkjOa4fkuFqmZiQRor+F/nPlYE= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0/go.mod h1:xhYhHK3yLQ78tsoaKPIGUfFulgy961ImOe2gATH3RQc= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0 h1:jAtaNR4b5gnddNzyfcpIhURSDq4rai667yV1Ngmku2Y= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0/go.mod h1:X0BuIYyscilkwApnmxlrdz0kTVWgKXq2ih8sTWm8Zio= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0 h1:QoQulv9L20MhD1TFWH1scbRoo0bxbZqF2quh1VRNMh4= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0/go.mod h1:ljIH/rWIUHJeWIDEKMRU/ufol/bcgC7ufamchtuTAwM= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0 h1:1mbj6HlVZ4LNVBYrxM5jQEJKxinpe0LtNZwI7i8pQNY= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0/go.mod h1:xM2qJmW6mB1lzFpLWIoxX/h4tUnoYTICZoqPND9YWi0= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0 h1:/Z3LvIRPJTJEu6mOqELxPiiKMfyl9sUxoZOR/qc7D1I= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0/go.mod h1:C61Rq3ppnFUoieBGiZxqDnOUKK8ZmmH2RzDXG1P+OUo= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0 h1:Ncr7a3HbVpmjAvPHd0yQM/MV2p7HqJe+zvDPmHdjSCI= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0/go.mod h1:i7omVh3uK8efpr7/fSAcOh8Xiv3FLYL26wUuON9i1WI= 
+go.opentelemetry.io/collector/featuregate v1.24.0 h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY= +go.opentelemetry.io/collector/featuregate v1.24.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -263,8 +268,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -301,8 +306,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -330,8 +335,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/otelcol/converter/impl/prometheus.go b/comp/otelcol/converter/impl/prometheus.go index 9e3dc9e489ff8..23b57e6ba424b 100644 --- a/comp/otelcol/converter/impl/prometheus.go +++ b/comp/otelcol/converter/impl/prometheus.go @@ -95,7 +95,7 @@ func addPrometheusReceiver(conf *confmap.Conf, comp component) { } staticConfigSlice, ok := staticConfig.([]any) if !ok { - return + continue } for _, staticConfig := range staticConfigSlice { staticConfigMap, ok := staticConfig.(map[string]any) diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config-result.yaml index 0f4753e0690e5..e43ecf79adc7b 100644 --- a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config-result.yaml @@ -15,6 +15,7 @@ exporters: datadog: api: key: ggggg77777 + site: datadoghq.com extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config.yaml index f31a6089fe240..3c69b1d8e1683 100644 --- a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config.yaml +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/empty-string/config.yaml @@ -15,6 +15,7 @@ exporters: datadog: api: key: "" + site: datadoghq.com extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config-result.yaml index 38413568a0848..3279246588d2a 100644 --- a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config-result.yaml @@ -14,9 +14,11 @@ exporters: datadog: api: key: ggggg77777 + site: datadoghq.com datadog/2: api: key: ggggg77777 + site: datadoghq.com extensions: pprof/user-defined: health_check/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config.yaml index e1a8d16c5ada3..13107c2732d1a 100644 --- a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config.yaml +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/multiple-dd-exporter/config.yaml @@ -15,9 +15,11 @@ exporters: datadog: api: key: + site: datadoghq.com datadog/2: api: key: + site: datadoghq.com extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config-result.yaml index 0f4753e0690e5..e43ecf79adc7b 100644 --- a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/no-api-key-section/config-result.yaml @@ -15,6 +15,7 @@ exporters: datadog: api: key: ggggg77777 + site: datadoghq.com extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config-result.yaml index 
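Editor's note on the prometheus.go hunk above: inside the loop over scrape configs, a `static_configs` entry that is missing or not a slice previously triggered `return`, aborting the rest of the receiver processing; the change makes it `continue`, skipping only the malformed entry. The sketch below reproduces that skip-one-entry pattern on plain maps; the function and data are illustrative, not the converter's actual helper.

```go
// Illustrative sketch of why `continue` is the right recovery: one scrape
// config without static_configs should not drop the ones that follow it.
package main

import "fmt"

func targetsPerJob(scrapeConfigs []any) map[string]int {
	counts := map[string]int{}
	for _, sc := range scrapeConfigs {
		scMap, ok := sc.(map[string]any)
		if !ok {
			continue // skip entries that are not maps
		}
		job, _ := scMap["job_name"].(string)
		staticConfigs, ok := scMap["static_configs"].([]any)
		if !ok {
			// A `return` here would discard every remaining scrape_config;
			// `continue` only skips the one with the missing/invalid section.
			continue
		}
		counts[job] = len(staticConfigs)
	}
	return counts
}

func main() {
	cfg := []any{
		map[string]any{"job_name": "custom"}, // no static_configs section
		map[string]any{
			"job_name":       "datadog-agent",
			"static_configs": []any{map[string]any{"targets": []any{"0.0.0.0:8888"}}},
		},
	}
	fmt.Println(targetsPerJob(cfg)) // map[datadog-agent:1]
}
```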
0f4753e0690e5..e43ecf79adc7b 100644 --- a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config-result.yaml @@ -15,6 +15,7 @@ exporters: datadog: api: key: ggggg77777 + site: datadoghq.com extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config.yaml index 7d51de23ea767..c7caf0e8f26ee 100644 --- a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config.yaml +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/secret/config.yaml @@ -15,6 +15,7 @@ exporters: datadog: api: key: "ENC[my-secret]" + site: datadoghq.com extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config-result.yaml index 0f4753e0690e5..e43ecf79adc7b 100644 --- a/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/apikey/unset/config-result.yaml @@ -15,6 +15,7 @@ exporters: datadog: api: key: ggggg77777 + site: datadoghq.com extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config-result.yaml index f9d9173c1238a..904c8474c0e53 100644 --- a/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config-result.yaml +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/none/config-result.yaml @@ -15,7 +15,7 @@ exporters: datadog: api: key: - site: + site: datadoghq.com extensions: pprof/user-defined: diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset-core-mptystr-col/acfg.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset-core-mptystr-col/acfg.yaml new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset-core-mptystr-col/config-result.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset-core-mptystr-col/config-result.yaml new file mode 100644 index 0000000000000..eee38549dd7e0 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset-core-mptystr-col/config-result.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: abcde12345 + site: datadoghq.com + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset-core-mptystr-col/config.yaml b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset-core-mptystr-col/config.yaml new file mode 100644 index 
0000000000000..5de3ea2b69b9b --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/dd-core-cfg/site/unset-core-mptystr-col/config.yaml @@ -0,0 +1,41 @@ +receivers: + otlp: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +processors: + infraattributes/user-defined: + +exporters: + datadog: + api: + key: abcde12345 + site: "" + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + traces: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + logs: + receivers: [otlp] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/receivers/empty-staticconfigs/config-result.yaml b/comp/otelcol/converter/impl/testdata/receivers/empty-staticconfigs/config-result.yaml new file mode 100644 index 0000000000000..a37951a21481b --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/receivers/empty-staticconfigs/config-result.yaml @@ -0,0 +1,40 @@ +receivers: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'custom' + scrape_interval: 10s + static_configs: + + prometheus/dd-autoconfigured: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] +exporters: + datadog: + api: + key: abcde12345 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +processors: + infraattributes/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics/dd-autoconfigured/datadog: + receivers: [prometheus/dd-autoconfigured] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/receivers/empty-staticconfigs/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/empty-staticconfigs/config.yaml new file mode 100644 index 0000000000000..1b2094315f8d7 --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/receivers/empty-staticconfigs/config.yaml @@ -0,0 +1,30 @@ +receivers: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'custom' + scrape_interval: 10s + static_configs: + +exporters: + datadog: + api: + key: abcde12345 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +processors: + infraattributes/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/receivers/missing-staticconfigs-section/config-result.yaml b/comp/otelcol/converter/impl/testdata/receivers/missing-staticconfigs-section/config-result.yaml new file mode 100644 index 0000000000000..675642941c001 --- /dev/null +++ 
b/comp/otelcol/converter/impl/testdata/receivers/missing-staticconfigs-section/config-result.yaml @@ -0,0 +1,40 @@ +receivers: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'custom' + scrape_interval: 10s + + prometheus/dd-autoconfigured: + config: + scrape_configs: + - job_name: 'datadog-agent' + scrape_interval: 10s + static_configs: + - targets: ['0.0.0.0:8888'] + +exporters: + datadog: + api: + key: abcde12345 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +processors: + infraattributes/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] + metrics/dd-autoconfigured/datadog: + receivers: [prometheus/dd-autoconfigured] + exporters: [datadog] diff --git a/comp/otelcol/converter/impl/testdata/receivers/missing-staticconfigs-section/config.yaml b/comp/otelcol/converter/impl/testdata/receivers/missing-staticconfigs-section/config.yaml new file mode 100644 index 0000000000000..5334c4f953d0b --- /dev/null +++ b/comp/otelcol/converter/impl/testdata/receivers/missing-staticconfigs-section/config.yaml @@ -0,0 +1,29 @@ +receivers: + prometheus/user-defined: + config: + scrape_configs: + - job_name: 'custom' + scrape_interval: 10s + +exporters: + datadog: + api: + key: abcde12345 + +extensions: + pprof/user-defined: + health_check/user-defined: + zpages/user-defined: + endpoint: "localhost:55679" + ddflare/user-defined: + +processors: + infraattributes/user-defined: + +service: + extensions: [pprof/user-defined, zpages/user-defined, health_check/user-defined, ddflare/user-defined] + pipelines: + metrics: + receivers: [prometheus/user-defined] + processors: [infraattributes/user-defined] + exporters: [datadog] diff --git a/comp/otelcol/ddflareextension/def/go.mod b/comp/otelcol/ddflareextension/def/go.mod index 2160a62e2f369..fc23149b37833 100644 --- a/comp/otelcol/ddflareextension/def/go.mod +++ b/comp/otelcol/ddflareextension/def/go.mod @@ -1,25 +1,26 @@ module github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def -go 1.22.0 +go 1.23.0 -require go.opentelemetry.io/collector/extension v0.115.0 +require go.opentelemetry.io/collector/extension v0.118.0 require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - go.opentelemetry.io/collector/component v0.115.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/pdata v1.21.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/collector/component v0.118.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/pdata v1.24.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + 
golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect ) diff --git a/comp/otelcol/ddflareextension/def/go.sum b/comp/otelcol/ddflareextension/def/go.sum index e8348caae0f44..41393b006f817 100644 --- a/comp/otelcol/ddflareextension/def/go.sum +++ b/comp/otelcol/ddflareextension/def/go.sum @@ -6,8 +6,12 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= @@ -16,20 +20,26 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= -go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod 
h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA= +go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -45,16 +55,16 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -67,11 +77,11 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/comp/otelcol/ddflareextension/impl/configstore_test.go b/comp/otelcol/ddflareextension/impl/configstore_test.go index b1b79d47df3ba..21b47ce36d133 100644 --- a/comp/otelcol/ddflareextension/impl/configstore_test.go +++ b/comp/otelcol/ddflareextension/impl/configstore_test.go @@ -192,6 +192,7 @@ func newResolverSettings(uris []string, enhanced bool) confmap.ResolverSettings httpsprovider.NewFactory(), }, ConverterFactories: newConverterFactory(enhanced), + DefaultScheme: "env", } } diff --git a/comp/otelcol/ddflareextension/impl/go.mod b/comp/otelcol/ddflareextension/impl/go.mod index 25448ad26a8c3..69bd620c86d75 100644 --- a/comp/otelcol/ddflareextension/impl/go.mod +++ b/comp/otelcol/ddflareextension/impl/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl -go 1.22.0 +go 1.23.0 replace ( github.com/DataDog/datadog-agent/comp/api/api/def => ../../../api/api/def @@ -34,7 +34,8 @@ replace ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor => ../../otlp/components/processor/infraattributesprocessor 
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor => ../../otlp/components/statsprocessor github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil => ../../../otelcol/otlp/testutil - github.com/DataDog/datadog-agent/comp/serializer/compression => ../../../serializer/compression + github.com/DataDog/datadog-agent/comp/serializer/logscompression => ../../../serializer/logscompression + github.com/DataDog/datadog-agent/comp/serializer/metricscompression => ../../../serializer/metricscompression github.com/DataDog/datadog-agent/comp/trace/agent/def => ../../../trace/agent/def github.com/DataDog/datadog-agent/comp/trace/compression/def => ../../../trace/compression/def github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip => ../../../trace/compression/impl-gzip @@ -79,6 +80,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/buf => ../../../../pkg/util/buf github.com/DataDog/datadog-agent/pkg/util/cgroups => ../../../../pkg/util/cgroups github.com/DataDog/datadog-agent/pkg/util/common => ../../../../pkg/util/common + github.com/DataDog/datadog-agent/pkg/util/compression => ../../../../pkg/util/compression github.com/DataDog/datadog-agent/pkg/util/containers/image => ../../../../pkg/util/containers/image github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../pkg/util/defaultpaths github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable @@ -89,7 +91,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/json => ../../../../pkg/util/json github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/sort => ../../../../pkg/util/sort @@ -101,7 +103,6 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../../../pkg/util/utilizationtracker github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil github.com/DataDog/datadog-agent/pkg/version => ../../../../pkg/version - github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea ) require ( @@ -110,70 +111,82 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter v0.59.0 github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor v0.59.0 github.com/DataDog/datadog-agent/pkg/api v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/version v0.59.1 + github.com/DataDog/datadog-agent/pkg/config/mock v0.61.0 + github.com/DataDog/datadog-agent/pkg/version v0.61.0 github.com/google/go-cmp v0.6.0 github.com/gorilla/mux v1.8.1 - github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 - 
github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 - github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0 + github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/collector/component v0.115.0 - go.opentelemetry.io/collector/component/componentstatus v0.115.0 - go.opentelemetry.io/collector/component/componenttest v0.115.0 - go.opentelemetry.io/collector/config/confighttp v0.115.0 - go.opentelemetry.io/collector/confmap v1.21.0 - go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 - go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 - go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 - go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0 - go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 - go.opentelemetry.io/collector/connector v0.115.0 - go.opentelemetry.io/collector/exporter v0.115.0 - go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0 - go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0 - go.opentelemetry.io/collector/extension v0.115.0 - go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 - go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - go.opentelemetry.io/collector/otelcol v0.115.0 - go.opentelemetry.io/collector/processor v0.115.0 - go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 - go.opentelemetry.io/collector/receiver v0.115.0 - go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 - go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 + go.opentelemetry.io/collector/component v0.118.0 + go.opentelemetry.io/collector/component/componentstatus v0.118.0 + go.opentelemetry.io/collector/component/componenttest v0.118.0 + go.opentelemetry.io/collector/config/confighttp v0.118.0 + go.opentelemetry.io/collector/confmap v1.24.0 + go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0 + go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0 + go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0 + go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0 + go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0 + go.opentelemetry.io/collector/connector v0.118.0 + go.opentelemetry.io/collector/exporter v0.118.0 + go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 + go.opentelemetry.io/collector/extension v0.118.0 + go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 + go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + go.opentelemetry.io/collector/otelcol v0.118.0 + go.opentelemetry.io/collector/processor v0.118.0 + go.opentelemetry.io/collector/processor/batchprocessor v0.118.0 + go.opentelemetry.io/collector/receiver v0.118.0 + go.opentelemetry.io/collector/receiver/nopreceiver 
v0.118.0 + go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 go.uber.org/zap v1.27.0 gopkg.in/yaml.v2 v2.4.0 ) -require go.opentelemetry.io/collector/extension/extensiontest v0.115.0 // indirect +require go.opentelemetry.io/collector/extension/extensiontest v0.118.0 // indirect require ( - github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.0.0-20241217122454-175edb6c74f2 // indirect + go.opentelemetry.io/collector/connector/xconnector v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.118.0 // indirect + go.opentelemetry.io/collector/extension/xextension v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 // indirect + go.opentelemetry.io/collector/processor/xprocessor v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 // indirect +) + +require ( + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.1 // indirect + github.com/DataDog/datadog-agent/comp/serializer/metricscompression v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect - go.opentelemetry.io/collector/connector/connectortest v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exportertest v0.115.0 // indirect - go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 // indirect - go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processortest v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receivertest v0.115.0 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/connector/connectortest v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/exportertest v0.118.0 // indirect + go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0 // indirect + go.opentelemetry.io/collector/processor/processortest v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/receivertest v0.118.0 // indirect go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 // indirect ) require ( cloud.google.com/go/auth v0.9.5 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect - cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect @@ -182,98 +195,96 @@ 
require ( github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect github.com/Code-Hex/go-generics-cache v1.5.1 // indirect - github.com/DataDog/agent-payload/v5 v5.0.138 // indirect - github.com/DataDog/datadog-agent/comp/core/config v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/core/log/def v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/core/log/mock v0.58.0-devel // indirect - github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect + github.com/DataDog/agent-payload/v5 v5.0.141 // indirect + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/core/telemetry v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/telemetry v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/def v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/logs/agent/config v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/logs/agent/config v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.61.0-devel.0.20241118141418-5b899217c342 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.62.0-devel.0.20241213165407-f95df913d2b7 // indirect github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter v0.59.0-rc.6 // indirect - github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.57.0-devel.0.20240718200853-81bf3b2e412d 
// indirect - github.com/DataDog/datadog-agent/comp/serializer/compression v0.59.0-rc.6 // indirect + github.com/DataDog/datadog-agent/comp/serializer/logscompression v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/trace/agent/def v0.59.0-rc.6 // indirect - github.com/DataDog/datadog-agent/comp/trace/compression/def v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/trace/compression/def v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.59.0-rc.6 // indirect - github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/config/utils v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/auditor v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/client v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/message v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/metrics v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/processor v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/sds v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/sender v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/sources v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/utils v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/auditor v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/client v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/message v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/metrics v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/processor v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/sds v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/sender v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/sources v0.61.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/metrics v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/obfuscate v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/obfuscate v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/process/util/api v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/proto v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/proto v0.63.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/status/health v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/status/health v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/tagger/types v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/telemetry v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/trace v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/backoff v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/telemetry v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/trace v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/backoff v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/cgroups v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/cgroups v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/http v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/http v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/log/setup v0.58.0-devel // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/startstop v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/statstracker 
v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect - github.com/DataDog/datadog-api-client-go/v2 v2.33.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/startstop v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/statstracker v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.61.0 // indirect + github.com/DataDog/datadog-api-client-go/v2 v2.34.0 // indirect github.com/DataDog/datadog-go/v5 v5.6.0 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect - github.com/DataDog/go-sqllexer v0.0.17 // indirect + github.com/DataDog/go-sqllexer v0.0.20 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/DataDog/zstd v1.5.6 // indirect @@ -281,8 +292,8 @@ require ( github.com/Microsoft/go-winio v0.6.2 // indirect github.com/alecthomas/participle/v2 v2.1.1 // indirect github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 // indirect - github.com/antchfx/xmlquery v1.4.2 // indirect - github.com/antchfx/xpath v1.3.2 // indirect + github.com/antchfx/xmlquery v1.4.3 // indirect + github.com/antchfx/xpath v1.3.3 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/aws/aws-sdk-go v1.55.5 // indirect github.com/benbjohnson/clock v1.3.5 // indirect @@ -292,7 +303,7 @@ require ( github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect - github.com/containerd/cgroups/v3 v3.0.4 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dennwc/varint v1.0.0 // indirect @@ -306,7 +317,7 @@ require ( github.com/elastic/go-grok v0.3.1 // indirect github.com/elastic/lunes v0.1.0 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect - github.com/envoyproxy/go-control-plane v0.13.0 // indirect + github.com/envoyproxy/go-control-plane v0.13.1 // indirect github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -324,12 +335,12 @@ require ( github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/go-zookeeper/zk v1.0.3 // indirect 
github.com/gobwas/glob v0.2.3 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.1 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect + github.com/golang/mock v1.7.0-rc.1 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/btree v1.1.3 // indirect @@ -343,7 +354,7 @@ require ( github.com/gophercloud/gophercloud v1.13.0 // indirect github.com/gorilla/websocket v1.5.1 // indirect github.com/grafana/regexp v0.0.0-20240518133315-a468a5bfb3bc // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 // indirect github.com/hashicorp/consul/api v1.30.0 // indirect github.com/hashicorp/cronexpr v1.1.2 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -379,7 +390,7 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/lightstep/go-expohisto v1.0.0 // indirect github.com/linode/linodego v1.37.0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magefile/mage v1.15.0 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -399,15 +410,15 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect github.com/onsi/ginkgo/v2 v2.20.2 // indirect github.com/onsi/gomega v1.34.1 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.118.0 // indirect 
github.com/opencontainers/go-digest v1.0.0 // indirect github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect @@ -420,10 +431,10 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/common/sigv4 v0.1.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/prometheus v0.54.1 // indirect @@ -431,9 +442,9 @@ require ( github.com/rs/cors v1.11.1 // indirect github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -441,7 +452,7 @@ require ( github.com/stretchr/objx v0.5.2 // indirect github.com/tidwall/gjson v1.18.0 // indirect github.com/tidwall/pretty v1.2.1 // indirect - github.com/tinylib/msgp v1.2.4 // indirect + github.com/tinylib/msgp v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/twmb/murmur3 v1.1.8 // indirect @@ -450,39 +461,32 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/collector v0.115.0 // indirect - go.opentelemetry.io/collector/client v1.21.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.115.0 // indirect - go.opentelemetry.io/collector/config/configcompression v1.21.0 // indirect - go.opentelemetry.io/collector/config/configgrpc v0.115.0 // indirect - go.opentelemetry.io/collector/config/confignet v1.21.0 // indirect - go.opentelemetry.io/collector/config/configopaque v1.21.0 // indirect - go.opentelemetry.io/collector/config/configretry v1.21.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/config/configtls v1.21.0 // indirect - go.opentelemetry.io/collector/config/internal v0.115.0 // indirect - go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer v1.21.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.115.0 // indirect - go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 // indirect - go.opentelemetry.io/collector/featuregate v1.21.0 // indirect - go.opentelemetry.io/collector/pdata v1.21.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect - 
go.opentelemetry.io/collector/pdata/testdata v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/semconv v0.115.0 // indirect - go.opentelemetry.io/collector/service v0.115.0 // indirect + go.opentelemetry.io/collector v0.118.0 // indirect + go.opentelemetry.io/collector/client v1.24.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.118.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.24.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.118.0 // indirect + go.opentelemetry.io/collector/config/confignet v1.24.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.24.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.24.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/config/configtls v1.24.0 // indirect + go.opentelemetry.io/collector/consumer v1.24.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.118.0 // indirect + go.opentelemetry.io/collector/featuregate v1.24.0 // indirect + go.opentelemetry.io/collector/pdata v1.24.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/collector/semconv v0.118.0 // indirect + go.opentelemetry.io/collector/service v0.118.0 // indirect go.opentelemetry.io/contrib/config v0.10.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect go.opentelemetry.io/contrib/zpages v0.56.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect @@ -494,33 +498,33 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect go.opentelemetry.io/otel/sdk/log v0.7.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.31.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect + golang.org/x/crypto v0.32.0 // indirect + golang.org/x/exp 
v0.0.0-20250106191152-7588d65b2ba8 // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.8.0 // indirect - golang.org/x/tools v0.28.0 // indirect + golang.org/x/tools v0.29.0 // indirect gonum.org/v1/gonum v0.15.1 // indirect google.golang.org/api v0.199.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect @@ -535,3 +539,6 @@ require ( sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) + +// github.com/golang/mock is unmaintained and archived, v1.6.0 is the last released version +replace github.com/golang/mock => github.com/golang/mock v1.6.0 diff --git a/comp/otelcol/ddflareextension/impl/go.sum b/comp/otelcol/ddflareextension/impl/go.sum index 1b8a5abd738cb..6e70ebd8ce8e4 100644 --- a/comp/otelcol/ddflareextension/impl/go.sum +++ b/comp/otelcol/ddflareextension/impl/go.sum @@ -23,8 +23,8 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -59,37 +59,35 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Code-Hex/go-generics-cache v1.5.1 h1:6vhZGc5M7Y/YD8cIUcY8kcuQLB4cHR7U+0KMqAA0KcU= github.com/Code-Hex/go-generics-cache v1.5.1/go.mod h1:qxcC9kRVrct9rHeiYpFWSoW1vxyillCVzX13KZG8dl4= -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= -github.com/DataDog/datadog-agent/comp/core/log v0.56.2 
h1:qvBT+FfjKGqimyEvmsNHCZKbTfBJAdUZSVy2IZQ8HS4= -github.com/DataDog/datadog-agent/comp/core/log v0.56.2/go.mod h1:ivJ/RMZjTNkoPPNDX+v/nnBwABLCiMv1vQA5tk/HCR4= -github.com/DataDog/datadog-api-client-go/v2 v2.33.0 h1:OI6kDnJeQmkjfGzxmP0XUQUxMD4tp6oAPXnnJ4VpgUM= -github.com/DataDog/datadog-api-client-go/v2 v2.33.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/datadog-api-client-go/v2 v2.34.0 h1:0VVmv8uZg8vdBuEpiF2nBGUezl2QITrxdEsLgh38j8M= +github.com/DataDog/datadog-api-client-go/v2 v2.34.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= -github.com/DataDog/go-sqllexer v0.0.17 h1:u47fJAVg/+5DA74ZW3w0Qu+3qXHd3GtnA8ZBYixdPrM= -github.com/DataDog/go-sqllexer v0.0.17/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.20 h1:0fBknHo42yuhawZS3GtuQSdqcwaiojWjYNT6OdsZRfI= +github.com/DataDog/go-sqllexer v0.0.20/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee/go.mod h1:nTot/Iy0kW16bXgXr6blEc8gFeAS7vTqYlhAxh+dbc0= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0 h1:r1Dx2cRHCBWkVluSZA41i4eoI/nOGbcrrZdkqWjoFCc= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0/go.mod h1:+/dkO8ZiMa8rfm4SmtTF6qPUdBbBcvsWWKaO4xPKAIk= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0 h1:cXcKVEU1D0HlguR7GunnvuI70TghkarCa9DApqzMY94= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0/go.mod h1:ES00EXfyEKgUkjd93tAXCxJA6i0seeOhZoS5Cj2qzzg= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0 h1:Zqj8YUZ/ualUhM8GDCQX6xKnUJKEiG0eYdFGWmIDG30= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0/go.mod h1:lpr4q6g2TB0BHeLHaz/XleKm8YXQjuxiQEb9Q9HXXE0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0 h1:w9+ngZDYUMLW+GSRA8x1DvVbuMR+cwlGb8VLwZfgBGs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0/go.mod 
h1:UsfqLgiD6Sjhpjkg+YzAd+TdKUZ2m6ZZ8t+tEkLNTMA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 h1:63SzQz9Ab8XJj8fQKQz6UZNBhOm8rucwzbDfwTVF6dQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0/go.mod h1:E/PY/aQ6S/N5hBPHXZRGmovs5b1BSi4RHGNcB4yP/Z0= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0 h1:Fth9wZCAVbIUvlKq/QXT7QINza+epFaKtIvy1qqybbg= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0/go.mod h1:7D+x/7CIdzklC9spgB3lrg8GUvIW52Y8SMONrBCiPbw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0 h1:ttW3C3IN8p1goqyvaVpT4Blzg3lQ+sh4MTtB33BbpdE= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0/go.mod h1:FpUbxBqKdi16CDJnRifUzmkETaEYR75xvh2Vo8vvJN0= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0 h1:wZaNTYVo2WIHzvn8GBAH4FNbXac5A+hfETeK0YxYYnw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0/go.mod h1:0JvUXmUWULz1XU0RTaNPLgces6LJvI/FinPO5suiJOo= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0 h1:dG1rn794tdEpU+fqHumwx/Ngcc7uVPlJT/xt/4L1lmQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0/go.mod h1:UWDxETdZ0XK3lpVJ4JYa16oYhu5H6IluXPrDtyvMIwU= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 h1:Uha4TTkbCcYTvUbkbfvUjUmxtPaPKCOtwwl91erkRRg= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0/go.mod h1:RWoMSFb2Q+L0FSRYctEt8Wp0em+InUg+Oe+BU30e7gA= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= @@ -117,10 +115,10 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= -github.com/antchfx/xmlquery v1.4.2 h1:MZKd9+wblwxfQ1zd1AdrTsqVaMjMCwow3IqkCSe00KA= -github.com/antchfx/xmlquery v1.4.2/go.mod h1:QXhvf5ldTuGqhd1SHNvvtlhhdQLks4dD0awIVhXIDTA= -github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U= -github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antchfx/xmlquery v1.4.3 h1:f6jhxCzANrWfa93O+NmRWvieVyLs+R2Szfpy+YrZaww= +github.com/antchfx/xmlquery v1.4.3/go.mod h1:AEPEEPYE9GnA2mj5Ur2L5Q5/2PycJ0N9Fusrx9b12fc= +github.com/antchfx/xpath v1.3.3 h1:tmuPQa1Uye0Ym1Zn65vxPgfltWb/Lxu2jeqIGteJSRs= +github.com/antchfx/xpath v1.3.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -132,6 +130,34 @@ github.com/armon/go-radix 
v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go-v2 v1.33.0 h1:Evgm4DI9imD81V0WwD+TN4DCwjUMdc94TrduMLbgZJs= +github.com/aws/aws-sdk-go-v2 v1.33.0/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/config v1.29.1 h1:JZhGawAyZ/EuJeBtbQYnaoftczcb2drR2Iq36Wgz4sQ= +github.com/aws/aws-sdk-go-v2/config v1.29.1/go.mod h1:7bR2YD5euaxBhzt2y/oDkt3uNRb6tjFp98GlTFueRwk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.54 h1:4UmqeOqJPvdvASZWrKlhzpRahAulBfyTJQUaYy4+hEI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.54/go.mod h1:RTdfo0P0hbbTxIhmQrOsC/PquBZGabEPnCaxxKRPSnI= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 h1:5grmdTdMsovn9kPZPI23Hhvp0ZyNm5cRO+IZFIYiAfw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24/go.mod h1:zqi7TVKTswH3Ozq28PkmBmgzG1tona7mo9G2IJg4Cis= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 h1:igORFSiH3bfq4lxKFkTSYDhJEUCYo6C8VKiWJjYwQuQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28/go.mod h1:3So8EA/aAYm36L7XIvCVwLa0s5N0P7o2b1oqnx/2R4g= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 h1:1mOW9zAUMhTSrMDssEHS/ajx8JcAj/IcftzcmNlmVLI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28/go.mod h1:kGlXVIWDfvt2Ox5zEaNglmq0hXPHgQFNMix33Tw22jA= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 h1:3hH6o7Z2WeE1twvz44Aitn6Qz8DZN3Dh5IB4Eh2xq7s= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0/go.mod h1:I76S7jN0nfsYTBtuTgTsJtK2Q8yJVDgrLr5eLN64wMA= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 h1:TQmKDyETFGiXVhZfQ/I0cCFziqqX58pi4tKJGYGFSz0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9/go.mod h1:HVLPK2iHQBUx7HfZeOQSEu3v2ubZaAY2YPbAm5/WUyY= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 h1:kuIyu4fTT38Kj7YCC7ouNbVZSSpqkZ+LzIfhCr6Dg+I= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.11/go.mod h1:Ro744S4fKiCCuZECXgOi760TiYylUM8ZBf6OGiZzJtY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 h1:l+dgv/64iVlQ3WsBbnn+JSbkj01jIi+SM0wYsj3y/hY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10/go.mod h1:Fzsj6lZEb8AkTE5S68OhcbBqeWPsR8RnGuKPr8Todl8= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.9 h1:BRVDbewN6VZcwr+FBOszDKvYeXY1kJ+GGMCcpghlw0U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.9/go.mod h1:f6vjfZER1M17Fokn0IzssOTMT2N8ZSq+7jnNF0tArvw= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= github.com/benbjohnson/clock v1.3.5 
h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -163,8 +189,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4= -github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -173,7 +199,7 @@ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= @@ -215,8 +241,8 @@ github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRr github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= @@ -273,8 +299,8 @@ github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json 
v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -292,13 +318,6 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -384,8 +403,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9G github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= @@ -516,8 +535,8 @@ github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8Lb github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkkso= github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod 
h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg= github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -590,58 +609,58 @@ github.com/onsi/ginkgo/v2 v2.20.2 h1:7NVCeyIWROIAheY21RLS+3j2bb52W0W82tkberYytp4 github.com/onsi/ginkgo/v2 v2.20.2/go.mod h1:K9gyxPIlb+aIvnZ8bd9Ak+YP18w3APlR+5coaZoE2ag= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.115.0 h1:Xkfl44ZRgkz1EoCCYgwPomQkV+BrYOPvv9v1Kd1gZE4= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.115.0/go.mod h1:Sr/upBdJeJ7nxDfmCFCl9iHosXiPoQCPHkCJslDyoUA= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0 h1:sO4fPw0NRUibgBVvQVTqPBCBRFh0I+ODIr3HAwcWezI= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0/go.mod h1:HqzCXJ4rxXzWNYaUtCqJzXyTsCGEKSa/d+tHcyeRDY0= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.115.0 h1:qtct9PsKONY6YOMc+QGBE/uGs8KMBcF6mvYJbyFHFt8= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.115.0/go.mod h1:OR9DKWrSRpfc3+CxwsL2QTOuHD03S9w0Jubi3EhTcy4= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.115.0 h1:u7Ht+E1ghQESffcjyaxWrXGsfSWa1VE9LKC4f2PPx84= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.115.0/go.mod h1:r3iS2mDYu+cnGjgNc8TgvuUUAN6A6/1BvR1e1YJBrqM= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 h1:51D/x3xIAnWgVrY0lgdU+b+yb2aWd72uDqu9GhjRcNI= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0/go.mod h1:nLau1YUdjhtLrk4jXLPb2l9riQ1Ap4xytTLl7MBedBg= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 h1:HVGG31WeB6Fn2+il2/ycWj9tDP0fxOeOqD1rKCjsBSc= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0/go.mod h1:2hYojHs5daPVWECuZsPViKwty0ojuHUEmk8GEuaFqO0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0 h1:SF3gOOEkfntE3zEhY80yO7BVQ5CkaK8ecic2U2AZPHE= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0/go.mod h1:jeBzX5m8O9X0LQxiryV9sJUIrn+QAwOnCBE2wZWIltQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 h1:vRQQFD4YpasQFUAdF030UWtaflSYFXK542bfWMGhOK0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0/go.mod h1:BZ7DT+0VkKR7P3I9PGEDfVa0GdB0ty41eEcejIUXF9A= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 h1:a36EJz/mb83f6ieX0v4fNDJ1jXqpeaM6DVQXeFDvdhw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0/go.mod h1:r5/40YO1eSP5ZreOmRzVOUtDr7YG39ZIUcVjHd+9Izc= 
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0 h1:h6zEsBtuZalQu7lKYf6ZCcj8fTocT+zxdmuOou9515Q= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0/go.mod h1:6QU/K0dGCGYorkOvJmhbDFCspy4RPxRkFjf9I64y6I0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.115.0 h1:f/HrZgTf6TF97v67uEZB3v2UtBT9aQojBvnloD3LOm4= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.115.0/go.mod h1:Hp9uSq3qNJqdxu24u7RWyuPT9x1GgEUSx9US1LLeLi0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.115.0 h1:4RoU3SlcNe6Dxyxfv8JVsrN8QgjBQ44Pkt9FLKK095I= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.115.0/go.mod h1:jfPlBpZT+hvp52Ldcx+srxaqyYuKxBkxOd3KtxbveCU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0 h1:8A+iBT5G23zvBPqYx32Qh4800jHFo4X9T1fpQKVQ+4E= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0/go.mod h1:AhdPvwYKu7G8LKRWzHTNQYBq27RinsMm5qSanwSA/rU= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0 h1:WOqt8NpU/JPGYDR4CiWx7g/sHV6Oe9FChzhushwmVdo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0/go.mod h1:wV/+iU7MyXcyTaY8K5Qx+1Z3yUzrxA40nydPQA476Iw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0 h1:Z9p78zj9Qblw472mGkPieuX7mqduAp47rzMbFfq5evI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0/go.mod h1:mtxUxJEIQy27MaGR1yzcn/OK8NoddEgb7fumpEbKYss= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0 h1:qdZ9EqmdM19pWhPoFA7VivBTdzP2HvNwXa3CCMHYoDQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0/go.mod h1:mrL1MNrcg0zYAJ+aK9WtOH062dl2wN9DDG7mZk9H8v4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 h1:MerLKMrkM4YoGF6Di0D9yMXO02yCX8mrZAi/+jJVVeI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0/go.mod h1:R8AkVWe9G5Q0oMOapvm9HNS076E3Min8SVlmhBL3QD0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 h1:WEqcnWSy9dNSlGb8pYRBX7zhaz2ReyaeImlenbzNTB4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0/go.mod h1:6Mk71CakHUA3I6oM9hARDiyQypYyOolvb+4PFYyVEFg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 h1:eoapW0JBablApkdv4C1RUuOKfz0U6SwuKMYYSAJH6fE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0/go.mod h1:hW2AaybTRcwxJySGLC3Fh1vd2VDaQhRBfa7O7w30NS8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 h1:R9MRrO+dSkAHBQLZjuwjv2RHXHQqF2Wtm1Ki0VKD5cs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0/go.mod h1:rKXLXmwdUVcUHwTilroKSejbg3KSwLeYzNPSpkIEnv4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0 h1:KghgAubxdDqP4eUQ+d2GzHXUAwtFxpSDToqFVnax0XA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0/go.mod h1:cW/BaYE6Uo7ZYHbmT0wVBktHP0SfeLqGHMf0qks7rOE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.115.0 h1:ioGiKiO0WqT3PxkzanuJsPVA24FItH6nTJeDeSMFpYA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.115.0/go.mod 
h1:x1W4J+pzK/Bi9jjYBYESTsPq0nRJJLZoN7cPNd0vYSU= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 h1:vwZQ7k8oqlK0bdZYTsjP/59zjQQfjSD4fNsWIWsTu2w= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0/go.mod h1:5ObSa9amrbzbYTdAK1Qhv3D/YqCxxnQhP0sk2eWB7Oo= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0 h1:olyiml73slGYORDjZNViW3nKiysC+K+h5yPsSBjUxQ4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0/go.mod h1:N00k1mTxzfS2clqxSP4Dxk7iX8GWbbuCq6LF8/ECk/M= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 h1:sLRTfXUFiqJ5Qe/NN5MUJxTaFt46E0Y/xjSY+KesCQc= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0/go.mod h1:361IqXD4jnfs6G+Yn7978uv1UNozhZo4yBYy4p6Nqzc= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 h1:GIyMUiud3T8nyCJP9KVhxVKvfcNQRBCde5uTCl6K/i0= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0/go.mod h1:x4hCznyUolxGt5cE/uXWRCckdIDrUYqH5hJddvdKZd4= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.118.0 h1:i6EXJvoGNOrYvmYbOgJu2FRurpMg1eS/lP6nkEOwKM8= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.118.0/go.mod h1:I96MagdAxY3SNR8PZWSiKMp4s8EfutqmhJVpQ3x4dLk= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0 h1:X0RNsPCvo+VCQNaxFL+3Zj+13/It8aY6yRmBSLcGy1c= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0/go.mod h1:ZZzyaYuuQVUA/STahm8GOJqXRPFrB9KxT7jY7EakDXA= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.118.0 h1:AsPP531/BHxHh0SD73ij1Lg+prrGn2RTVXWdtf0d0YI= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.118.0/go.mod h1:NKFEb3yh4hZBTi1BQM5Sn7n/UiIVBZForHHqjtJBH5U= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.118.0 h1:zHA9n518dSAz2VKqqn30upcZQL6ll9lrK1jCRnBHmhc= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.118.0/go.mod h1:9KW4qWtwCvpWmZYczNkwCwT7nI2Nat6IemDX5w/fTdI= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 h1:HKPTwhA+GNlsBpIR77DH1gwbzL2thOv/+rZzEZ27Sdk= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0/go.mod h1:MchgZvCCgNc9sXj52bvrjQQxS71UaZ0HcikwUDuGwFg= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 h1:KlIEiJprSJYUvc2XxXCu0uXM0/T/IbTKcyugEcjmnm4= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0/go.mod h1:oE1OPZITVJobOfQBHokvUlCm4BILngcmba1jkKhBcKs= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.118.0 h1:xRe7n6OGxrjAkSycWEHSRXlSO9gN8dHoRHC8mrNEqsU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.118.0/go.mod h1:6Lrr+9tQ1/cBVpMhccQ43CgUmy9YYbsu/yssNIZJxjM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0 h1:W6maz9dZiAYO3WWFMy41GoX2tzx7EPiLGiykNkiAOMI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common 
v0.118.0/go.mod h1:WmS8hAnghKAR5UGYC/sww46mKkBO4RxAqkn8K0i+ZU4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 h1:Xnwd0QEyBg6iNPUbc3CnHIb0hVjfTc+jHdFbA9VSa7k= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0/go.mod h1:rmqCuNFMNBUxgyufeU8rpVYOWau8ubr0gmSO1u+R5Hk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 h1:cRDOmJfEOm7G369Lw47k03NIg1qY6HtO9XTwfYRLBw4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0/go.mod h1:KPphlnKqOx44kbEks3VjqQstD/892osXDVN1kn53wWE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.118.0 h1:94Xf/jV2ewqnVRA/CUKvNKZ5p3+mEtrMcPE1Xw9lk18= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.118.0/go.mod h1:GhC+Pk3PbAIq52vmYr+d6PN4Hnxyp4lGQMbomI7Bom8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.118.0 h1:OnZwsQGs3DKeZbyLWNZY1J2xKthKkg4Myb0OP9YN7/U= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.118.0/go.mod h1:6wbKIFyIVjtzzHEFUSvk6bKBLPEwwdVqY86N6MYVsPw= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.118.0 h1:nzm0/xJEzIWKydgsubNipphuYabJPF3dXc4I6g0dR2M= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.118.0/go.mod h1:jORSreOnfMNkLI3KgHVRCFaj/D8gMvgUAQXzXnPf858= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0 h1:4IvL4o5uOf1PspPgjgcrxfPkyZQbgJP6VsyUi5KuSdM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0/go.mod h1:9cP+bHuftqoYmNDd8LrJ3YTzQl8S1T+qQxSeOIdLM+g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.118.0 h1:Pho1MwH+cvosN6pOinGhunBwAJyyAwFnbIW5x7N/37A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.118.0/go.mod h1:IMy3f4XjwIu+PZF9Qq5T6WZ/+mOL9l+SFjPYEQuWZh8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0 h1:8pBuMvrFhU7YLJn1uhuuv5uLz0cJUyzusFtNA//fvFI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0/go.mod h1:pPjJ7ITPSA68VT7cGK9UIJkGsvfjIJV8cjB8cnnsKr8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0 h1:DSoYrOjLv23HXpx72hl61br4ZZTj6dqtwZSGoypKWIA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0/go.mod h1:nR+r7aAbsktscJk4fGmzljblbZBMaiZcIWeKbXV+HmY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 h1:aUTSkzJExtrlHN32g8hX/cRNEo2ZmucPg+vwPqOYvhg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0/go.mod h1:a3sewj4nEozMwcNwZTHPzddS+1BnA6BaAkO/CRIGHVU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0 h1:WnOBLIbdKDdtLCmpedY35QIkCOb2yW+BxydQMEIv2Xc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0/go.mod h1:QNv8LB5TzLUHB4p413mrtLryozBRNHKwIlY2R6UirrQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 h1:zEdd1JoVEBX7Lmf/wjs+45p4rR5+HvT2iF5VcoOgK1g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0/go.mod h1:WE5ientZ87x3cySOh4D/uVUwxK82DMyCkLBJ43+ehDU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.118.0 h1:ycH2OpswYo9KWsZv7i7zaI8QQUTVZZssAC48cwThZ88= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.118.0/go.mod h1:VkFMDbe3yp1xEzLyyHhQ5SZzWFXxgzuw38SdLzEet+A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.118.0 h1:ZellmKscolOE6l5R8Cf4ndjSvXzA6sx4ItmbviMBWSQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.118.0/go.mod h1:jQKwQo7XgAUXnibEA4bq+RngO43owGFBXRqbbP50i+Y= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 h1:pC1e5BvBf8rjwGb56MiTUFEDHU2LSclaqRNUs3z9Snw= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0/go.mod h1:wZTrQ0XWb1A9XBhl1WmUKLPfqNjERKFYWT5WER70gLg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0 h1:vOVsKrrRjfOYSvOu3Qv7MIHUZSVL93tnHETBU+GGxsI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0/go.mod h1:NxqPda5zVnG8RiCgff0L2EfdIflsC/wkRTLNdlYgN/E= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0 h1:ZeOm/Hf/zCcpqIa6zbZ80uy1W0/HR/ib18rTj7cuQ4I= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0/go.mod h1:0WO9Sxt9rPjfe88wnP4SL/M09nohh3H9NX634fem0b0= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 h1:ZKedpw3/P2iAW1mkPij/AP0q4bSY/3BjH5k6K50wgmk= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0/go.mod h1:Vx5ZkbyLKL01R44rHNn6FwdVrY7x4LxLMi8f1Zmxk1g= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -667,8 +686,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -682,8 +701,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= 
-github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -706,8 +725,8 @@ github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8b github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -742,8 +761,8 @@ github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbm github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= @@ -762,8 +781,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= 
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -806,8 +825,8 @@ github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= github.com/tidwall/wal v1.1.8 h1:2qDSGdAdjaY3PEvHRva+9UFqgk+ef7cOiW1Qn5JH1y0= github.com/tidwall/wal v1.1.8/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -848,134 +867,134 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.115.0 h1:qUZ0bTeNBudMxNQ7FJKS//TxTjeJ7tfU/z22mcFavWU= -go.opentelemetry.io/collector v0.115.0/go.mod h1:66qx0xKnVvdwq60e1DEfb4e+zmM9szhPsv2hxZ/Mpj4= -go.opentelemetry.io/collector/client v1.21.0 h1:3Kes8lOFMYVxoxeAmX+DTEAkuS1iTA3NkSfqzGmygJA= -go.opentelemetry.io/collector/client v1.21.0/go.mod h1:jYJGiL0UA975OOyHmjbQSokNWt1OiviI5KjPOMUMGwc= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= -go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configauth v0.115.0 h1:xa+ALdyPgva3rZnLBh1H2oS5MsHP6JxSqMtQmcELnys= -go.opentelemetry.io/collector/config/configauth v0.115.0/go.mod h1:C7anpb3Rf4KswMT+dgOzkW9UX0z/65PLORpUw3p0VYc= -go.opentelemetry.io/collector/config/configcompression v1.21.0 h1:0zbPdZAgPFMAarwJEC4gaR6f/JBP686A3TYSgb3oa+E= -go.opentelemetry.io/collector/config/configcompression v1.21.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= -go.opentelemetry.io/collector/config/configgrpc v0.115.0 h1:gZzXSFe6hB3RUcEeAYqk1yT+TBa+X9tp6/1x29Yg2yk= -go.opentelemetry.io/collector/config/configgrpc v0.115.0/go.mod h1:107lRZ5LdQPMdGJGd4m1GhyKxyH0az2cUOqrJgTEN8E= -go.opentelemetry.io/collector/config/confighttp v0.115.0 h1:BIy394oNXnqySJwrCqgAJu4gWgAV5aQUDD6k1hy6C8o= -go.opentelemetry.io/collector/config/confighttp v0.115.0/go.mod h1:Wr50ut12NmCEAl4bWLJryw2EjUmJTtYRg89560Q51wc= -go.opentelemetry.io/collector/config/confignet v1.21.0 h1:PeQ5YrMnfftysFL/WVaSrjPOWjD6DfeABY50pf9CZxU= -go.opentelemetry.io/collector/config/confignet v1.21.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= 
-go.opentelemetry.io/collector/config/configopaque v1.21.0 h1:PcvRGkBk4Px8BQM7tX+kw4i3jBsfAHGoGQbtZg6Ox7U= -go.opentelemetry.io/collector/config/configopaque v1.21.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= -go.opentelemetry.io/collector/config/configretry v1.21.0 h1:ZHoOvAkEcv5BBeaJn8IQ6rQ4GMPZWW4S+W7R4QTEbZU= -go.opentelemetry.io/collector/config/configretry v1.21.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/config/configtls v1.21.0 h1:ZfrlAYgBD8lzp04W0GxwiDmUbrvKsvDYJi+wkyiXlpA= -go.opentelemetry.io/collector/config/configtls v1.21.0/go.mod h1:5EsNefPfVCMOTlOrr3wyj7LrsOgY7V8iqRl8oFZEqtw= -go.opentelemetry.io/collector/config/internal v0.115.0 h1:eVk57iufZpUXyPJFKTb1Ebx5tmcCyroIlt427r5pxS8= -go.opentelemetry.io/collector/config/internal v0.115.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 h1:YLf++Z8CMp86AanfOCWUiE7vKbb1kSjgC3a9VJoxbD4= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0/go.mod h1:aSWLYcmgZZJDNtWN1M8JKQuehoGgOxibl1KuvKTar4M= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 h1:+zukkM+3l426iGoJkXTpLB2Z8QnZFu26TkGPjh5Rn/4= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0/go.mod h1:BXBpQhF3n4CNLYO2n/mWZPd2U9ekpbLXLRGZrun1VfI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 h1:NYYGM+SgIlTuNGjd8eGzDr8DkvOe4q7cXon8djF9yyI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0/go.mod h1:XRYbuwqq1awFuNhLDUv4aSvn6MzqX+abcevx1O+APJI= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0 h1:2EEUI2DzA2DvrvCImMWRSNqIHdRJ6+qbgvZL44Zb2ac= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0/go.mod h1:axezjjQWY4kZc5pr/+wOKAuqSYMhea/tWzP5S30h+dc= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 h1:P3Q9RytCMY76ORPCnkkjOa4fkuFqmZiQRor+F/nPlYE= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0/go.mod h1:xhYhHK3yLQ78tsoaKPIGUfFulgy961ImOe2gATH3RQc= -go.opentelemetry.io/collector/connector v0.115.0 h1:4Kkm3HQFzNT1eliMOB8FbIn+PLMRJ2qQku5Vmy3V8Ko= -go.opentelemetry.io/collector/connector v0.115.0/go.mod h1:+ByuAmYLrYHoKh9B+LGqUc0N2kXcN2l8Dea8Mp6brZ8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 h1:aW1f4Az0I+QJyImFccNWAXqik80bnNu27aQqi2hFfD8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0/go.mod h1:lmynB1CucydOsHa8RSSBh5roUZPfuiv65imXhtNzClM= -go.opentelemetry.io/collector/connector/connectortest v0.115.0 h1:GjtourFr0MJmlbtEPAZ/1BZCxkNAeJ0aMTlrxwftJ0k= -go.opentelemetry.io/collector/connector/connectortest v0.115.0/go.mod h1:f3KQXXNlh/XuV8elmnuVVyfY92dJCAovz10gD72OH0k= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod 
h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= -go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 h1:gaIhzpaGFWauiyznrQ3f++TbcdXxA5rpsX3L9uGjMM8= -go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0/go.mod h1:7oXvuGBSawS5bc413lh1KEMcXkqBcrCqZQahOdnE24U= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= -go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= -go.opentelemetry.io/collector/exporter/debugexporter v0.115.0 h1:gb9VMQhcbvYqp0SJ4Hp8R9XqOLNLsoTgNJCPKpNEaVc= -go.opentelemetry.io/collector/exporter/debugexporter v0.115.0/go.mod h1:H/HS1UJlcZPNBbOcrsGZc2sPdQDHtbOjHOxMtJkmlcU= -go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 h1:fetbc740pODH6JW+H49SW0hiAJwQE+/B0SbuIlaY2rg= -go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0/go.mod h1:oEKZ/d5BeaCK6Made9iwaeqmlT4lRbJSlW9nhIn/TwM= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= -go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0 h1:Kqr31VFrQvgEMzeg8T1JSXWacjUQoZph39efKN8jBpY= -go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0/go.mod h1:5uy/gduFx2mH0GxJ84sY75NfzQJb9xYmgiL9Pf0dKF8= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0 h1:I0qzSWGbgph+iva5/jU8tkeUTkkqqcj8+UzMxg5ubF8= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0/go.mod h1:cUrv5EG12iOs5MXaecfi9K+ZATEELefpyZY6Hj4NlUo= -go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= -go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= -go.opentelemetry.io/collector/extension/auth v0.115.0 h1:TTMokbBsSHZRFH48PvGSJmgSS8F3Rkr9MWGHZn8eJDk= -go.opentelemetry.io/collector/extension/auth v0.115.0/go.mod h1:3w+2mzeb2OYNOO4Bi41TUo4jr32ap2y7AOq64IDpxQo= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0 h1:OZe7dKbZ01qodSpZU0ZYzI6zpmmzJ3UvfdBSFAbSgDw= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0/go.mod h1:fk9WCXP0x91Q64Z8HZKWTHh9PWtgoWE1KXe3n2Bff3U= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 h1:/g25Hp5aoCNKdDjIb3Fc7XRglO8yaBRFLO/IUNPnqNI= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0/go.mod h1:EQx7ETiy330O6q05S2KRZsRNDg0aQEeJmVl7Ipx+Fcw= 
-go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= -go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 h1:zYrZZocc7n0ZuDyXNkIaX0P0qk2fjMQj7NegwBJZA4k= -go.opentelemetry.io/collector/extension/zpagesextension v0.115.0/go.mod h1:OaXwNHF3MAcInBzCXrhXbTNHfIi9b7YGhXjtCFZqxNY= -go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= -go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 h1:6DRiSECeApFq6Jj5ug77rG53R6FzJEZBfygkyMEXdpg= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0/go.mod h1:vgQf5HQdmLQqpDHpDq2S3nTRoUuKtRcZpRTsy+UiwYw= -go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0 h1:9TL6T6ALqDpumUJ0tYIuPIg5LGo4r6eoqlNArYX116o= -go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0/go.mod h1:SgBLKMh11bOTPR1bdDZbi5MlqsoDBBFI3uBIwnei+0k= -go.opentelemetry.io/collector/otelcol v0.115.0 h1:wZhFGrSCZcTQ4qw4ePjI2PaSrOCejoQKAjprKD/xavs= -go.opentelemetry.io/collector/otelcol v0.115.0/go.mod h1:iK8DPvaizirIYKDl1zZG7DDYUj6GkkH4KHifVVM88vk= -go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0 h1:HNlFpQujlnvawBk8nvMGxzjDHWDCfSprxem/EpQn4u8= -go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0/go.mod h1:WsMbqYl2rm3nPFbdxQqyLXf4iu97nYLeuQ1seZIpV3Y= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 h1:3l9ruCAOrssTUDnyChKNzHWOdTtfThnYaoPZ1/+5sD0= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0/go.mod h1:2Myg+law/5lcezo9PhhZ0wjCaLYdGK24s1jDWbSW9VY= -go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= -go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= -go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 h1:dgw1jcE/YVFTs41b3Y7SerU3BBSyMEE93AYV+BAxR8E= -go.opentelemetry.io/collector/processor/batchprocessor v0.115.0/go.mod h1:imG1kDEq14UGlxyCjSCf1TUEFdSWRvF7tLoYX9nixEQ= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= -go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= -go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= 
-go.opentelemetry.io/collector/receiver v0.115.0 h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= -go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= -go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 h1:87dxAcHekbXqLtjcQjnK1An2PWkWAhTly+EXzPEgYOE= -go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0/go.mod h1:Llu88KNSNwvmYPRr2PMDDbVY9zHfHEbPPB4yTjjQQe0= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 h1:NqMWsGuVy6y6VKTaPeJS7NZ9KAxhE/xyGUC7GaLYm/o= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0/go.mod h1:9ituzngnjsh/YvO+Phayq9BTk/nw0rgK5ZVvX1oxULk= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/collector/service v0.115.0 h1:k4GAOiI5tZgB2QKgwA6c3TeAVr7QL/ft5cOQbzUr8Iw= -go.opentelemetry.io/collector/service v0.115.0/go.mod h1:DKde9LMhNebdREecDSsqiTFLI2wRc+IoV4/wGxU6goY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector v0.118.0 h1:OBqxppK9Ul6bzEabcHsx11pXwgp05sBpqYxIxiOkyFo= +go.opentelemetry.io/collector v0.118.0/go.mod h1:yxfijW5k9dwd9sifTBAEoItE+ahFEtOlyvex1B99uno= +go.opentelemetry.io/collector/client v1.24.0 h1:eH7ctqDnRWNH5QVVbAvdYYdkvr8QWLkEm8FUPaaYbWE= +go.opentelemetry.io/collector/client v1.24.0/go.mod h1:C/38SYPa0tTL6ikPz/glYz6f3GVzEuT4nlEml6IBDMw= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configauth v0.118.0 h1:uBH/s9kRw/m7VWuibrkCzbXSCVLf9ElKq9NuKb0wAwk= +go.opentelemetry.io/collector/config/configauth v0.118.0/go.mod h1:uAmSGkihIENoIah6mEQ8S/HX4oiFOHZu3EoZLZwi9OI= +go.opentelemetry.io/collector/config/configcompression v1.24.0 h1:jyM6BX7wYcrh+eVSC0FMbWgy/zb9iP58SerOrvisccE= +go.opentelemetry.io/collector/config/configcompression v1.24.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/configgrpc v0.118.0 h1:if8VfsnnHwVX/E+GgehVXKh85YtAtVci+c4A/M5gPh0= +go.opentelemetry.io/collector/config/configgrpc v0.118.0/go.mod h1:TZqpu5s/iEW5XmhSnzrhXCUQ3W5qaICNvlllBf3GGcw= +go.opentelemetry.io/collector/config/confighttp v0.118.0 h1:ey50dfySOCPgUPJ1x8Kq6CmNcv/TpZHt6cYmPhZItj0= 
+go.opentelemetry.io/collector/config/confighttp v0.118.0/go.mod h1:4frheVFiIfKUHuD/KAPn+u+d+EUx5GlQTNmoI1ftReA= +go.opentelemetry.io/collector/config/confignet v1.24.0 h1:Je1oO3qCUI4etX9ZVyav/NkeD+sfzZQRmwMGy51Oei4= +go.opentelemetry.io/collector/config/confignet v1.24.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.24.0 h1:EPOprMDreZPKyIgT0/eVBvEGQVvq7ncvBCBVnWerj54= +go.opentelemetry.io/collector/config/configopaque v1.24.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry v1.24.0 h1:sIPHhNNY2YlHMIJ//63iMxIqlgDeGczId0uUb1njsPM= +go.opentelemetry.io/collector/config/configretry v1.24.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.24.0 h1:rOhl8qjIlUVVRHnwQj6/vZe6cuCYImyx7aVDBR35bqI= +go.opentelemetry.io/collector/config/configtls v1.24.0/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0 h1:jAtaNR4b5gnddNzyfcpIhURSDq4rai667yV1Ngmku2Y= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0/go.mod h1:X0BuIYyscilkwApnmxlrdz0kTVWgKXq2ih8sTWm8Zio= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0 h1:QoQulv9L20MhD1TFWH1scbRoo0bxbZqF2quh1VRNMh4= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0/go.mod h1:ljIH/rWIUHJeWIDEKMRU/ufol/bcgC7ufamchtuTAwM= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0 h1:1mbj6HlVZ4LNVBYrxM5jQEJKxinpe0LtNZwI7i8pQNY= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0/go.mod h1:xM2qJmW6mB1lzFpLWIoxX/h4tUnoYTICZoqPND9YWi0= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0 h1:/Z3LvIRPJTJEu6mOqELxPiiKMfyl9sUxoZOR/qc7D1I= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0/go.mod h1:C61Rq3ppnFUoieBGiZxqDnOUKK8ZmmH2RzDXG1P+OUo= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0 h1:Ncr7a3HbVpmjAvPHd0yQM/MV2p7HqJe+zvDPmHdjSCI= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0/go.mod h1:i7omVh3uK8efpr7/fSAcOh8Xiv3FLYL26wUuON9i1WI= +go.opentelemetry.io/collector/connector v0.118.0 h1:amay4UriWrtydaAxjQ8/MTTaVYERlZcYLCAGcjoBejw= +go.opentelemetry.io/collector/connector v0.118.0/go.mod h1:R6jbMrHZYg21pZ0nsoo4cSHIn7Lrdpi5R3OWcDEQwhE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0 h1:hLMSTqtFWveXa3b1qJMEaWuaX3PHx7dfl8G/bsac2fE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0/go.mod h1:hm6TNLiQLe65NpENCFsFoiO8fOf3BbN4UF1heUsT73Q= +go.opentelemetry.io/collector/connector/xconnector v0.118.0 h1:0s6rwZmt8va6xd3BEZs7s2QBNFNjLv0kzYi6l44dKqc= +go.opentelemetry.io/collector/connector/xconnector v0.118.0/go.mod h1:12mJPGWo90iZrrpgOkmSd5TkejweL34V/R6AqwqJnMA= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0 
h1:Cx//ZFDa6wUEoRDRYRZ/Rkb52dWNoHj2e9FdlcM9jCA= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0/go.mod h1:2mhnzzLYR5zS2Zz4h9ZnRM8Uogu9qatcfQwGNenhing= +go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0 h1:/kkWdw1PQtPb1noZMTt6tbgP1ntWdJ835u1o45nYhTg= +go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0/go.mod h1:2mdXnTT0nPd/KTG9w29cc1OGKBLzL2HW+x/o7QVpCpI= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/exporter v0.118.0 h1:PE0vF2U+znOB8OVLPWNw40bGCoT/5QquQ8Xbz4i9Rb0= +go.opentelemetry.io/collector/exporter v0.118.0/go.mod h1:5ST3gxT/RzE/vg2bcGDtWJxlQF1ypwk50UpmdK1kUqY= +go.opentelemetry.io/collector/exporter/debugexporter v0.118.0 h1:MUZl270SJSU/fDpIr5cJ+JEPrK6OEsHllmKauWYhxxQ= +go.opentelemetry.io/collector/exporter/debugexporter v0.118.0/go.mod h1:SW3j4Bl3uB/nbTC1D0hog9TcelVot9RXQnScCwx8azw= +go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0 h1:wC4IyE98DR4eXVyT7EnA4iJ6s+sbUTZVq/5KoVWSKDw= +go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0/go.mod h1:spjZv9QX+pCcx/ECSqlo/UKCYJzp2rR5NsvIgpfdUxQ= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0 h1:8gWky42BcJsxoaqWbnqCDUjP3Y84hjC6RD/UWHwR7sI= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0/go.mod h1:UbpQBZvznA8YPqqcKlafVIhB6Qa4fPf2+I67MUGyNqo= +go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 h1:kfVfskZEroh3zs8HmdCLeo9weAJT5oedd+04McXEBSU= +go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0/go.mod h1:iyvbf05lZdh+KObvNF0uEpaaV9YoQNofm1RRamWbq78= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 h1:8ShK60uf6nY6TlSYBZ2Y7eh3sv0WwNkUKgmh3P1U/2U= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0/go.mod h1:UJXry//sSRs04eg35nZkT1wxP43tPxz/3wbf26eLRkc= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0 h1:PZAo1CFhZHfQwtzUNj+Fwcv/21pWHJHTsrIddD096fw= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0/go.mod h1:x4J+qyrRcp4DfWKqK3DLZomFTIUhedsqCQWqq6Gqps4= +go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA= +go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg= +go.opentelemetry.io/collector/extension/auth v0.118.0 h1:+eMNUBUK1JK9A3mr95BasbWE90Lxu+WlR9sqS36sJms= +go.opentelemetry.io/collector/extension/auth v0.118.0/go.mod h1:MJpYcRGSERkgOhczqTKoAhkHmcugr+YTlRhc/SpYYYI= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0 h1:KIORXNc71vfpQrrZOntiZesRCZtQ8alrASWVT/zZkyo= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0/go.mod h1:0ZlSP9NPAfTRQd6Tx4mOH0IWrp6ufHaVN//L9Mb87gM= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 h1:I/SjuacUXdBOxa6ZnVMuMKkZX+m40tUm+5YKqWnNv/c= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0/go.mod h1:IxDALY0rMvsENrVui7Y5tvvL/xHNgMKuhfiQiSHMiTQ= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod 
h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4= +go.opentelemetry.io/collector/extension/xextension v0.118.0 h1:P6gvJzqnH9ma2QfnWde/E6Xu9bAzuefzIwm5iupiVPE= +go.opentelemetry.io/collector/extension/xextension v0.118.0/go.mod h1:ne4Q8ZtRlbC0Etr2hTcVkjOpVM2bE2xy1u+R80LUkDw= +go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 h1:XkaLvST4p1/i/dsk5yCwFG4HJUUr6joCbegJc2MEOrE= +go.opentelemetry.io/collector/extension/zpagesextension v0.118.0/go.mod h1:alaAK7I7UeM1Hcs/eNqIjTLIZpqrk3mD1Ua42mJ7JnU= +go.opentelemetry.io/collector/featuregate v1.24.0 h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY= +go.opentelemetry.io/collector/featuregate v1.24.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 h1:affTj1Qxjbg9dZ1x2tbV9Rs9/otZQ1lHA++L8qB5KiQ= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0/go.mod h1:9mbE68mYdtTyozr3jTtNMB1RA5F8/dt2aWVYSu6bsQ4= +go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0 h1:aCiwkzBL4VyPEUBmEcTnoPyld5EClJGbwyUNJhHNgEo= +go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0/go.mod h1:drV6vD4acelEUOjM9cgxV5ILs8q2AYUh3EV+Pljdorg= +go.opentelemetry.io/collector/otelcol v0.118.0 h1:uSD3wU0sO4vsw5VvWI2yUFLggLdq1BWN/nC1LJXIhMg= +go.opentelemetry.io/collector/otelcol v0.118.0/go.mod h1:OdKz/AXj+ewCwXp/acZCBIoMIYiIxeNRNkbqUXvWi+o= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.118.0 h1:s4yLzDUPzzPElvcOqth7iOuKe+eBo8iXy6bzAy57sXA= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.118.0/go.mod h1:nNDwBOLXNHVnALpcBzkWQ/770WB3IFvEVgLjgujt3Eo= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 h1:ZUVF1MYNQYZvmuL30KfP+QbVGSbFZvldBM9hgCe4J4k= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0/go.mod h1:XgG1ktGO9J1f6fasMYPWSXL9Raan/VYB9vddKKWp5hQ= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/batchprocessor v0.118.0 h1:odyJ9l5eakr+TS8sr6U9rz53QD5ZwewL/6pLUtFTJBs= +go.opentelemetry.io/collector/processor/batchprocessor v0.118.0/go.mod h1:fcHRefknjoLMpCRQ9LKEEzrrmSFUejEaTSxCqj5lHhI= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= 
+go.opentelemetry.io/collector/receiver v0.118.0 h1:X4mspHmbbtwdCQZ7o370kNmdWfxRnK1FrsvEShCCKEc= +go.opentelemetry.io/collector/receiver v0.118.0/go.mod h1:wFyfu6sgrkDPLQoGOGMuChGZzkZnYcI/tPJWV4CRTzs= +go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0 h1:JeOZxB26tIIBshKgzhWoLsC90TLF1ftyL0JSVyFtOBk= +go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0/go.mod h1:cxUUVD5rXqBIK1ynSuR0cyJ1B8s1VWx4xZunZ31+EAM= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 h1:Nud8aaRDb86K2kBeqMTjqAKDUV00JDn+G4wUZ3hDlAk= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0/go.mod h1:MJvDEzWJnm1FMoIoTKmhlT3pPmwJP+65GKWy0lAzd30= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0 h1:XlMr2mPsyXJsMUOqCpEoY3uCPsLZQbNA5fmVNDGB7Bw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0/go.mod h1:dtu/H1RNjhy11hTVf/XUfc02uGufMhYYdhhYBbglcUg= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 h1:dzECve9e0H3ot0JWnWPuQr9Y84RhOYSd0+CjvJskx7Y= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0/go.mod h1:Lv1nD/mSYSP64iV8k+C+mWWZZOMLRubv9d1SUory3/E= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/service v0.118.0 h1:acZ9LzUbEF5M3G7o5FgenPJVuuM2y8c4HW5JVm648L4= +go.opentelemetry.io/collector/service v0.118.0/go.mod h1:uw3cl3UtkAOrEr8UQV2lXKjyTIbhWxURaQec8kE+Pic= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw= go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= @@ -988,8 +1007,8 @@ go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ= go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko= go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= @@ -1012,16 +1031,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6Bm go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 
h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ= go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1053,10 +1072,12 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1067,8 +1088,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= 
-golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1092,6 +1113,9 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1135,20 +1159,21 @@ golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 
v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1162,6 +1187,9 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1228,19 +1256,23 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -1249,6 +1281,7 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -1268,7 +1301,6 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1310,8 +1342,10 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1374,10 +1408,10 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1393,8 +1427,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1407,8 +1441,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -1462,8 +1496,6 @@ k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f/go.mod h1:S9tOR0FxgyusSNR k8s.io/utils v0.0.0-20240821151609-f90d01438635 h1:2wThSvJoW/Ncn9TmQEYXRnevZXi2duqHWf5OX9S3zjI= k8s.io/utils v0.0.0-20240821151609-f90d01438635/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler 
v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= diff --git a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml index 055f7c67144eb..1876329c2dc8f 100644 --- a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml +++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-enhanced-result.yaml @@ -7,6 +7,8 @@ exporters: site: datadoghq.com auth: null compression: "" + compression_params: + level: 0 cookies: null disable_keep_alives: false endpoint: "" @@ -58,6 +60,7 @@ exporters: multiplier: 1.5 randomization_factor: 0.5 sending_queue: + blocking: false enabled: true num_consumers: 10 queue_size: 1000 @@ -79,15 +82,15 @@ exporters: reload_interval: 0s server_name_override: "" traces: - compute_stats_by_span_kind: false + compute_stats_by_span_kind: true compute_top_level_by_span_kind: true dialer: timeout: 0s endpoint: https://trace.agent.datadoghq.com ignore_resources: [] - peer_service_aggregation: false + peer_service_aggregation: true peer_tags: [] - peer_tags_aggregation: false + peer_tags_aggregation: true span_name_as_resource_name: false span_name_remappings: {} trace_buffer: 0 diff --git a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml index d0b9a8b4b3aa8..c649aae7e1ebf 100644 --- a/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml +++ b/comp/otelcol/ddflareextension/impl/testdata/simple-dd/config-provided-result.yaml @@ -7,6 +7,8 @@ exporters: site: datadoghq.com auth: null compression: "" + compression_params: + level: 0 cookies: null disable_keep_alives: false endpoint: "" @@ -58,6 +60,7 @@ exporters: multiplier: 1.5 randomization_factor: 0.5 sending_queue: + blocking: false enabled: true num_consumers: 10 queue_size: 1000 @@ -79,15 +82,15 @@ exporters: reload_interval: 0s server_name_override: "" traces: - compute_stats_by_span_kind: false + compute_stats_by_span_kind: true compute_top_level_by_span_kind: true dialer: timeout: 0s endpoint: https://trace.agent.datadoghq.com ignore_resources: [] - peer_service_aggregation: false + peer_service_aggregation: true peer_tags: [] - peer_tags_aggregation: false + peer_tags_aggregation: true span_name_as_resource_name: false span_name_remappings: {} trace_buffer: 0 diff --git a/comp/otelcol/logsagentpipeline/go.mod b/comp/otelcol/logsagentpipeline/go.mod index 302cf0cf4787c..12a395a1199b7 100644 --- a/comp/otelcol/logsagentpipeline/go.mod +++ b/comp/otelcol/logsagentpipeline/go.mod @@ -14,6 +14,8 @@ replace ( github.com/DataDog/datadog-agent/comp/core/telemetry => ../../core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../def github.com/DataDog/datadog-agent/comp/logs/agent/config => ../../logs/agent/config + github.com/DataDog/datadog-agent/comp/serializer/logscompression => ../../serializer/logscompression + github.com/DataDog/datadog-agent/comp/serializer/metricscompression => ../../serializer/metricscompression github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults 
github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock @@ -39,6 +41,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/status/health => ../../../pkg/status/health github.com/DataDog/datadog-agent/pkg/telemetry => ../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/util/backoff => ../../../pkg/util/backoff + github.com/DataDog/datadog-agent/pkg/util/compression => ../../../pkg/util/compression github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../pkg/util/defaultpaths github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem @@ -47,7 +50,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/http => ../../../pkg/util/http github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/startstop => ../../../pkg/util/startstop @@ -63,16 +66,17 @@ replace ( require github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.56.0-rc.3 require ( - github.com/DataDog/agent-payload/v5 v5.0.138 // indirect + github.com/DataDog/agent-payload/v5 v5.0.141 // indirect github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/serializer/logscompression v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect @@ -91,20 +95,21 @@ require ( github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil 
v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.14.0 // indirect @@ -125,21 +130,21 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -152,11 +157,11 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf 
v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/otelcol/logsagentpipeline/go.sum b/comp/otelcol/logsagentpipeline/go.sum index 19075c11eaba4..d51a9571f5a6f 100644 --- a/comp/otelcol/logsagentpipeline/go.sum +++ b/comp/otelcol/logsagentpipeline/go.sum @@ -1,11 +1,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= github.com/DataDog/viper v1.14.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -79,7 +81,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -127,8 +128,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= 
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -155,8 +156,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -173,8 +174,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -188,8 +189,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -200,8 +201,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 
h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -260,8 +261,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -282,8 +283,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -305,8 +306,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -341,8 +342,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go index b014d5b31689e..9da32f7bd6739 100644 --- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go +++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent.go @@ -16,6 +16,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" "github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline" + compression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" @@ -23,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/diagnostic" "github.com/DataDog/datadog-agent/pkg/logs/pipeline" "github.com/DataDog/datadog-agent/pkg/status/health" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/startstop" "go.uber.org/fx" @@ -41,17 +42,19 @@ const ( type Dependencies struct { fx.In - Lc fx.Lifecycle - Log log.Component - Config configComponent.Component - Hostname hostnameinterface.Component + Lc fx.Lifecycle + Log log.Component + Config configComponent.Component + Hostname hostnameinterface.Component + Compression compression.Component } // Agent represents the data pipeline that collects, decodes, processes and sends logs to the backend. 
type Agent struct { - log log.Component - config pkgconfigmodel.Reader - hostname hostnameinterface.Component + log log.Component + config pkgconfigmodel.Reader + hostname hostnameinterface.Component + compression compression.Component endpoints *config.Endpoints auditor auditor.Auditor @@ -61,12 +64,12 @@ type Agent struct { } // NewLogsAgentComponent returns a new instance of Agent as a Component -func NewLogsAgentComponent(deps Dependencies) optional.Option[logsagentpipeline.Component] { +func NewLogsAgentComponent(deps Dependencies) option.Option[logsagentpipeline.Component] { logsAgent := NewLogsAgent(deps) if logsAgent == nil { - return optional.NewNoneOption[logsagentpipeline.Component]() + return option.None[logsagentpipeline.Component]() } - return optional.NewOption[logsagentpipeline.Component](logsAgent) + return option.New[logsagentpipeline.Component](logsAgent) } // NewLogsAgent returns a new instance of Agent with the given dependencies @@ -77,9 +80,10 @@ func NewLogsAgent(deps Dependencies) logsagentpipeline.LogsAgent { } logsAgent := &Agent{ - log: deps.Log, - config: deps.Config, - hostname: deps.Hostname, + log: deps.Log, + config: deps.Config, + hostname: deps.Hostname, + compression: deps.Compression, } if deps.Lc != nil { deps.Lc.Append(fx.Hook{ @@ -210,7 +214,7 @@ func (a *Agent) SetupPipeline( destinationsCtx := client.NewDestinationsContext() // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(a.config.GetInt("logs_config.pipelines"), auditor, &diagnostic.NoopMessageReceiver{}, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config) + pipelineProvider := pipeline.NewProvider(a.config.GetInt("logs_config.pipelines"), auditor, &diagnostic.NoopMessageReceiver{}, processingRules, a.endpoints, destinationsCtx, NewStatusProvider(), a.hostname, a.config, a.compression) a.auditor = auditor a.destinationsCtx = destinationsCtx diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent_test.go b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent_test.go index da2be9d72e4d7..420a813861083 100644 --- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent_test.go +++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/agent_test.go @@ -16,6 +16,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + compressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/client/http" "github.com/DataDog/datadog-agent/pkg/logs/client/mock" @@ -71,9 +72,10 @@ func createAgent(suite *AgentTestSuite, endpoints *config.Endpoints) *Agent { )) agent := &Agent{ - log: deps.Log, - config: deps.Config, - endpoints: endpoints, + log: deps.Log, + config: deps.Config, + endpoints: endpoints, + compression: compressionfx.NewMockCompressor(), } agent.setupAgent() diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod index 9e6bc558c4423..51ca32d358c90 100644 --- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod +++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.mod @@ -15,6 +15,8 @@ replace ( github.com/DataDog/datadog-agent/comp/def => ../../../def github.com/DataDog/datadog-agent/comp/logs/agent/config => 
../../../logs/agent/config github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline => ../../logsagentpipeline + github.com/DataDog/datadog-agent/comp/serializer/logscompression => ../../../serializer/logscompression + github.com/DataDog/datadog-agent/comp/serializer/metricscompression => ../../../serializer/metricscompression github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../pkg/collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../../../pkg/config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../../../pkg/config/mock @@ -40,6 +42,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/status/health => ../../../../pkg/status/health github.com/DataDog/datadog-agent/pkg/telemetry => ../../../../pkg/telemetry github.com/DataDog/datadog-agent/pkg/util/backoff => ../../../../pkg/util/backoff + github.com/DataDog/datadog-agent/pkg/util/compression => ../../../../pkg/util/compression github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../pkg/util/defaultpaths github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../pkg/util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../pkg/util/filesystem @@ -48,7 +51,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/http => ../../../../pkg/util/http github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/startstop => ../../../../pkg/util/startstop @@ -62,13 +65,14 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 - github.com/DataDog/datadog-agent/comp/core/log/mock v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 + github.com/DataDog/datadog-agent/comp/core/log/mock v0.61.0 github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/comp/serializer/logscompression v0.61.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 @@ -79,8 +83,8 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 
github.com/DataDog/datadog-agent/pkg/util/testutil v0.56.0-rc.3 github.com/stretchr/testify v1.10.0 @@ -89,16 +93,16 @@ require ( ) require ( - github.com/DataDog/agent-payload/v5 v5.0.138 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect + github.com/DataDog/agent-payload/v5 v5.0.141 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect @@ -108,21 +112,23 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/log/setup v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log/setup v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.14.0 // indirect + github.com/DataDog/zstd v1.5.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/benbjohnson/clock 
v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -140,21 +146,21 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -164,11 +170,11 @@ require ( go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.sum b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.sum index 19075c11eaba4..d51a9571f5a6f 100644 --- a/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.sum +++ b/comp/otelcol/logsagentpipeline/logsagentpipelineimpl/go.sum @@ -1,11 +1,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= 
github.com/DataDog/viper v1.14.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -79,7 +81,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -127,8 +128,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -155,8 +156,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -173,8 +174,8 @@ 
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -188,8 +189,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -200,8 +201,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -260,8 +261,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 
h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -282,8 +283,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -305,8 +306,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -341,8 +342,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod index b7d7886ae3aac..ec69f8359d65a 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.mod @@ -23,7 +23,8 @@ replace ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter => ../../../../otlp/components/exporter/serializerexporter github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient => ../../metricsclient github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil => ../../../../otlp/testutil - github.com/DataDog/datadog-agent/comp/serializer/compression => ../../../../../serializer/compression/ + github.com/DataDog/datadog-agent/comp/serializer/logscompression => ../../../../../serializer/logscompression + github.com/DataDog/datadog-agent/comp/serializer/metricscompression => ../../../../../serializer/metricscompression github.com/DataDog/datadog-agent/comp/trace/agent/def => ../../../../../../comp/trace/agent/def github.com/DataDog/datadog-agent/comp/trace/compression/def => ../../../../../../comp/trace/compression/def github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip => ../../../../../../comp/trace/compression/impl-gzip @@ -68,6 +69,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/buf => ../../../../../../pkg/util/buf/ github.com/DataDog/datadog-agent/pkg/util/cgroups => ../../../../../../pkg/util/cgroups/ github.com/DataDog/datadog-agent/pkg/util/common => ../../../../../../pkg/util/common/ + github.com/DataDog/datadog-agent/pkg/util/compression => ../../../../../../pkg/util/compression github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../../../pkg/util/defaultpaths github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../../../pkg/util/executable/ github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../../../pkg/util/filesystem/ @@ -78,7 +80,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/json => ../../../../../../pkg/util/json/ github.com/DataDog/datadog-agent/pkg/util/log => ../../../../../../pkg/util/log/ github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../../../pkg/util/optional/ + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../../../pkg/util/option/ github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../../../pkg/util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/sort => ../../../../../../pkg/util/sort/ @@ -100,60 +102,68 @@ require ( github.com/DataDog/datadog-agent/comp/trace/agent/def v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/proto v0.63.0-devel github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.6.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0 - 
github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/collector/component v0.115.0 - go.opentelemetry.io/collector/config/confignet v1.21.0 - go.opentelemetry.io/collector/config/configretry v1.21.0 - go.opentelemetry.io/collector/consumer v1.21.0 - go.opentelemetry.io/collector/exporter v0.115.0 - go.opentelemetry.io/collector/exporter/exportertest v0.115.0 - go.opentelemetry.io/collector/featuregate v1.21.0 - go.opentelemetry.io/collector/pdata v1.21.0 - go.opentelemetry.io/otel/metric v1.32.0 - go.opentelemetry.io/otel/trace v1.32.0 + go.opentelemetry.io/collector/component v0.118.0 + go.opentelemetry.io/collector/config/confignet v1.24.0 + go.opentelemetry.io/collector/config/configretry v1.24.0 + go.opentelemetry.io/collector/consumer v1.24.0 + go.opentelemetry.io/collector/exporter v0.118.0 + go.opentelemetry.io/collector/exporter/exportertest v0.118.0 + go.opentelemetry.io/collector/featuregate v1.24.0 + go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/otel/metric v1.33.0 + go.opentelemetry.io/otel/trace v1.33.0 go.uber.org/zap v1.27.0 - google.golang.org/protobuf v1.35.2 + google.golang.org/protobuf v1.36.3 ) -require go.opentelemetry.io/collector/component/componenttest v0.115.0 // indirect +require go.opentelemetry.io/collector/component/componenttest v0.118.0 // indirect require ( - github.com/pierrec/lz4/v4 v4.1.21 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receivertest v0.115.0 // indirect + go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.118.0 // indirect + go.opentelemetry.io/collector/extension/xextension v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 // indirect ) require ( - github.com/DataDog/agent-payload/v5 v5.0.138 // indirect - github.com/DataDog/datadog-agent/comp/core/config v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/builder v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/types v0.57.1 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/receivertest v0.118.0 // indirect +) + +require ( + github.com/DataDog/agent-payload/v5 v5.0.141 // indirect + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.0.0-20241217122454-175edb6c74f2 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.1 // indirect 
github.com/DataDog/datadog-agent/comp/core/telemetry v0.57.1 // indirect github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/serializer/compression v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/serializer/logscompression v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/serializer/metricscompression v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/trace/compression/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/api v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect @@ -182,30 +192,31 @@ require ( github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect 
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect - github.com/DataDog/datadog-api-client-go/v2 v2.33.0 // indirect + github.com/DataDog/datadog-api-client-go/v2 v2.34.0 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect - github.com/DataDog/go-sqllexer v0.0.17 // indirect + github.com/DataDog/go-sqllexer v0.0.20 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/DataDog/zstd v1.5.6 // indirect @@ -217,7 +228,7 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect - github.com/containerd/cgroups/v3 v3.0.4 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/go-units v0.5.0 // indirect @@ -230,10 +241,10 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/mock v1.6.0 // indirect + github.com/golang/mock v1.7.0-rc.1 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect github.com/google/uuid v1.6.0 // indirect @@ -250,7 +261,7 @@ require ( github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/knadh/koanf/v2 v2.1.2 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -262,7 +273,7 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/outcaste-io/ristretto v0.2.3 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect @@ -271,65 +282,64 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 // indirect github.com/rs/cors v1.11.1 // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stormcat24/protodep v0.1.8 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/tinylib/msgp v1.2.4 // indirect + github.com/tinylib/msgp v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/twmb/murmur3 v1.1.8 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/collector/client v1.21.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.115.0 // indirect - go.opentelemetry.io/collector/config/configcompression v1.21.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.115.0 // indirect - go.opentelemetry.io/collector/config/configopaque v1.21.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/config/configtls v1.21.0 // indirect - go.opentelemetry.io/collector/config/internal v0.115.0 // indirect - go.opentelemetry.io/collector/confmap v1.21.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/extension v0.115.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.115.0 // indirect - go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/receiver v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/semconv v0.115.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/client v1.24.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.118.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.24.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.118.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.24.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/config/configtls v1.24.0 // indirect + go.opentelemetry.io/collector/confmap v1.24.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 // indirect + 
go.opentelemetry.io/collector/extension v0.118.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/collector/receiver v0.118.0 // indirect + go.opentelemetry.io/collector/semconv v0.118.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.8.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) + +// github.com/golang/mock is unmaintained and archived, v1.6.0 is the last released version +replace github.com/golang/mock => github.com/golang/mock v1.6.0 diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum index a86b1d8b49cf7..f2b901dbbfbc4 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/go.sum @@ -1,31 +1,31 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= -github.com/DataDog/datadog-api-client-go/v2 v2.33.0 h1:OI6kDnJeQmkjfGzxmP0XUQUxMD4tp6oAPXnnJ4VpgUM= -github.com/DataDog/datadog-api-client-go/v2 v2.33.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/datadog-api-client-go/v2 v2.34.0 h1:0VVmv8uZg8vdBuEpiF2nBGUezl2QITrxdEsLgh38j8M= +github.com/DataDog/datadog-api-client-go/v2 v2.34.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U= github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go 
v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= -github.com/DataDog/go-sqllexer v0.0.17 h1:u47fJAVg/+5DA74ZW3w0Qu+3qXHd3GtnA8ZBYixdPrM= -github.com/DataDog/go-sqllexer v0.0.17/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.20 h1:0fBknHo42yuhawZS3GtuQSdqcwaiojWjYNT6OdsZRfI= +github.com/DataDog/go-sqllexer v0.0.20/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0 h1:r1Dx2cRHCBWkVluSZA41i4eoI/nOGbcrrZdkqWjoFCc= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0/go.mod h1:+/dkO8ZiMa8rfm4SmtTF6qPUdBbBcvsWWKaO4xPKAIk= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0 h1:cXcKVEU1D0HlguR7GunnvuI70TghkarCa9DApqzMY94= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0/go.mod h1:ES00EXfyEKgUkjd93tAXCxJA6i0seeOhZoS5Cj2qzzg= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0 h1:Zqj8YUZ/ualUhM8GDCQX6xKnUJKEiG0eYdFGWmIDG30= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0/go.mod h1:lpr4q6g2TB0BHeLHaz/XleKm8YXQjuxiQEb9Q9HXXE0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0 h1:w9+ngZDYUMLW+GSRA8x1DvVbuMR+cwlGb8VLwZfgBGs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0/go.mod h1:UsfqLgiD6Sjhpjkg+YzAd+TdKUZ2m6ZZ8t+tEkLNTMA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 h1:63SzQz9Ab8XJj8fQKQz6UZNBhOm8rucwzbDfwTVF6dQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0/go.mod h1:E/PY/aQ6S/N5hBPHXZRGmovs5b1BSi4RHGNcB4yP/Z0= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0 h1:Fth9wZCAVbIUvlKq/QXT7QINza+epFaKtIvy1qqybbg= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0/go.mod h1:7D+x/7CIdzklC9spgB3lrg8GUvIW52Y8SMONrBCiPbw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0 h1:ttW3C3IN8p1goqyvaVpT4Blzg3lQ+sh4MTtB33BbpdE= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0/go.mod h1:FpUbxBqKdi16CDJnRifUzmkETaEYR75xvh2Vo8vvJN0= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0 h1:wZaNTYVo2WIHzvn8GBAH4FNbXac5A+hfETeK0YxYYnw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0/go.mod h1:0JvUXmUWULz1XU0RTaNPLgces6LJvI/FinPO5suiJOo= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0 
h1:dG1rn794tdEpU+fqHumwx/Ngcc7uVPlJT/xt/4L1lmQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0/go.mod h1:UWDxETdZ0XK3lpVJ4JYa16oYhu5H6IluXPrDtyvMIwU= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 h1:Uha4TTkbCcYTvUbkbfvUjUmxtPaPKCOtwwl91erkRRg= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0/go.mod h1:RWoMSFb2Q+L0FSRYctEt8Wp0em+InUg+Oe+BU30e7gA= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= @@ -62,8 +62,8 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4= -github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -118,8 +118,8 @@ github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss= github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -131,7 +131,6 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -148,7 +147,6 @@ github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -215,8 +213,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -248,20 +246,20 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 h1:a36EJz/mb83f6ieX0v4fNDJ1jXqpeaM6DVQXeFDvdhw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0/go.mod h1:r5/40YO1eSP5ZreOmRzVOUtDr7YG39ZIUcVjHd+9Izc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0 h1:WOqt8NpU/JPGYDR4CiWx7g/sHV6Oe9FChzhushwmVdo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0/go.mod h1:wV/+iU7MyXcyTaY8K5Qx+1Z3yUzrxA40nydPQA476Iw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 h1:MerLKMrkM4YoGF6Di0D9yMXO02yCX8mrZAi/+jJVVeI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0/go.mod h1:R8AkVWe9G5Q0oMOapvm9HNS076E3Min8SVlmhBL3QD0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 h1:WEqcnWSy9dNSlGb8pYRBX7zhaz2ReyaeImlenbzNTB4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0/go.mod h1:6Mk71CakHUA3I6oM9hARDiyQypYyOolvb+4PFYyVEFg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 h1:eoapW0JBablApkdv4C1RUuOKfz0U6SwuKMYYSAJH6fE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0/go.mod 
h1:hW2AaybTRcwxJySGLC3Fh1vd2VDaQhRBfa7O7w30NS8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 h1:R9MRrO+dSkAHBQLZjuwjv2RHXHQqF2Wtm1Ki0VKD5cs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0/go.mod h1:rKXLXmwdUVcUHwTilroKSejbg3KSwLeYzNPSpkIEnv4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 h1:vwZQ7k8oqlK0bdZYTsjP/59zjQQfjSD4fNsWIWsTu2w= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0/go.mod h1:5ObSa9amrbzbYTdAK1Qhv3D/YqCxxnQhP0sk2eWB7Oo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 h1:Xnwd0QEyBg6iNPUbc3CnHIb0hVjfTc+jHdFbA9VSa7k= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0/go.mod h1:rmqCuNFMNBUxgyufeU8rpVYOWau8ubr0gmSO1u+R5Hk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0 h1:4IvL4o5uOf1PspPgjgcrxfPkyZQbgJP6VsyUi5KuSdM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0/go.mod h1:9cP+bHuftqoYmNDd8LrJ3YTzQl8S1T+qQxSeOIdLM+g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.117.0 h1:/wMNk8w1UEHKpKoNk1jA2aifHgfGZE+WelGNrCf0CJ0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.117.0/go.mod h1:ESyMNHmgZYh8Ouhr2veecTMK6sB8gQ8u2s3dsy9Og6k= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.117.0 h1:GqlhXd6J8zgxCYenbI3ew03SJnGec1vEEGzGHw9X/Y0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.117.0/go.mod h1:OGylX+Bp+urSNNGoI1XG7U6vaRDZk1wN/w6fHP1F7IY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0 h1:WnOBLIbdKDdtLCmpedY35QIkCOb2yW+BxydQMEIv2Xc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0/go.mod h1:QNv8LB5TzLUHB4p413mrtLryozBRNHKwIlY2R6UirrQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 h1:zEdd1JoVEBX7Lmf/wjs+45p4rR5+HvT2iF5VcoOgK1g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0/go.mod h1:WE5ientZ87x3cySOh4D/uVUwxK82DMyCkLBJ43+ehDU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 h1:pC1e5BvBf8rjwGb56MiTUFEDHU2LSclaqRNUs3z9Snw= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0/go.mod h1:wZTrQ0XWb1A9XBhl1WmUKLPfqNjERKFYWT5WER70gLg= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= @@ -274,8 +272,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= 
+github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -285,8 +283,8 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -303,8 +301,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -326,8 +324,8 @@ github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod 
h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -339,8 +337,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -369,8 +367,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -394,94 +392,94 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io/collector/client v1.21.0 h1:3Kes8lOFMYVxoxeAmX+DTEAkuS1iTA3NkSfqzGmygJA= -go.opentelemetry.io/collector/client v1.21.0/go.mod h1:jYJGiL0UA975OOyHmjbQSokNWt1OiviI5KjPOMUMGwc= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= -go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configauth v0.115.0 h1:xa+ALdyPgva3rZnLBh1H2oS5MsHP6JxSqMtQmcELnys= -go.opentelemetry.io/collector/config/configauth v0.115.0/go.mod h1:C7anpb3Rf4KswMT+dgOzkW9UX0z/65PLORpUw3p0VYc= -go.opentelemetry.io/collector/config/configcompression v1.21.0 
h1:0zbPdZAgPFMAarwJEC4gaR6f/JBP686A3TYSgb3oa+E= -go.opentelemetry.io/collector/config/configcompression v1.21.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= -go.opentelemetry.io/collector/config/confighttp v0.115.0 h1:BIy394oNXnqySJwrCqgAJu4gWgAV5aQUDD6k1hy6C8o= -go.opentelemetry.io/collector/config/confighttp v0.115.0/go.mod h1:Wr50ut12NmCEAl4bWLJryw2EjUmJTtYRg89560Q51wc= -go.opentelemetry.io/collector/config/confignet v1.21.0 h1:PeQ5YrMnfftysFL/WVaSrjPOWjD6DfeABY50pf9CZxU= -go.opentelemetry.io/collector/config/confignet v1.21.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= -go.opentelemetry.io/collector/config/configopaque v1.21.0 h1:PcvRGkBk4Px8BQM7tX+kw4i3jBsfAHGoGQbtZg6Ox7U= -go.opentelemetry.io/collector/config/configopaque v1.21.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= -go.opentelemetry.io/collector/config/configretry v1.21.0 h1:ZHoOvAkEcv5BBeaJn8IQ6rQ4GMPZWW4S+W7R4QTEbZU= -go.opentelemetry.io/collector/config/configretry v1.21.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/config/configtls v1.21.0 h1:ZfrlAYgBD8lzp04W0GxwiDmUbrvKsvDYJi+wkyiXlpA= -go.opentelemetry.io/collector/config/configtls v1.21.0/go.mod h1:5EsNefPfVCMOTlOrr3wyj7LrsOgY7V8iqRl8oFZEqtw= -go.opentelemetry.io/collector/config/internal v0.115.0 h1:eVk57iufZpUXyPJFKTb1Ebx5tmcCyroIlt427r5pxS8= -go.opentelemetry.io/collector/config/internal v0.115.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= -go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= -go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= 
-go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= -go.opentelemetry.io/collector/extension/auth v0.115.0 h1:TTMokbBsSHZRFH48PvGSJmgSS8F3Rkr9MWGHZn8eJDk= -go.opentelemetry.io/collector/extension/auth v0.115.0/go.mod h1:3w+2mzeb2OYNOO4Bi41TUo4jr32ap2y7AOq64IDpxQo= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0 h1:OZe7dKbZ01qodSpZU0ZYzI6zpmmzJ3UvfdBSFAbSgDw= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0/go.mod h1:fk9WCXP0x91Q64Z8HZKWTHh9PWtgoWE1KXe3n2Bff3U= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= -go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= -go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= -go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= -go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= -go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= -go.opentelemetry.io/collector/receiver v0.115.0 h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= -go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod 
h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/client v1.24.0 h1:eH7ctqDnRWNH5QVVbAvdYYdkvr8QWLkEm8FUPaaYbWE= +go.opentelemetry.io/collector/client v1.24.0/go.mod h1:C/38SYPa0tTL6ikPz/glYz6f3GVzEuT4nlEml6IBDMw= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configauth v0.118.0 h1:uBH/s9kRw/m7VWuibrkCzbXSCVLf9ElKq9NuKb0wAwk= +go.opentelemetry.io/collector/config/configauth v0.118.0/go.mod h1:uAmSGkihIENoIah6mEQ8S/HX4oiFOHZu3EoZLZwi9OI= +go.opentelemetry.io/collector/config/configcompression v1.24.0 h1:jyM6BX7wYcrh+eVSC0FMbWgy/zb9iP58SerOrvisccE= +go.opentelemetry.io/collector/config/configcompression v1.24.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/confighttp v0.118.0 h1:ey50dfySOCPgUPJ1x8Kq6CmNcv/TpZHt6cYmPhZItj0= +go.opentelemetry.io/collector/config/confighttp v0.118.0/go.mod h1:4frheVFiIfKUHuD/KAPn+u+d+EUx5GlQTNmoI1ftReA= +go.opentelemetry.io/collector/config/confignet v1.24.0 h1:Je1oO3qCUI4etX9ZVyav/NkeD+sfzZQRmwMGy51Oei4= +go.opentelemetry.io/collector/config/confignet v1.24.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.24.0 h1:EPOprMDreZPKyIgT0/eVBvEGQVvq7ncvBCBVnWerj54= +go.opentelemetry.io/collector/config/configopaque v1.24.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry v1.24.0 h1:sIPHhNNY2YlHMIJ//63iMxIqlgDeGczId0uUb1njsPM= +go.opentelemetry.io/collector/config/configretry v1.24.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.24.0 h1:rOhl8qjIlUVVRHnwQj6/vZe6cuCYImyx7aVDBR35bqI= +go.opentelemetry.io/collector/config/configtls v1.24.0/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0 h1:Cx//ZFDa6wUEoRDRYRZ/Rkb52dWNoHj2e9FdlcM9jCA= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0/go.mod h1:2mhnzzLYR5zS2Zz4h9ZnRM8Uogu9qatcfQwGNenhing= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 
h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/exporter v0.118.0 h1:PE0vF2U+znOB8OVLPWNw40bGCoT/5QquQ8Xbz4i9Rb0= +go.opentelemetry.io/collector/exporter v0.118.0/go.mod h1:5ST3gxT/RzE/vg2bcGDtWJxlQF1ypwk50UpmdK1kUqY= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0 h1:8gWky42BcJsxoaqWbnqCDUjP3Y84hjC6RD/UWHwR7sI= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0/go.mod h1:UbpQBZvznA8YPqqcKlafVIhB6Qa4fPf2+I67MUGyNqo= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0 h1:PZAo1CFhZHfQwtzUNj+Fwcv/21pWHJHTsrIddD096fw= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0/go.mod h1:x4J+qyrRcp4DfWKqK3DLZomFTIUhedsqCQWqq6Gqps4= +go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA= +go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg= +go.opentelemetry.io/collector/extension/auth v0.118.0 h1:+eMNUBUK1JK9A3mr95BasbWE90Lxu+WlR9sqS36sJms= +go.opentelemetry.io/collector/extension/auth v0.118.0/go.mod h1:MJpYcRGSERkgOhczqTKoAhkHmcugr+YTlRhc/SpYYYI= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0 h1:KIORXNc71vfpQrrZOntiZesRCZtQ8alrASWVT/zZkyo= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0/go.mod h1:0ZlSP9NPAfTRQd6Tx4mOH0IWrp6ufHaVN//L9Mb87gM= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4= +go.opentelemetry.io/collector/extension/xextension v0.118.0 h1:P6gvJzqnH9ma2QfnWde/E6Xu9bAzuefzIwm5iupiVPE= +go.opentelemetry.io/collector/extension/xextension v0.118.0/go.mod h1:ne4Q8ZtRlbC0Etr2hTcVkjOpVM2bE2xy1u+R80LUkDw= +go.opentelemetry.io/collector/featuregate v1.24.0 h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY= +go.opentelemetry.io/collector/featuregate v1.24.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod 
h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/receiver v0.118.0 h1:X4mspHmbbtwdCQZ7o370kNmdWfxRnK1FrsvEShCCKEc= +go.opentelemetry.io/collector/receiver v0.118.0/go.mod h1:wFyfu6sgrkDPLQoGOGMuChGZzkZnYcI/tPJWV4CRTzs= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0 h1:XlMr2mPsyXJsMUOqCpEoY3uCPsLZQbNA5fmVNDGB7Bw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0/go.mod h1:dtu/H1RNjhy11hTVf/XUfc02uGufMhYYdhhYBbglcUg= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 h1:dzECve9e0H3ot0JWnWPuQr9Y84RhOYSd0+CjvJskx7Y= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0/go.mod h1:Lv1nD/mSYSP64iV8k+C+mWWZZOMLRubv9d1SUory3/E= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -507,11 +505,11 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto 
v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -534,11 +532,11 @@ golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -568,11 +566,11 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 
h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -607,19 +605,19 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/otelcol/otlp/components/exporter/datadogexporter/traces_exporter_test.go b/comp/otelcol/otlp/components/exporter/datadogexporter/traces_exporter_test.go index 
e17c24f8553a6..d600b5a4d99d9 100644 --- a/comp/otelcol/otlp/components/exporter/datadogexporter/traces_exporter_test.go +++ b/comp/otelcol/otlp/components/exporter/datadogexporter/traces_exporter_test.go @@ -90,8 +90,8 @@ func testTraceExporter(enableReceiveResourceSpansV2 bool, t *testing.T) { tcfg.TraceWriter.FlushPeriodSeconds = 0.1 tcfg.Endpoints[0].APIKey = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" tcfg.Endpoints[0].Host = server.URL - if enableReceiveResourceSpansV2 { - tcfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + tcfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } ctx := context.Background() traceagent := pkgagent.NewAgent(ctx, tcfg, telemetry.NewNoopCollector(), &ddgostatsd.NoOpClient{}, gzip.NewComponent()) @@ -132,8 +132,8 @@ func testNewTracesExporter(enableReceiveResourceSpansV2 bool, t *testing.T) { tcfg.Endpoints[0].APIKey = "ddog_32_characters_long_api_key1" ctx := context.Background() tcfg.ReceiverEnabled = false - if enableReceiveResourceSpansV2 { - tcfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + tcfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } traceagent := pkgagent.NewAgent(ctx, tcfg, telemetry.NewNoopCollector(), &ddgostatsd.NoOpClient{}, gzip.NewComponent()) diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod index 1bd29cf7cf9ea..2b821e8197990 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.mod @@ -31,7 +31,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../../../../../pkg/util/statstracker @@ -47,23 +47,30 @@ require ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0 github.com/stormcat24/protodep v0.1.8 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/collector/component v0.115.0 - go.opentelemetry.io/collector/exporter v0.115.0 - go.opentelemetry.io/collector/exporter/exportertest v0.115.0 - go.opentelemetry.io/collector/pdata v1.21.0 + go.opentelemetry.io/collector/component v0.118.0 + go.opentelemetry.io/collector/exporter v0.118.0 + go.opentelemetry.io/collector/exporter/exportertest v0.118.0 + go.opentelemetry.io/collector/pdata v1.24.0 +) + +require 
( + go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.118.0 // indirect + go.opentelemetry.io/collector/extension/xextension v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 // indirect ) require ( github.com/hashicorp/go-version v1.7.0 // indirect - go.opentelemetry.io/collector/component/componenttest v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect - go.opentelemetry.io/collector/featuregate v1.21.0 // indirect - go.opentelemetry.io/collector/receiver/receivertest v0.115.0 // indirect + go.opentelemetry.io/collector/component/componenttest v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.0 // indirect + go.opentelemetry.io/collector/featuregate v1.24.0 // indirect + go.opentelemetry.io/collector/receiver/receivertest v0.118.0 // indirect ) require ( @@ -71,8 +78,8 @@ require ( github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect @@ -80,18 +87,18 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/proto v0.55.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect - github.com/DataDog/datadog-api-client-go/v2 v2.33.0 // indirect - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0 // indirect + github.com/DataDog/datadog-api-client-go/v2 v2.34.0 // indirect + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/DataDog/zstd v1.5.6 
// indirect @@ -106,14 +113,14 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/goccy/go-json v0.10.3 // indirect + github.com/goccy/go-json v0.10.4 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -125,46 +132,43 @@ require ( github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tinylib/msgp v1.2.4 // indirect + github.com/tinylib/msgp v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/collector/config/configretry v1.21.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/consumer v1.21.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/extension v0.115.0 // indirect - go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/receiver v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/semconv v0.115.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.24.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/consumer v1.24.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 // indirect + go.opentelemetry.io/collector/extension v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + 
go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/collector/receiver v0.118.0 // indirect + go.opentelemetry.io/collector/semconv v0.118.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/oauth2 v0.25.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum index d1856ceabaf7e..543e856f555c6 100644 --- a/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/logsagentexporter/go.sum @@ -1,13 +1,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/datadog-api-client-go/v2 v2.33.0 h1:OI6kDnJeQmkjfGzxmP0XUQUxMD4tp6oAPXnnJ4VpgUM= -github.com/DataDog/datadog-api-client-go/v2 v2.33.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0 h1:r1Dx2cRHCBWkVluSZA41i4eoI/nOGbcrrZdkqWjoFCc= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0/go.mod h1:+/dkO8ZiMa8rfm4SmtTF6qPUdBbBcvsWWKaO4xPKAIk= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0 h1:Zqj8YUZ/ualUhM8GDCQX6xKnUJKEiG0eYdFGWmIDG30= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0/go.mod h1:lpr4q6g2TB0BHeLHaz/XleKm8YXQjuxiQEb9Q9HXXE0= +github.com/DataDog/datadog-api-client-go/v2 v2.34.0 h1:0VVmv8uZg8vdBuEpiF2nBGUezl2QITrxdEsLgh38j8M= +github.com/DataDog/datadog-api-client-go/v2 v2.34.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0 h1:Fth9wZCAVbIUvlKq/QXT7QINza+epFaKtIvy1qqybbg= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0/go.mod h1:7D+x/7CIdzklC9spgB3lrg8GUvIW52Y8SMONrBCiPbw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= 
+github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0 h1:wZaNTYVo2WIHzvn8GBAH4FNbXac5A+hfETeK0YxYYnw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0/go.mod h1:0JvUXmUWULz1XU0RTaNPLgces6LJvI/FinPO5suiJOo= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= @@ -79,8 +79,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= -github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= @@ -100,7 +100,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -147,8 +146,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -187,8 +186,8 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -205,8 +204,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -219,8 +218,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -231,8 +230,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= 
-github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -252,8 +251,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -274,62 +273,64 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configretry v1.21.0 h1:ZHoOvAkEcv5BBeaJn8IQ6rQ4GMPZWW4S+W7R4QTEbZU= -go.opentelemetry.io/collector/config/configretry v1.21.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod 
h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= -go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= -go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= -go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= -go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= -go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/receiver v0.115.0 h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= -go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= 
-go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configretry v1.24.0 h1:sIPHhNNY2YlHMIJ//63iMxIqlgDeGczId0uUb1njsPM= +go.opentelemetry.io/collector/config/configretry v1.24.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0 h1:Cx//ZFDa6wUEoRDRYRZ/Rkb52dWNoHj2e9FdlcM9jCA= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0/go.mod h1:2mhnzzLYR5zS2Zz4h9ZnRM8Uogu9qatcfQwGNenhing= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/exporter v0.118.0 h1:PE0vF2U+znOB8OVLPWNw40bGCoT/5QquQ8Xbz4i9Rb0= +go.opentelemetry.io/collector/exporter v0.118.0/go.mod h1:5ST3gxT/RzE/vg2bcGDtWJxlQF1ypwk50UpmdK1kUqY= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0 h1:8gWky42BcJsxoaqWbnqCDUjP3Y84hjC6RD/UWHwR7sI= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0/go.mod h1:UbpQBZvznA8YPqqcKlafVIhB6Qa4fPf2+I67MUGyNqo= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0 h1:PZAo1CFhZHfQwtzUNj+Fwcv/21pWHJHTsrIddD096fw= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0/go.mod h1:x4J+qyrRcp4DfWKqK3DLZomFTIUhedsqCQWqq6Gqps4= +go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA= +go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg= 
+go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4= +go.opentelemetry.io/collector/extension/xextension v0.118.0 h1:P6gvJzqnH9ma2QfnWde/E6Xu9bAzuefzIwm5iupiVPE= +go.opentelemetry.io/collector/extension/xextension v0.118.0/go.mod h1:ne4Q8ZtRlbC0Etr2hTcVkjOpVM2bE2xy1u+R80LUkDw= +go.opentelemetry.io/collector/featuregate v1.24.0 h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY= +go.opentelemetry.io/collector/featuregate v1.24.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/receiver v0.118.0 h1:X4mspHmbbtwdCQZ7o370kNmdWfxRnK1FrsvEShCCKEc= +go.opentelemetry.io/collector/receiver v0.118.0/go.mod h1:wFyfu6sgrkDPLQoGOGMuChGZzkZnYcI/tPJWV4CRTzs= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0 h1:XlMr2mPsyXJsMUOqCpEoY3uCPsLZQbNA5fmVNDGB7Bw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0/go.mod h1:dtu/H1RNjhy11hTVf/XUfc02uGufMhYYdhhYBbglcUg= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 h1:dzECve9e0H3ot0JWnWPuQr9Y84RhOYSd0+CjvJskx7Y= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0/go.mod h1:Lv1nD/mSYSP64iV8k+C+mWWZZOMLRubv9d1SUory3/E= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -355,8 +356,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -377,11 +378,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -404,10 +405,10 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -439,17 +440,17 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/consumer.go b/comp/otelcol/otlp/components/exporter/serializerexporter/consumer.go index 945b743771e89..e4a8bed35a223 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/consumer.go +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/consumer.go @@ -25,6 +25,52 @@ import ( "github.com/tinylib/msgp/msgp" ) +var metricOriginsMappings = map[otlpmetrics.OriginProductDetail]metrics.MetricSource{ + otlpmetrics.OriginProductDetailUnknown: metrics.MetricSourceOpenTelemetryCollectorUnknown, + otlpmetrics.OriginProductDetailDockerStatsReceiver: metrics.MetricSourceOpenTelemetryCollectorDockerstatsReceiver, + otlpmetrics.OriginProductDetailElasticsearchReceiver: metrics.MetricSourceOpenTelemetryCollectorElasticsearchReceiver, + otlpmetrics.OriginProductDetailExpVarReceiver: metrics.MetricSourceOpenTelemetryCollectorExpvarReceiver, + 
otlpmetrics.OriginProductDetailFileStatsReceiver: metrics.MetricSourceOpenTelemetryCollectorFilestatsReceiver, + otlpmetrics.OriginProductDetailFlinkMetricsReceiver: metrics.MetricSourceOpenTelemetryCollectorFlinkmetricsReceiver, + otlpmetrics.OriginProductDetailGitProviderReceiver: metrics.MetricSourceOpenTelemetryCollectorGitproviderReceiver, + otlpmetrics.OriginProductDetailHAProxyReceiver: metrics.MetricSourceOpenTelemetryCollectorHaproxyReceiver, + otlpmetrics.OriginProductDetailHostMetricsReceiver: metrics.MetricSourceOpenTelemetryCollectorHostmetricsReceiver, + otlpmetrics.OriginProductDetailHTTPCheckReceiver: metrics.MetricSourceOpenTelemetryCollectorHttpcheckReceiver, + otlpmetrics.OriginProductDetailIISReceiver: metrics.MetricSourceOpenTelemetryCollectorIisReceiver, + otlpmetrics.OriginProductDetailK8SClusterReceiver: metrics.MetricSourceOpenTelemetryCollectorK8sclusterReceiver, + otlpmetrics.OriginProductDetailKafkaMetricsReceiver: metrics.MetricSourceOpenTelemetryCollectorKafkametricsReceiver, + otlpmetrics.OriginProductDetailKubeletStatsReceiver: metrics.MetricSourceOpenTelemetryCollectorKubeletstatsReceiver, + otlpmetrics.OriginProductDetailMemcachedReceiver: metrics.MetricSourceOpenTelemetryCollectorMemcachedReceiver, + otlpmetrics.OriginProductDetailMongoDBAtlasReceiver: metrics.MetricSourceOpenTelemetryCollectorMongodbatlasReceiver, + otlpmetrics.OriginProductDetailMongoDBReceiver: metrics.MetricSourceOpenTelemetryCollectorMongodbReceiver, + otlpmetrics.OriginProductDetailMySQLReceiver: metrics.MetricSourceOpenTelemetryCollectorMysqlReceiver, + otlpmetrics.OriginProductDetailNginxReceiver: metrics.MetricSourceOpenTelemetryCollectorNginxReceiver, + otlpmetrics.OriginProductDetailNSXTReceiver: metrics.MetricSourceOpenTelemetryCollectorNsxtReceiver, + otlpmetrics.OriginProductDetailOracleDBReceiver: metrics.MetricSourceOpenTelemetryCollectorOracledbReceiver, + otlpmetrics.OriginProductDetailPostgreSQLReceiver: metrics.MetricSourceOpenTelemetryCollectorPostgresqlReceiver, + otlpmetrics.OriginProductDetailPrometheusReceiver: metrics.MetricSourceOpenTelemetryCollectorPrometheusReceiver, + otlpmetrics.OriginProductDetailRabbitMQReceiver: metrics.MetricSourceOpenTelemetryCollectorRabbitmqReceiver, + otlpmetrics.OriginProductDetailRedisReceiver: metrics.MetricSourceOpenTelemetryCollectorRedisReceiver, + otlpmetrics.OriginProductDetailRiakReceiver: metrics.MetricSourceOpenTelemetryCollectorRiakReceiver, + otlpmetrics.OriginProductDetailSAPHANAReceiver: metrics.MetricSourceOpenTelemetryCollectorSaphanaReceiver, + otlpmetrics.OriginProductDetailSNMPReceiver: metrics.MetricSourceOpenTelemetryCollectorSnmpReceiver, + otlpmetrics.OriginProductDetailSnowflakeReceiver: metrics.MetricSourceOpenTelemetryCollectorSnowflakeReceiver, + otlpmetrics.OriginProductDetailSplunkEnterpriseReceiver: metrics.MetricSourceOpenTelemetryCollectorSplunkenterpriseReceiver, + otlpmetrics.OriginProductDetailSQLServerReceiver: metrics.MetricSourceOpenTelemetryCollectorSqlserverReceiver, + otlpmetrics.OriginProductDetailSSHCheckReceiver: metrics.MetricSourceOpenTelemetryCollectorSshcheckReceiver, + otlpmetrics.OriginProductDetailStatsDReceiver: metrics.MetricSourceOpenTelemetryCollectorStatsdReceiver, + otlpmetrics.OriginProductDetailVCenterReceiver: metrics.MetricSourceOpenTelemetryCollectorVcenterReceiver, + otlpmetrics.OriginProductDetailZookeeperReceiver: metrics.MetricSourceOpenTelemetryCollectorZookeeperReceiver, + otlpmetrics.OriginProductDetailActiveDirectoryDSReceiver: 
metrics.MetricSourceOpenTelemetryCollectorActiveDirectorydsReceiver, + otlpmetrics.OriginProductDetailAerospikeReceiver: metrics.MetricSourceOpenTelemetryCollectorAerospikeReceiver, + otlpmetrics.OriginProductDetailApacheReceiver: metrics.MetricSourceOpenTelemetryCollectorApacheReceiver, + otlpmetrics.OriginProductDetailApacheSparkReceiver: metrics.MetricSourceOpenTelemetryCollectorApachesparkReceiver, + otlpmetrics.OriginProductDetailAzureMonitorReceiver: metrics.MetricSourceOpenTelemetryCollectorAzuremonitorReceiver, + otlpmetrics.OriginProductDetailBigIPReceiver: metrics.MetricSourceOpenTelemetryCollectorBigipReceiver, + otlpmetrics.OriginProductDetailChronyReceiver: metrics.MetricSourceOpenTelemetryCollectorChronyReceiver, + otlpmetrics.OriginProductDetailCouchDBReceiver: metrics.MetricSourceOpenTelemetryCollectorCouchdbReceiver, +} + var _ otlpmetrics.Consumer = (*serializerConsumer)(nil) type serializerConsumer struct { @@ -48,6 +94,10 @@ func (c *serializerConsumer) ConsumeAPMStats(ss *pb.ClientStatsPayload) { } func (c *serializerConsumer) ConsumeSketch(ctx context.Context, dimensions *otlpmetrics.Dimensions, ts uint64, qsketch *quantile.Sketch) { + msrc, ok := metricOriginsMappings[dimensions.OriginProductDetail()] + if !ok { + msrc = metrics.MetricSourceOpenTelemetryCollectorUnknown + } c.sketches = append(c.sketches, &metrics.SketchSeries{ Name: dimensions.Name(), Tags: tagset.CompositeTagsFromSlice(c.enricher.Enrich(ctx, c.extraTags, dimensions)), @@ -57,6 +107,7 @@ func (c *serializerConsumer) ConsumeSketch(ctx context.Context, dimensions *otlp Ts: int64(ts / 1e9), Sketch: qsketch, }}, + Source: msrc, }) } @@ -71,6 +122,10 @@ func apiTypeFromTranslatorType(typ otlpmetrics.DataType) metrics.APIMetricType { } func (c *serializerConsumer) ConsumeTimeSeries(ctx context.Context, dimensions *otlpmetrics.Dimensions, typ otlpmetrics.DataType, ts uint64, value float64) { + msrc, ok := metricOriginsMappings[dimensions.OriginProductDetail()] + if !ok { + msrc = metrics.MetricSourceOpenTelemetryCollectorUnknown + } c.series = append(c.series, &metrics.Serie{ Name: dimensions.Name(), @@ -79,6 +134,7 @@ func (c *serializerConsumer) ConsumeTimeSeries(ctx context.Context, dimensions * Host: dimensions.Host(), MType: apiTypeFromTranslatorType(typ), Interval: 0, // OTLP metrics do not have an interval. 
+ Source: msrc, }, ) } diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/exporter_test.go b/comp/otelcol/otlp/components/exporter/serializerexporter/exporter_test.go index d5588e36cadbe..b27c5335c8dc2 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/exporter_test.go +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/exporter_test.go @@ -54,13 +54,14 @@ func (r *metricRecorder) SendIterableSeries(s metrics.SerieSource) error { return nil } +const ( + histogramMetricName = "test.histogram" + numberMetricName = "test.gauge" + histogramRuntimeMetricName = "process.runtime.dotnet.exceptions.count" + numberRuntimeMetricName = "process.runtime.go.goroutines" +) + func Test_ConsumeMetrics_Tags(t *testing.T) { - const ( - histogramMetricName = "test.histogram" - numberMetricName = "test.gauge" - histogramRuntimeMetricName = "process.runtime.dotnet.exceptions.count" - numberRuntimeMetricName = "process.runtime.go.goroutines" - ) tests := []struct { name string genMetrics func(t *testing.T) pmetric.Metrics @@ -218,6 +219,110 @@ func Test_ConsumeMetrics_Tags(t *testing.T) { } } +func Test_ConsumeMetrics_MetricOrigins(t *testing.T) { + tests := []struct { + name string + genMetrics func(t *testing.T) pmetric.Metrics + msrc metrics.MetricSource + }{ + { + name: "metric origin in sketches", + genMetrics: func(_ *testing.T) pmetric.Metrics { + md := pmetric.NewMetrics() + rms := md.ResourceMetrics() + rm := rms.AppendEmpty() + ilms := rm.ScopeMetrics() + ilm := ilms.AppendEmpty() + ilm.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver/memory") + metricsArray := ilm.Metrics() + met := metricsArray.AppendEmpty() + met.SetName(histogramMetricName) + met.SetEmptyHistogram() + met.Histogram().SetAggregationTemporality(pmetric.AggregationTemporalityDelta) + hdps := met.Histogram().DataPoints() + hdp := hdps.AppendEmpty() + hdp.BucketCounts().FromRaw([]uint64{100}) + hdp.SetCount(100) + hdp.SetSum(0) + return md + }, + msrc: metrics.MetricSourceOpenTelemetryCollectorHostmetricsReceiver, + }, + { + name: "metric origin in timeseries", + genMetrics: func(_ *testing.T) pmetric.Metrics { + md := pmetric.NewMetrics() + rms := md.ResourceMetrics() + rm := rms.AppendEmpty() + ilms := rm.ScopeMetrics() + ilm := ilms.AppendEmpty() + ilm.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/kubeletstatsreceiver") + metricsArray := ilm.Metrics() + met := metricsArray.AppendEmpty() + met.SetName(numberMetricName) + met.SetEmptyGauge() + gdps := met.Gauge().DataPoints() + gdp := gdps.AppendEmpty() + gdp.SetIntValue(100) + return md + }, + msrc: metrics.MetricSourceOpenTelemetryCollectorKubeletstatsReceiver, + }, + { + name: "unknown metric origin", + genMetrics: func(_ *testing.T) pmetric.Metrics { + md := pmetric.NewMetrics() + rms := md.ResourceMetrics() + rm := rms.AppendEmpty() + ilms := rm.ScopeMetrics() + ilm := ilms.AppendEmpty() + ilm.Scope().SetName("github.com/open-telemetry/opentelemetry-collector-contrib/receiver/myreceiver") + metricsArray := ilm.Metrics() + met := metricsArray.AppendEmpty() + met.SetName(numberMetricName) + met.SetEmptyGauge() + gdps := met.Gauge().DataPoints() + gdp := gdps.AppendEmpty() + gdp.SetIntValue(100) + return md + }, + msrc: metrics.MetricSourceOpenTelemetryCollectorUnknown, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + rec := &metricRecorder{} + ctx := context.Background() + f := NewFactory(rec, &MockTagEnricher{}, 
func(context.Context) (string, error) { + return "", nil + }, nil, nil) + cfg := f.CreateDefaultConfig().(*ExporterConfig) + exp, err := f.CreateMetrics( + ctx, + exportertest.NewNopSettings(), + cfg, + ) + require.NoError(t, err) + require.NoError(t, exp.Start(ctx, componenttest.NewNopHost())) + require.NoError(t, exp.ConsumeMetrics(ctx, tt.genMetrics(t))) + require.NoError(t, exp.Shutdown(ctx)) + + for _, serie := range rec.series { + if serie.Name != numberMetricName { + continue + } + assert.Equal(t, serie.Source, tt.msrc) + } + for _, sketch := range rec.sketchSeriesList { + if sketch.Name != histogramMetricName { + continue + } + assert.Equal(t, sketch.Source, tt.msrc) + } + }) + } +} + func newMetrics( histogramMetricName string, histogramDataPoint pmetric.HistogramDataPoint, diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod index 1c14c95bf9c38..fae224629289a 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.mod @@ -11,11 +11,13 @@ replace ( github.com/DataDog/datadog-agent/comp/core/log/mock => ../../../../../core/log/mock github.com/DataDog/datadog-agent/comp/core/secrets => ../../../../../core/secrets github.com/DataDog/datadog-agent/comp/core/status => ../../../../../core/status + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection => ../../../../../core/tagger/origindetection github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../../../core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../../../def github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder => ../../../../../forwarder/defaultforwarder github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface => ../../../../../forwarder/orchestrator/orchestratorinterface - github.com/DataDog/datadog-agent/comp/serializer/compression => ../../../../../serializer/compression/ + github.com/DataDog/datadog-agent/comp/serializer/logscompression => ../../../../../serializer/logscompression + github.com/DataDog/datadog-agent/comp/serializer/metricscompression => ../../../../../serializer/metricscompression github.com/DataDog/datadog-agent/pkg/aggregator/ckey => ../../../../../../pkg/aggregator/ckey github.com/DataDog/datadog-agent/pkg/api => ../../../../../../pkg/api github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../../../../pkg/collector/check/defaults @@ -42,6 +44,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/backoff => ../../../../../../pkg/util/backoff/ github.com/DataDog/datadog-agent/pkg/util/buf => ../../../../../../pkg/util/buf/ github.com/DataDog/datadog-agent/pkg/util/common => ../../../../../../pkg/util/common/ + github.com/DataDog/datadog-agent/pkg/util/compression => ../../../../../../pkg/util/compression/ github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../../../../pkg/util/defaultpaths github.com/DataDog/datadog-agent/pkg/util/executable => ../../../../../../pkg/util/executable/ github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../../../../pkg/util/filesystem/ @@ -51,7 +54,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/json => ../../../../../../pkg/util/json/ github.com/DataDog/datadog-agent/pkg/util/log => ../../../../../../pkg/util/log/ github.com/DataDog/datadog-agent/pkg/util/log/setup => ../../../../../../pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => 
../../../../../../pkg/util/optional/ + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../../../pkg/util/option/ github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../../../pkg/util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/sort => ../../../../../../pkg/util/sort/ @@ -65,50 +68,49 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/metrics v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/proto v0.63.0-devel github.com/DataDog/datadog-agent/pkg/serializer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.61.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0 github.com/stretchr/testify v1.10.0 - github.com/tinylib/msgp v1.2.4 - go.opentelemetry.io/collector/component v0.115.0 - go.opentelemetry.io/collector/config/confignet v1.21.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/confmap v1.21.0 // indirect - go.opentelemetry.io/collector/consumer v1.21.0 - go.opentelemetry.io/collector/exporter v0.115.0 - go.opentelemetry.io/collector/extension v0.115.0 // indirect - go.opentelemetry.io/collector/pdata v1.21.0 - go.opentelemetry.io/collector/receiver v0.115.0 // indirect - go.opentelemetry.io/collector/semconv v0.115.0 // indirect + github.com/tinylib/msgp v1.2.5 + go.opentelemetry.io/collector/component v0.118.0 + go.opentelemetry.io/collector/config/confignet v1.24.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/confmap v1.24.0 // indirect + go.opentelemetry.io/collector/consumer v1.24.0 + go.opentelemetry.io/collector/exporter v0.118.0 + go.opentelemetry.io/collector/extension v0.118.0 // indirect + go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/collector/receiver v0.118.0 // indirect + go.opentelemetry.io/collector/semconv v0.118.0 // indirect go.uber.org/multierr v1.11.0 ) require ( - github.com/DataDog/agent-payload/v5 v5.0.138 // indirect - github.com/DataDog/datadog-agent/comp/core/config v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/builder v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/types v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/agent-payload/v5 v5.0.141 // indirect + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect + 
github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/def v0.57.1 // indirect + github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/serializer/compression v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/api v0.57.1 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect @@ -122,18 +124,18 @@ require ( github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.57.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect @@ -167,7 +169,7 @@ require ( 
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect github.com/knadh/koanf/v2 v2.1.2 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -180,15 +182,15 @@ require ( github.com/pelletier/go-toml v1.9.5 // indirect github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 // indirect github.com/rs/cors v1.11.1 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -197,62 +199,68 @@ require ( github.com/tklauser/numcpus v0.8.0 // indirect github.com/twmb/murmur3 v1.1.8 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/collector/config/configretry v1.21.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.115.0 // indirect - go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.24.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.23.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // 
indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) require ( - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0 - go.opentelemetry.io/collector/component/componenttest v0.115.0 - go.opentelemetry.io/collector/exporter/exportertest v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0 + go.opentelemetry.io/collector/component/componenttest v0.118.0 + go.opentelemetry.io/collector/exporter/exportertest v0.118.0 +) + +require ( + go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/xexporter v0.118.0 // indirect + go.opentelemetry.io/collector/extension/xextension v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 // indirect ) require ( github.com/hashicorp/go-version v1.7.0 // indirect - go.opentelemetry.io/collector/featuregate v1.21.0 // indirect + go.opentelemetry.io/collector/featuregate v1.24.0 // indirect ) require ( + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.1 // indirect + github.com/DataDog/datadog-agent/comp/serializer/metricscompression v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 // indirect github.com/ebitengine/purego v0.8.1 // indirect github.com/knadh/koanf/maps v0.1.1 // indirect github.com/knadh/koanf/providers/confmap v0.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect - go.opentelemetry.io/collector/client v1.21.0 // indirect - go.opentelemetry.io/collector/config/configauth v0.115.0 // indirect - go.opentelemetry.io/collector/config/configcompression v1.21.0 // indirect - go.opentelemetry.io/collector/config/confighttp v0.115.0 // indirect - go.opentelemetry.io/collector/config/configopaque v1.21.0 // indirect - go.opentelemetry.io/collector/config/configtls v1.21.0 // indirect - go.opentelemetry.io/collector/config/internal v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect - go.opentelemetry.io/collector/extension/auth v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receivertest v0.115.0 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/client v1.24.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.118.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.24.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.118.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.24.0 // indirect + go.opentelemetry.io/collector/config/configtls v1.24.0 // indirect + go.opentelemetry.io/collector/consumer/consumererror v0.118.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/receivertest 
v0.118.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect ) diff --git a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum index 0b852b1fd8dd7..d98aa2d7a6ebb 100644 --- a/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum +++ b/comp/otelcol/otlp/components/exporter/serializerexporter/go.sum @@ -1,17 +1,17 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0 h1:cXcKVEU1D0HlguR7GunnvuI70TghkarCa9DApqzMY94= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0/go.mod h1:ES00EXfyEKgUkjd93tAXCxJA6i0seeOhZoS5Cj2qzzg= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0 h1:w9+ngZDYUMLW+GSRA8x1DvVbuMR+cwlGb8VLwZfgBGs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0/go.mod h1:UsfqLgiD6Sjhpjkg+YzAd+TdKUZ2m6ZZ8t+tEkLNTMA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 h1:63SzQz9Ab8XJj8fQKQz6UZNBhOm8rucwzbDfwTVF6dQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0/go.mod h1:E/PY/aQ6S/N5hBPHXZRGmovs5b1BSi4RHGNcB4yP/Z0= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0 h1:ttW3C3IN8p1goqyvaVpT4Blzg3lQ+sh4MTtB33BbpdE= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0/go.mod h1:FpUbxBqKdi16CDJnRifUzmkETaEYR75xvh2Vo8vvJN0= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0 h1:dG1rn794tdEpU+fqHumwx/Ngcc7uVPlJT/xt/4L1lmQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0/go.mod h1:UWDxETdZ0XK3lpVJ4JYa16oYhu5H6IluXPrDtyvMIwU= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 h1:Uha4TTkbCcYTvUbkbfvUjUmxtPaPKCOtwwl91erkRRg= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0/go.mod h1:RWoMSFb2Q+L0FSRYctEt8Wp0em+InUg+Oe+BU30e7gA= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= @@ -112,7 +112,6 @@ github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -177,8 +176,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lightstep/go-expohisto v1.0.0 h1:UPtTS1rGdtehbbAF7o/dhkWLTDI73UifG8LbfQI7cA4= github.com/lightstep/go-expohisto v1.0.0/go.mod h1:xDXD0++Mu2FOaItXtdDfksfgxfV0z1TMPa+e/EUd0cs= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -208,16 +207,16 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 h1:a36EJz/mb83f6ieX0v4fNDJ1jXqpeaM6DVQXeFDvdhw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0/go.mod h1:r5/40YO1eSP5ZreOmRzVOUtDr7YG39ZIUcVjHd+9Izc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0 h1:WOqt8NpU/JPGYDR4CiWx7g/sHV6Oe9FChzhushwmVdo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0/go.mod h1:wV/+iU7MyXcyTaY8K5Qx+1Z3yUzrxA40nydPQA476Iw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 h1:MerLKMrkM4YoGF6Di0D9yMXO02yCX8mrZAi/+jJVVeI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0/go.mod h1:R8AkVWe9G5Q0oMOapvm9HNS076E3Min8SVlmhBL3QD0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 h1:WEqcnWSy9dNSlGb8pYRBX7zhaz2ReyaeImlenbzNTB4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0/go.mod h1:6Mk71CakHUA3I6oM9hARDiyQypYyOolvb+4PFYyVEFg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 h1:eoapW0JBablApkdv4C1RUuOKfz0U6SwuKMYYSAJH6fE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0/go.mod 
h1:hW2AaybTRcwxJySGLC3Fh1vd2VDaQhRBfa7O7w30NS8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 h1:Xnwd0QEyBg6iNPUbc3CnHIb0hVjfTc+jHdFbA9VSa7k= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0/go.mod h1:rmqCuNFMNBUxgyufeU8rpVYOWau8ubr0gmSO1u+R5Hk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0 h1:4IvL4o5uOf1PspPgjgcrxfPkyZQbgJP6VsyUi5KuSdM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0/go.mod h1:9cP+bHuftqoYmNDd8LrJ3YTzQl8S1T+qQxSeOIdLM+g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.117.0 h1:/wMNk8w1UEHKpKoNk1jA2aifHgfGZE+WelGNrCf0CJ0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.117.0/go.mod h1:ESyMNHmgZYh8Ouhr2veecTMK6sB8gQ8u2s3dsy9Og6k= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.117.0 h1:GqlhXd6J8zgxCYenbI3ew03SJnGec1vEEGzGHw9X/Y0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.117.0/go.mod h1:OGylX+Bp+urSNNGoI1XG7U6vaRDZk1wN/w6fHP1F7IY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0 h1:WnOBLIbdKDdtLCmpedY35QIkCOb2yW+BxydQMEIv2Xc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0/go.mod h1:QNv8LB5TzLUHB4p413mrtLryozBRNHKwIlY2R6UirrQ= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= @@ -226,8 +225,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -236,8 +235,8 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= 
+github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -254,8 +253,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -275,8 +274,8 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA= github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -287,8 +286,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -309,8 +308,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -333,86 +332,86 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io/collector/client v1.21.0 h1:3Kes8lOFMYVxoxeAmX+DTEAkuS1iTA3NkSfqzGmygJA= -go.opentelemetry.io/collector/client v1.21.0/go.mod h1:jYJGiL0UA975OOyHmjbQSokNWt1OiviI5KjPOMUMGwc= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configauth v0.115.0 h1:xa+ALdyPgva3rZnLBh1H2oS5MsHP6JxSqMtQmcELnys= -go.opentelemetry.io/collector/config/configauth v0.115.0/go.mod h1:C7anpb3Rf4KswMT+dgOzkW9UX0z/65PLORpUw3p0VYc= -go.opentelemetry.io/collector/config/configcompression v1.21.0 h1:0zbPdZAgPFMAarwJEC4gaR6f/JBP686A3TYSgb3oa+E= -go.opentelemetry.io/collector/config/configcompression v1.21.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= -go.opentelemetry.io/collector/config/confighttp v0.115.0 h1:BIy394oNXnqySJwrCqgAJu4gWgAV5aQUDD6k1hy6C8o= -go.opentelemetry.io/collector/config/confighttp v0.115.0/go.mod h1:Wr50ut12NmCEAl4bWLJryw2EjUmJTtYRg89560Q51wc= -go.opentelemetry.io/collector/config/confignet v1.21.0 h1:PeQ5YrMnfftysFL/WVaSrjPOWjD6DfeABY50pf9CZxU= -go.opentelemetry.io/collector/config/confignet v1.21.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= -go.opentelemetry.io/collector/config/configopaque v1.21.0 h1:PcvRGkBk4Px8BQM7tX+kw4i3jBsfAHGoGQbtZg6Ox7U= -go.opentelemetry.io/collector/config/configopaque v1.21.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= -go.opentelemetry.io/collector/config/configretry v1.21.0 h1:ZHoOvAkEcv5BBeaJn8IQ6rQ4GMPZWW4S+W7R4QTEbZU= -go.opentelemetry.io/collector/config/configretry v1.21.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/config/configtls v1.21.0 h1:ZfrlAYgBD8lzp04W0GxwiDmUbrvKsvDYJi+wkyiXlpA= -go.opentelemetry.io/collector/config/configtls 
v1.21.0/go.mod h1:5EsNefPfVCMOTlOrr3wyj7LrsOgY7V8iqRl8oFZEqtw= -go.opentelemetry.io/collector/config/internal v0.115.0 h1:eVk57iufZpUXyPJFKTb1Ebx5tmcCyroIlt427r5pxS8= -go.opentelemetry.io/collector/config/internal v0.115.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= -go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= -go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= -go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= -go.opentelemetry.io/collector/extension/auth v0.115.0 h1:TTMokbBsSHZRFH48PvGSJmgSS8F3Rkr9MWGHZn8eJDk= -go.opentelemetry.io/collector/extension/auth v0.115.0/go.mod h1:3w+2mzeb2OYNOO4Bi41TUo4jr32ap2y7AOq64IDpxQo= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0 h1:OZe7dKbZ01qodSpZU0ZYzI6zpmmzJ3UvfdBSFAbSgDw= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0/go.mod h1:fk9WCXP0x91Q64Z8HZKWTHh9PWtgoWE1KXe3n2Bff3U= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= -go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= -go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= 
-go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/receiver v0.115.0 h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= -go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/client v1.24.0 h1:eH7ctqDnRWNH5QVVbAvdYYdkvr8QWLkEm8FUPaaYbWE= +go.opentelemetry.io/collector/client v1.24.0/go.mod h1:C/38SYPa0tTL6ikPz/glYz6f3GVzEuT4nlEml6IBDMw= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configauth v0.118.0 h1:uBH/s9kRw/m7VWuibrkCzbXSCVLf9ElKq9NuKb0wAwk= +go.opentelemetry.io/collector/config/configauth v0.118.0/go.mod h1:uAmSGkihIENoIah6mEQ8S/HX4oiFOHZu3EoZLZwi9OI= +go.opentelemetry.io/collector/config/configcompression v1.24.0 h1:jyM6BX7wYcrh+eVSC0FMbWgy/zb9iP58SerOrvisccE= +go.opentelemetry.io/collector/config/configcompression v1.24.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/confighttp v0.118.0 h1:ey50dfySOCPgUPJ1x8Kq6CmNcv/TpZHt6cYmPhZItj0= +go.opentelemetry.io/collector/config/confighttp v0.118.0/go.mod h1:4frheVFiIfKUHuD/KAPn+u+d+EUx5GlQTNmoI1ftReA= +go.opentelemetry.io/collector/config/confignet v1.24.0 h1:Je1oO3qCUI4etX9ZVyav/NkeD+sfzZQRmwMGy51Oei4= +go.opentelemetry.io/collector/config/confignet v1.24.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.24.0 h1:EPOprMDreZPKyIgT0/eVBvEGQVvq7ncvBCBVnWerj54= +go.opentelemetry.io/collector/config/configopaque v1.24.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry v1.24.0 h1:sIPHhNNY2YlHMIJ//63iMxIqlgDeGczId0uUb1njsPM= 
+go.opentelemetry.io/collector/config/configretry v1.24.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.24.0 h1:rOhl8qjIlUVVRHnwQj6/vZe6cuCYImyx7aVDBR35bqI= +go.opentelemetry.io/collector/config/configtls v1.24.0/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0 h1:Cx//ZFDa6wUEoRDRYRZ/Rkb52dWNoHj2e9FdlcM9jCA= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0/go.mod h1:2mhnzzLYR5zS2Zz4h9ZnRM8Uogu9qatcfQwGNenhing= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/exporter v0.118.0 h1:PE0vF2U+znOB8OVLPWNw40bGCoT/5QquQ8Xbz4i9Rb0= +go.opentelemetry.io/collector/exporter v0.118.0/go.mod h1:5ST3gxT/RzE/vg2bcGDtWJxlQF1ypwk50UpmdK1kUqY= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0 h1:8gWky42BcJsxoaqWbnqCDUjP3Y84hjC6RD/UWHwR7sI= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0/go.mod h1:UbpQBZvznA8YPqqcKlafVIhB6Qa4fPf2+I67MUGyNqo= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0 h1:PZAo1CFhZHfQwtzUNj+Fwcv/21pWHJHTsrIddD096fw= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0/go.mod h1:x4J+qyrRcp4DfWKqK3DLZomFTIUhedsqCQWqq6Gqps4= +go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA= +go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg= +go.opentelemetry.io/collector/extension/auth v0.118.0 h1:+eMNUBUK1JK9A3mr95BasbWE90Lxu+WlR9sqS36sJms= +go.opentelemetry.io/collector/extension/auth v0.118.0/go.mod h1:MJpYcRGSERkgOhczqTKoAhkHmcugr+YTlRhc/SpYYYI= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0 h1:KIORXNc71vfpQrrZOntiZesRCZtQ8alrASWVT/zZkyo= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0/go.mod h1:0ZlSP9NPAfTRQd6Tx4mOH0IWrp6ufHaVN//L9Mb87gM= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4= +go.opentelemetry.io/collector/extension/xextension v0.118.0 h1:P6gvJzqnH9ma2QfnWde/E6Xu9bAzuefzIwm5iupiVPE= +go.opentelemetry.io/collector/extension/xextension v0.118.0/go.mod h1:ne4Q8ZtRlbC0Etr2hTcVkjOpVM2bE2xy1u+R80LUkDw= +go.opentelemetry.io/collector/featuregate v1.24.0 h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY= +go.opentelemetry.io/collector/featuregate v1.24.0/go.mod 
h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/receiver v0.118.0 h1:X4mspHmbbtwdCQZ7o370kNmdWfxRnK1FrsvEShCCKEc= +go.opentelemetry.io/collector/receiver v0.118.0/go.mod h1:wFyfu6sgrkDPLQoGOGMuChGZzkZnYcI/tPJWV4CRTzs= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0 h1:XlMr2mPsyXJsMUOqCpEoY3uCPsLZQbNA5fmVNDGB7Bw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0/go.mod h1:dtu/H1RNjhy11hTVf/XUfc02uGufMhYYdhhYBbglcUg= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 h1:dzECve9e0H3ot0JWnWPuQr9Y84RhOYSd0+CjvJskx7Y= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0/go.mod h1:Lv1nD/mSYSP64iV8k+C+mWWZZOMLRubv9d1SUory3/E= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod 
h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -438,8 +437,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -460,8 +459,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -485,8 +484,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -518,19 +517,19 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/otelcol/otlp/components/metricsclient/go.mod b/comp/otelcol/otlp/components/metricsclient/go.mod index 9f3e33feb8bf7..60f91091c0dcb 100644 --- a/comp/otelcol/otlp/components/metricsclient/go.mod +++ b/comp/otelcol/otlp/components/metricsclient/go.mod @@ -8,9 +8,9 @@ require ( github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.6.0 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.32.0 - go.opentelemetry.io/otel/metric v1.32.0 - go.opentelemetry.io/otel/sdk/metric v1.32.0 + go.opentelemetry.io/otel v1.33.0 + go.opentelemetry.io/otel/metric v1.33.0 + go.opentelemetry.io/otel/sdk/metric v1.33.0 ) require ( @@ -19,13 +19,11 @@ require ( github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/kr/pretty v0.3.1 // indirect github.com/pmezard/go-difflib 
v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/rogpeppe/go-internal v1.13.1 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/sys v0.28.0 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + golang.org/x/sys v0.29.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/otelcol/otlp/components/metricsclient/go.sum b/comp/otelcol/otlp/components/metricsclient/go.sum index a5181f913d8e8..6208b5282a9d3 100644 --- a/comp/otelcol/otlp/components/metricsclient/go.sum +++ b/comp/otelcol/otlp/components/metricsclient/go.sum @@ -3,7 +3,6 @@ github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUF github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -18,19 +17,14 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -46,16 +40,18 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.10.0 
h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -64,8 +60,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -75,8 
+71,8 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod index 0faa1b34bcd0e..07c504169dfc3 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.mod @@ -1,6 +1,6 @@ module github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor -go 1.22.0 +go 1.23.0 replace ( github.com/DataDog/datadog-agent/comp/api/api/def => ../../../../../api/api/def @@ -24,7 +24,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../../../../pkg/util/system @@ -34,23 +34,28 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.61.0 github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/collector/component v0.115.0 - go.opentelemetry.io/collector/component/componenttest v0.115.0 - go.opentelemetry.io/collector/confmap v1.21.0 - go.opentelemetry.io/collector/consumer v1.21.0 - go.opentelemetry.io/collector/consumer/consumertest v0.115.0 - go.opentelemetry.io/collector/pdata v1.21.0 - go.opentelemetry.io/collector/processor v0.115.0 - go.opentelemetry.io/collector/processor/processortest v0.115.0 - go.opentelemetry.io/collector/semconv v0.115.0 + go.opentelemetry.io/collector/component v0.118.0 + go.opentelemetry.io/collector/component/componenttest v0.118.0 + go.opentelemetry.io/collector/confmap v1.24.0 + go.opentelemetry.io/collector/consumer v1.24.0 + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 + go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/collector/processor v0.118.0 + go.opentelemetry.io/collector/processor/processortest v0.118.0 + 
go.opentelemetry.io/collector/semconv v0.118.0 go.opentelemetry.io/otel/metric v1.32.0 go.opentelemetry.io/otel/trace v1.32.0 go.uber.org/zap v1.27.0 ) +require ( + go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/processor/xprocessor v0.118.0 // indirect +) + require ( github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.59.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -68,13 +73,11 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - go.opentelemetry.io/collector/component/componentstatus v0.115.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/testdata v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.118.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect go.opentelemetry.io/otel v1.32.0 // indirect go.opentelemetry.io/otel/sdk v1.32.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect @@ -83,7 +86,7 @@ require ( golang.org/x/sys v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum index 05605cfdfae08..bf39b9338e82f 100644 --- a/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum +++ b/comp/otelcol/otlp/components/processor/infraattributesprocessor/go.sum @@ -11,6 +11,8 @@ github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIx github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -50,38 +52,38 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= -go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= -go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= -go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= -go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod 
h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= 
go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= @@ -131,10 +133,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/comp/otelcol/otlp/components/statsprocessor/agent_test.go b/comp/otelcol/otlp/components/statsprocessor/agent_test.go index 9c28867fed8d3..75f8fe7a3b491 100644 --- a/comp/otelcol/otlp/components/statsprocessor/agent_test.go +++ b/comp/otelcol/otlp/components/statsprocessor/agent_test.go @@ -59,9 +59,10 @@ func testTraceAgent(enableReceiveResourceSpansV2 bool, t *testing.T) { require.NoError(t, err) cfg.OTLPReceiver.AttributesTranslator = attributesTranslator cfg.BucketInterval = 50 * time.Millisecond - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } + cfg.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} out := make(chan *pb.StatsPayload, 10) ctx := context.Background() _, metricClient, timingReporter := setupMetricClient() @@ -109,6 +110,7 @@ func testTraceAgent(enableReceiveResourceSpansV2 bool, t *testing.T) { assert.Greater(t, len(bucket.Stats), 0) actual = append(actual, bucket.Stats...) 
} + assert.Equal(t, "Internal", cspayload.Stats[0].Stats[0].Name) } case <-timeout: t.Fatal("timed out") diff --git a/comp/otelcol/otlp/components/statsprocessor/go.mod b/comp/otelcol/otlp/components/statsprocessor/go.mod index cff45d337dcd1..28acd03be303b 100644 --- a/comp/otelcol/otlp/components/statsprocessor/go.mod +++ b/comp/otelcol/otlp/components/statsprocessor/go.mod @@ -24,14 +24,14 @@ require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.6.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/collector/component/componenttest v0.115.0 - go.opentelemetry.io/collector/pdata v1.21.0 - go.opentelemetry.io/otel/sdk/metric v1.32.0 + go.opentelemetry.io/collector/component/componenttest v0.118.0 + go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/otel/sdk/metric v1.33.0 ) -require go.opentelemetry.io/collector/component v0.115.0 // indirect +require go.opentelemetry.io/collector/component v0.118.0 // indirect require ( github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.0.0-20241217122454-175edb6c74f2 // indirect @@ -43,13 +43,13 @@ require ( github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect - github.com/DataDog/go-sqllexer v0.0.17 // indirect + github.com/DataDog/go-sqllexer v0.0.20 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect - github.com/containerd/cgroups/v3 v3.0.4 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/go-units v0.5.0 // indirect @@ -60,7 +60,7 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/mock v1.6.0 // indirect + github.com/golang/mock v1.7.0-rc.1 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/uuid v1.6.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -77,29 +77,33 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect - github.com/tinylib/msgp v1.2.4 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect + github.com/tinylib/msgp v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/semconv v0.115.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + 
go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/semconv v0.118.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.8.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace github.com/DataDog/datadog-agent/pkg/version => ../../../../../pkg/version + +// github.com/golang/mock is unmaintained and archived, v1.6.0 is the last released version +replace github.com/golang/mock => github.com/golang/mock v1.6.0 diff --git a/comp/otelcol/otlp/components/statsprocessor/go.sum b/comp/otelcol/otlp/components/statsprocessor/go.sum index abd898f9bed2a..c28b03c3ee2b0 100644 --- a/comp/otelcol/otlp/components/statsprocessor/go.sum +++ b/comp/otelcol/otlp/components/statsprocessor/go.sum @@ -1,11 +1,11 @@ github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.17 h1:u47fJAVg/+5DA74ZW3w0Qu+3qXHd3GtnA8ZBYixdPrM= -github.com/DataDog/go-sqllexer v0.0.17/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.20 h1:0fBknHo42yuhawZS3GtuQSdqcwaiojWjYNT6OdsZRfI= +github.com/DataDog/go-sqllexer v0.0.20/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= @@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog 
v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= -github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4= -github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -80,10 +80,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 h1:R9MRrO+dSkAHBQLZjuwjv2RHXHQqF2Wtm1Ki0VKD5cs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0/go.mod h1:rKXLXmwdUVcUHwTilroKSejbg3KSwLeYzNPSpkIEnv4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 h1:vwZQ7k8oqlK0bdZYTsjP/59zjQQfjSD4fNsWIWsTu2w= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0/go.mod h1:5ObSa9amrbzbYTdAK1Qhv3D/YqCxxnQhP0sk2eWB7Oo= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 h1:zEdd1JoVEBX7Lmf/wjs+45p4rR5+HvT2iF5VcoOgK1g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0/go.mod h1:WE5ientZ87x3cySOh4D/uVUwxK82DMyCkLBJ43+ehDU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 h1:pC1e5BvBf8rjwGb56MiTUFEDHU2LSclaqRNUs3z9Snw= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0/go.mod h1:wZTrQ0XWb1A9XBhl1WmUKLPfqNjERKFYWT5WER70gLg= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= @@ -103,8 +103,8 @@ github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -119,8 +119,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -134,46 +134,48 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= -go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod 
h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= -go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= -go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= -go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= 
+go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -186,8 +188,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= 
-golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -196,8 +198,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -214,8 +216,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -234,12 +236,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/comp/otelcol/otlp/config.go b/comp/otelcol/otlp/config.go index ce56626e2f3e8..1ee62509c467a 100644 --- a/comp/otelcol/otlp/config.go +++ b/comp/otelcol/otlp/config.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/configcheck" coreconfig "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/util" + tagutil "github.com/DataDog/datadog-agent/pkg/util/tags" ) func portToUint(v int) (port uint, err error) { @@ -52,7 +52,7 @@ func FromAgentConfig(cfg config.Reader) (PipelineConfig, error) { metricsConfigMap["apm_stats_receiver_addr"] = fmt.Sprintf("http://localhost:%s/v0.6/stats", coreconfig.Datadog().GetString("apm_config.receiver_port")) } - tags := strings.Join(util.GetStaticTagsSlice(context.TODO(), cfg), ",") + tags := strings.Join(tagutil.GetStaticTagsSlice(context.TODO(), cfg), ",") if tags != "" { metricsConfigMap["tags"] = tags } diff --git a/comp/otelcol/otlp/integrationtest/integration_test.go b/comp/otelcol/otlp/integrationtest/integration_test.go index 003850a691979..8e9cbdf5204d6 100644 --- a/comp/otelcol/otlp/integrationtest/integration_test.go +++ b/comp/otelcol/otlp/integrationtest/integration_test.go @@ -63,8 +63,9 @@ import ( "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - implzlib "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-zlib" + logscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" + metricscompressionfx "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-otel" tracecomp "github.com/DataDog/datadog-agent/comp/trace" traceagentcomp "github.com/DataDog/datadog-agent/comp/trace/agent/impl" gzipfx "github.com/DataDog/datadog-agent/comp/trace/compression/fx-gzip" @@ -74,8 +75,9 @@ import ( pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/serializer" "github.com/DataDog/datadog-agent/pkg/trace/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/compression" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) func runTestOTelAgent(ctx context.Context, params 
*subcommands.GlobalParams) error { @@ -112,16 +114,18 @@ func runTestOTelAgent(ctx context.Context, params *subcommands.GlobalParams) err return h.Get }), hostnameinterface.MockModule(), - fx.Supply(optional.NewNoneOption[secrets.Component]()), + fx.Supply(option.None[secrets.Component]()), fx.Provide(func(_ coreconfig.Component) logdef.Params { return logdef.ForDaemon(params.LoggerName, "log_file", pkgconfigsetup.DefaultOTelAgentLogFile) }), logsagentpipelineimpl.Module(), - // We create strategy.ZlibStrategy directly to avoid build tags - fx.Provide(implzlib.NewComponent), - fx.Provide(func(s implzlib.Provides) compression.Component { - return s.Comp + logscompressionfx.Module(), + metricscompressionfx.Module(), + // For FX to provide the compression.Compressor interface (used by serializer.NewSerializer) + // implemented by the metricsCompression.Component + fx.Provide(func(c metricscompression.Component) compression.Compressor { + return c }), fx.Provide(serializer.NewSerializer), // For FX to provide the serializer.MetricSerializer from the serializer.Serializer @@ -133,7 +137,7 @@ func runTestOTelAgent(ctx context.Context, params *subcommands.GlobalParams) err return defaultforwarder.Forwarder(c), nil }), orchestratorimpl.MockModule(), - fx.Invoke(func(_ collectordef.Component, _ defaultforwarder.Forwarder, _ optional.Option[logsagentpipeline.Component]) { + fx.Invoke(func(_ collectordef.Component, _ defaultforwarder.Forwarder, _ option.Option[logsagentpipeline.Component]) { }), taggerfx.Module(tagger.Params{}), noopsimpl.Module(), diff --git a/comp/otelcol/otlp/integrationtest/integration_test_config.yaml b/comp/otelcol/otlp/integrationtest/integration_test_config.yaml index 177ef04859eeb..b6e25d8d823c0 100644 --- a/comp/otelcol/otlp/integrationtest/integration_test_config.yaml +++ b/comp/otelcol/otlp/integrationtest/integration_test_config.yaml @@ -28,7 +28,7 @@ exporters: debug: datadog: api: - key: "key" + key: "00000000000000000000000000000000" traces: endpoint: ${env:SERVER_URL} trace_buffer: 10 diff --git a/comp/otelcol/otlp/testutil/go.mod b/comp/otelcol/otlp/testutil/go.mod index 609205dc9e161..5bf3cb2c50e5a 100644 --- a/comp/otelcol/otlp/testutil/go.mod +++ b/comp/otelcol/otlp/testutil/go.mod @@ -23,7 +23,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system @@ -34,34 +34,34 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 github.com/DataDog/datadog-agent/pkg/proto v0.55.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0 + 
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 github.com/DataDog/sketches-go v1.4.6 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/collector/pdata v1.21.0 - google.golang.org/protobuf v1.35.2 + go.opentelemetry.io/collector/pdata v1.24.0 + google.golang.org/protobuf v1.36.3 ) require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -75,7 +75,7 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect @@ -85,24 +85,25 @@ require ( github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // 
indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tinylib/msgp v1.2.4 // indirect + github.com/tinylib/msgp v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/comp/otelcol/otlp/testutil/go.sum b/comp/otelcol/otlp/testutil/go.sum index dca00de17ecd0..c7f67b81a37e0 100644 --- a/comp/otelcol/otlp/testutil/go.sum +++ b/comp/otelcol/otlp/testutil/go.sum @@ -1,9 +1,9 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0 h1:r1Dx2cRHCBWkVluSZA41i4eoI/nOGbcrrZdkqWjoFCc= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0/go.mod h1:+/dkO8ZiMa8rfm4SmtTF6qPUdBbBcvsWWKaO4xPKAIk= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0 h1:Fth9wZCAVbIUvlKq/QXT7QINza+epFaKtIvy1qqybbg= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0/go.mod h1:7D+x/7CIdzklC9spgB3lrg8GUvIW52Y8SMONrBCiPbw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= @@ -58,6 +58,10 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 
h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -81,13 +85,14 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -124,8 +129,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -159,8 +164,8 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod 
h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -177,8 +182,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -191,8 +196,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -203,8 +208,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -222,8 +227,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.10.0 
h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -244,8 +249,20 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= @@ -271,8 +288,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp 
v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -293,8 +310,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -316,8 +333,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -349,17 +366,17 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/process/apiserver/apiserver_test.go b/comp/process/apiserver/apiserver_test.go index 4f8b7f2cca342..c57a236e84897 100644 --- a/comp/process/apiserver/apiserver_test.go +++ b/comp/process/apiserver/apiserver_test.go @@ -28,6 +28,7 @@ import ( workloadmetafx "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/log" ) func TestLifecycle(t *testing.T) { @@ -97,16 +98,20 @@ func TestPostAuthentication(t *testing.T) { url := fmt.Sprintf("https://localhost:%d/config/log_level?value=debug", port) req, err := http.NewRequest("POST", url, nil) require.NoError(c, err) + log.Infof("Issuing unauthenticated test request to url: %s", url) res, err := util.GetClient(false).Do(req) require.NoError(c, err) defer res.Body.Close() + log.Info("Received unauthenticated test response") assert.Equal(c, http.StatusUnauthorized, res.StatusCode) // With authentication req.Header.Set("Authorization", "Bearer "+util.GetAuthToken()) + log.Infof("Issuing authenticated test request to url: %s", url) res, err = util.GetClient(false).Do(req) require.NoError(c, err) defer res.Body.Close() + log.Info("Received authenticated test response") assert.Equal(c, http.StatusOK, res.StatusCode) }, 5*time.Second, time.Second) } diff --git a/comp/process/bundle.go b/comp/process/bundle.go index 96802180c2cce..b30a1bf04c11c 100644 --- a/comp/process/bundle.go +++ b/comp/process/bundle.go @@ -26,6 +26,7 @@ import ( "github.com/DataDog/datadog-agent/comp/process/rtcontainercheck/rtcontainercheckimpl" "github.com/DataDog/datadog-agent/comp/process/runner/runnerimpl" "github.com/DataDog/datadog-agent/comp/process/submitter/submitterimpl" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -55,5 +56,6 @@ func Bundle() fxutil.BundleOptions { apiserver.Module(), forwardersimpl.Module(), + logscompression.Module(), ) } diff --git a/comp/remote-config/rcclient/component.go b/comp/remote-config/rcclient/component.go index 
6383fb54a0e6a..99c91f028b45c 100644 --- a/comp/remote-config/rcclient/component.go +++ b/comp/remote-config/rcclient/component.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/remote/data" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // team: remote-config @@ -37,7 +37,7 @@ type Params struct { // This helper allows code that needs a disabled Optional type for rcclient to get it. The helper is split from // the implementation to avoid linking with the dependencies from rcclient. func NoneModule() fxutil.Module { - return fxutil.Component(fx.Provide(func() optional.Option[Component] { - return optional.NewNoneOption[Component]() + return fxutil.Component(fx.Provide(func() option.Option[Component] { + return option.None[Component]() })) } diff --git a/comp/remote-config/rcclient/rcclientimpl/rcclient.go b/comp/remote-config/rcclient/rcclientimpl/rcclient.go index 374bb41bfe459..56ddec37f58ed 100644 --- a/comp/remote-config/rcclient/rcclientimpl/rcclient.go +++ b/comp/remote-config/rcclient/rcclientimpl/rcclient.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/util/fxutil" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Module defines the fx options for this component. @@ -56,7 +56,7 @@ type rcClient struct { taskListeners []types.RCAgentTaskListener settingsComponent settings.Component config configcomp.Component - sysprobeConfig optional.Option[sysprobeconfig.Component] + sysprobeConfig option.Option[sysprobeconfig.Component] isSystemProbe bool } @@ -71,7 +71,7 @@ type dependencies struct { TaskListeners []types.RCAgentTaskListener `group:"rCAgentTaskListener"` // <-- Fill automatically by Fx SettingsComponent settings.Component Config configcomp.Component - SysprobeConfig optional.Option[sysprobeconfig.Component] + SysprobeConfig option.Option[sysprobeconfig.Component] } // newRemoteConfigClient must not populate any Fx groups or return any types that would be consumed as dependencies by diff --git a/comp/remote-config/rcservice/rcserviceimpl/rcservice.go b/comp/remote-config/rcservice/rcserviceimpl/rcservice.go index 48226f0dcb916..3cd68f427bc38 100644 --- a/comp/remote-config/rcservice/rcserviceimpl/rcservice.go +++ b/comp/remote-config/rcservice/rcserviceimpl/rcservice.go @@ -11,18 +11,17 @@ import ( "fmt" "time" - log "github.com/DataDog/datadog-agent/comp/core/log/def" - "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" - "github.com/DataDog/datadog-agent/pkg/util/optional" - cfgcomp "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/core/hostname" + log "github.com/DataDog/datadog-agent/comp/core/log/def" + "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter" remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fxutil" + "github.com/DataDog/datadog-agent/pkg/util/option" 
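
The remote-config hunks here replace `pkg/util/optional` with `pkg/util/option`: `optional.NewNoneOption[T]()` becomes `option.None[T]()` and `optional.NewOption[T](v)` becomes `option.New[T](v)`. The sketch below shows the shape of that generic wrapper and the conditional-construction pattern used by `newRemoteConfigServiceOptional`; the struct fields and the `Get` accessor are illustrative assumptions, only the constructor names come from these hunks.

```go
package main

import "fmt"

// Option is a minimal generic optional wrapper, assumed here only to illustrate
// the shape of pkg/util/option; the real type may differ internally.
type Option[T any] struct {
	value T
	set   bool
}

// New wraps a value, mirroring the option.New[T](v) calls in the diff.
func New[T any](v T) Option[T] { return Option[T]{value: v, set: true} }

// None returns an empty Option, mirroring the option.None[T]() calls in the diff.
func None[T any]() Option[T] { return Option[T]{} }

// Get is an assumed accessor returning the value and whether it was set.
func (o Option[T]) Get() (T, bool) { return o.value, o.set }

// newServiceOptional mimics newRemoteConfigServiceOptional: None when the
// feature is disabled, New(service) when construction succeeds.
func newServiceOptional(enabled bool) Option[string] {
	if !enabled {
		return None[string]()
	}
	return New("remote-config service")
}

func main() {
	if svc, ok := newServiceOptional(true).Get(); ok {
		fmt.Println("got:", svc)
	}
}
```
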
"github.com/DataDog/datadog-agent/pkg/version" "go.uber.org/fx" @@ -48,8 +47,8 @@ type dependencies struct { } // newRemoteConfigServiceOptional conditionally creates and configures a new remote config service, based on whether RC is enabled. -func newRemoteConfigServiceOptional(deps dependencies) optional.Option[rcservice.Component] { - none := optional.NewNoneOption[rcservice.Component]() +func newRemoteConfigServiceOptional(deps dependencies) option.Option[rcservice.Component] { + none := option.None[rcservice.Component]() if !pkgconfigsetup.IsRemoteConfigEnabled(deps.Cfg) { return none } @@ -60,7 +59,7 @@ func newRemoteConfigServiceOptional(deps dependencies) optional.Option[rcservice return none } - return optional.NewOption[rcservice.Component](configService) + return option.New[rcservice.Component](configService) } // newRemoteConfigServiceOptional creates and configures a new remote config service diff --git a/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go b/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go index a474da3cfeb89..d0322142510a9 100644 --- a/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go +++ b/comp/remote-config/rcservicemrf/rcservicemrfimpl/rcservicemrf.go @@ -22,7 +22,7 @@ import ( pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" "go.uber.org/fx" @@ -47,8 +47,8 @@ type dependencies struct { } // newMrfRemoteConfigServiceOptional conditionally creates and configures a new MRF remote config service, based on whether RC is enabled. -func newMrfRemoteConfigServiceOptional(deps dependencies) optional.Option[rcservicemrf.Component] { - none := optional.NewNoneOption[rcservicemrf.Component]() +func newMrfRemoteConfigServiceOptional(deps dependencies) option.Option[rcservicemrf.Component] { + none := option.None[rcservicemrf.Component]() if !pkgconfigsetup.IsRemoteConfigEnabled(deps.Cfg) || !deps.Cfg.GetBool("multi_region_failover.enabled") { return none } @@ -59,7 +59,7 @@ func newMrfRemoteConfigServiceOptional(deps dependencies) optional.Option[rcserv return none } - return optional.NewOption[rcservicemrf.Component](mrfConfigService) + return option.New[rcservicemrf.Component](mrfConfigService) } // newMrfRemoteConfigServiceOptional creates and configures a new service that receives remote config updates from the configured DD failover DC diff --git a/comp/serializer/compression/common/common.go b/comp/serializer/compression/common/common.go deleted file mode 100644 index 65b8699152859..0000000000000 --- a/comp/serializer/compression/common/common.go +++ /dev/null @@ -1,16 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -// Package common provides a set of constants describing the compression options -package common - -// ZlibKind defines a const value for the zlib compressor -const ZlibKind = "zlib" - -// ZstdKind defines a const value for the zstd compressor -const ZstdKind = "zstd" - -// NoneKind defines a const value for disabling compression -const NoneKind = "none" diff --git a/comp/serializer/compression/def/component.go b/comp/serializer/compression/def/component.go deleted file mode 100644 index 3aaf50f2af200..0000000000000 --- a/comp/serializer/compression/def/component.go +++ /dev/null @@ -1,35 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2024-present Datadog, Inc. - -// Package compression provides a compression implementation based on the configuration or available build tags. -package compression - -import ( - "bytes" - "io" -) - -// team: agent-metrics-logs - -// ZlibEncoding is the content-encoding value for Zlib -const ZlibEncoding = "deflate" - -// ZstdEncoding is the content-encoding value for Zstd -const ZstdEncoding = "zstd" - -// Component is the component type. -type Component interface { - Compress(src []byte) ([]byte, error) - Decompress(src []byte) ([]byte, error) - CompressBound(sourceLen int) int - ContentEncoding() string - NewStreamCompressor(output *bytes.Buffer) StreamCompressor -} - -// StreamCompressor is the interface that zlib and zstd should implement -type StreamCompressor interface { - io.WriteCloser - Flush() error -} diff --git a/comp/serializer/compression/mock/component_mock.go b/comp/serializer/compression/mock/component_mock.go deleted file mode 100644 index 726e179142b0a..0000000000000 --- a/comp/serializer/compression/mock/component_mock.go +++ /dev/null @@ -1,16 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2024-present Datadog, Inc. - -//go:build test - -// Package mock provides the mock component for serializer/compression -package mock - -import compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - -// Mock implements mock-specific methods. -type Mock interface { - compression.Component -} diff --git a/comp/serializer/compression/selector/def.go b/comp/serializer/compression/selector/def.go deleted file mode 100644 index 0e8b5837b57e9..0000000000000 --- a/comp/serializer/compression/selector/def.go +++ /dev/null @@ -1,21 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
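
The deleted comp/serializer/compression/def/component.go above defined the contract that this refactor moves into `pkg/util/compression`: one-shot `Compress`/`Decompress` calls plus `NewStreamCompressor(output *bytes.Buffer)` returning a `StreamCompressor` (an `io.WriteCloser` with `Flush`). A self-contained sketch of driving that streaming interface follows; the pass-through implementation is invented so the example runs and is not the agent's zlib/zstd code.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// StreamCompressor mirrors the deleted interface: an io.WriteCloser with Flush.
type StreamCompressor interface {
	io.WriteCloser
	Flush() error
}

// noopStream is an invented pass-through implementation so the sketch runs.
type noopStream struct{ out *bytes.Buffer }

func (n *noopStream) Write(p []byte) (int, error) { return n.out.Write(p) }
func (n *noopStream) Flush() error                { return nil }
func (n *noopStream) Close() error                { return nil }

// newStreamCompressor stands in for Component.NewStreamCompressor(output).
func newStreamCompressor(output *bytes.Buffer) StreamCompressor {
	return &noopStream{out: output}
}

func main() {
	var payload bytes.Buffer
	sc := newStreamCompressor(&payload)

	// A serializer would write one or more chunks, flush, then close.
	if _, err := sc.Write([]byte(`{"series":[]}`)); err != nil {
		panic(err)
	}
	if err := sc.Flush(); err != nil {
		panic(err)
	}
	if err := sc.Close(); err != nil {
		panic(err)
	}
	fmt.Printf("compressed payload: %d bytes\n", payload.Len())
}
```
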
- -package selector - -import ( - "github.com/DataDog/datadog-agent/comp/core/config" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" -) - -// Requires contains the config for Compression -type Requires struct { - Cfg config.Component -} - -// Provides contains the compression component -type Provides struct { - Comp compression.Component -} diff --git a/comp/serializer/compression/selector/no-zlib-no-zstd.go b/comp/serializer/compression/selector/no-zlib-no-zstd.go deleted file mode 100644 index f370ab640200b..0000000000000 --- a/comp/serializer/compression/selector/no-zlib-no-zstd.go +++ /dev/null @@ -1,27 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build !zlib && !zstd - -// Package selector provides correct compression impl to fx -package selector - -import ( - "github.com/DataDog/datadog-agent/comp/core/config" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - implnoop "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-noop" -) - -// NewCompressorReq returns a new Compressor based on serializer_compressor_kind -// This function is called only when there is no zlib or zstd tag -func NewCompressorReq(_ Requires) Provides { - return Provides{Comp: implnoop.NewComponent().Comp} -} - -// NewCompressor returns a new Compressor based on serializer_compressor_kind -// This function is called only when there is no zlib or zstd tag -func NewCompressor(cfg config.Component) compression.Component { - return NewCompressorReq(Requires{Cfg: cfg}).Comp -} diff --git a/comp/serializer/compression/selector/zlib-and-zstd.go b/comp/serializer/compression/selector/zlib-and-zstd.go deleted file mode 100644 index df324defbc911..0000000000000 --- a/comp/serializer/compression/selector/zlib-and-zstd.go +++ /dev/null @@ -1,43 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build zlib && zstd - -// Package selector provides correct compression impl to fx -package selector - -import ( - "github.com/DataDog/datadog-agent/comp/core/config" - "github.com/DataDog/datadog-agent/comp/serializer/compression/common" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - implnoop "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-noop" - implzlib "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-zlib" - implzstd "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-zstd" - "github.com/DataDog/datadog-agent/pkg/util/log" -) - -// NewCompressorReq returns a new Compressor based on serializer_compressor_kind -// This function is called when both zlib and zstd build tags are included -func NewCompressorReq(req Requires) Provides { - switch req.Cfg.GetString("serializer_compressor_kind") { - case common.ZlibKind: - return Provides{implzlib.NewComponent().Comp} - case common.ZstdKind: - level := req.Cfg.GetInt("serializer_zstd_compressor_level") - return Provides{implzstd.NewComponent(implzstd.Requires{Level: level}).Comp} - case common.NoneKind: - log.Warn("no serializer_compressor_kind set. 
use zlib or zstd") - return Provides{implnoop.NewComponent().Comp} - default: - log.Warn("invalid serializer_compressor_kind detected. use one of 'zlib', 'zstd'") - return Provides{implnoop.NewComponent().Comp} - } -} - -// NewCompressor returns a new Compressor based on serializer_compressor_kind -// This function is called when both zlib and zstd build tags are included -func NewCompressor(cfg config.Component) compression.Component { - return NewCompressorReq(Requires{Cfg: cfg}).Comp -} diff --git a/comp/serializer/compression/selector/zlib-no-zstd.go b/comp/serializer/compression/selector/zlib-no-zstd.go deleted file mode 100644 index febf91e379e94..0000000000000 --- a/comp/serializer/compression/selector/zlib-no-zstd.go +++ /dev/null @@ -1,42 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build zlib && !zstd - -// Package selector provides correct compression impl to fx -package selector - -import ( - "github.com/DataDog/datadog-agent/comp/core/config" - "github.com/DataDog/datadog-agent/comp/serializer/compression/common" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - implnoop "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-noop" - implzlib "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-zlib" - "github.com/DataDog/datadog-agent/pkg/util/log" -) - -// NewCompressorReq returns a new Compressor based on serializer_compressor_kind -// This function is called only when the zlib build tag is included -func NewCompressorReq(req Requires) Provides { - switch req.Cfg.GetString("serializer_compressor_kind") { - case common.ZlibKind: - return Provides{implzlib.NewComponent().Comp} - case common.ZstdKind: - log.Warn("zstd build tag not included. using zlib") - return Provides{implzlib.NewComponent().Comp} - case common.NoneKind: - log.Warn("no serializer_compressor_kind set. use zlib or zstd") - return Provides{implnoop.NewComponent().Comp} - default: - log.Warn("invalid serializer_compressor_kind detected. use zlib or zstd") - return Provides{implnoop.NewComponent().Comp} - } -} - -// NewCompressor returns a new Compressor based on serializer_compressor_kind -// This function is called only when the zlib build tag is included -func NewCompressor(cfg config.Component) compression.Component { - return NewCompressorReq(Requires{Cfg: cfg}).Comp -} diff --git a/comp/serializer/logscompression/def/component.go b/comp/serializer/logscompression/def/component.go new file mode 100644 index 0000000000000..5f959aea6ffb0 --- /dev/null +++ b/comp/serializer/logscompression/def/component.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package logscompression provides the component for logs compression +package logscompression + +// team: agent-processing-and-routing + +import ( + "github.com/DataDog/datadog-agent/pkg/util/compression" +) + +// Component is the component type. +// The logscompression component provides a factory that returns a requested Compressor +// used when setting up the endpoints. 
+// (This is different from the metrics compressor which returns the requested Compressor +// by reading the configuration at load time). +type Component interface { + NewCompressor(kind string, level int) compression.Compressor +} diff --git a/comp/serializer/compression/doc.go b/comp/serializer/logscompression/doc.go similarity index 73% rename from comp/serializer/compression/doc.go rename to comp/serializer/logscompression/doc.go index e15c0ed472a27..d711ccdd6bf28 100644 --- a/comp/serializer/compression/doc.go +++ b/comp/serializer/logscompression/doc.go @@ -3,5 +3,5 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package compression contains the serializer compression component -package compression +// Package logs contains the serializer compression component for logs +package logs diff --git a/comp/serializer/logscompression/fx-mock/fx.go b/comp/serializer/logscompression/fx-mock/fx.go new file mode 100644 index 0000000000000..ceac5e34ae2bf --- /dev/null +++ b/comp/serializer/logscompression/fx-mock/fx.go @@ -0,0 +1,36 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build test + +// Package fx provides the fx module for the serializer/compression component +package fx + +import ( + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + "github.com/DataDog/datadog-agent/pkg/util/compression" + "github.com/DataDog/datadog-agent/pkg/util/compression/selector" + "github.com/DataDog/datadog-agent/pkg/util/fxutil" +) + +type component struct{} + +func (*component) NewCompressor(_kind string, _level int) compression.Compressor { + return selector.NewNoopCompressor() +} + +// NewMockCompressor returns a mock component that will always return a noop compressor. +func NewMockCompressor() logscompression.Component { + return &component{} +} + +// MockModule defines the fx options for the mock component. +func MockModule() fxutil.Module { + return fxutil.Component( + fxutil.ProvideComponentConstructor( + NewMockCompressor, + ), + ) +} diff --git a/comp/serializer/compression/fx/fx.go b/comp/serializer/logscompression/fx/fx.go similarity index 71% rename from comp/serializer/compression/fx/fx.go rename to comp/serializer/logscompression/fx/fx.go index 093ea3f4d6e67..e135cbd5b5614 100644 --- a/comp/serializer/compression/fx/fx.go +++ b/comp/serializer/logscompression/fx/fx.go @@ -3,11 +3,11 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc.
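
Taken together, the deleted build-tag selectors (zlib-and-zstd, zlib-no-zstd, no-zlib-no-zstd) and the new `logscompression` factory above amount to moving the choice of compressor from compile time to a runtime `NewCompressor(kind, level)` call. The sketch below mimics that kind/level dispatch; the constants mirror the deleted `common` package (the mock above shows `NoneKind` now living in `pkg/util/compression`), and the pass-through implementation plus the `identity` encoding are invented so the example runs.

```go
package main

import "fmt"

// Kind values mirror the constants from the deleted common package.
const (
	ZlibKind = "zlib"
	ZstdKind = "zstd"
	NoneKind = "none"
)

// Compressor is a cut-down stand-in for pkg/util/compression.Compressor.
type Compressor interface {
	Compress(src []byte) ([]byte, error)
	ContentEncoding() string
}

// passthrough is invented here so the sketch runs; it does no real compression.
type passthrough struct{ encoding string }

func (p passthrough) Compress(src []byte) ([]byte, error) { return src, nil }
func (p passthrough) ContentEncoding() string             { return p.encoding }

// NewCompressor mimics the runtime dispatch that replaced the build-tag
// selectors: pick an implementation from kind and level, falling back to no
// compression for unknown kinds, as the old selector did.
func NewCompressor(kind string, level int) Compressor {
	switch kind {
	case ZlibKind:
		return passthrough{encoding: "deflate"} // real code would build a zlib compressor
	case ZstdKind:
		_ = level // real code would pass the zstd level through
		return passthrough{encoding: "zstd"}
	case NoneKind:
		return passthrough{encoding: "identity"}
	default:
		fmt.Printf("unknown serializer_compressor_kind %q, using none\n", kind)
		return passthrough{encoding: "identity"}
	}
}

func main() {
	c := NewCompressor(ZstdKind, 5)
	out, _ := c.Compress([]byte("payload"))
	fmt.Println(c.ContentEncoding(), len(out))
}
```
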
-// Package fx provides the fx module for the serializer/compression component +// Package fx provides the fx module for the serializer/logscompression component package fx import ( - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + "github.com/DataDog/datadog-agent/comp/serializer/logscompression/impl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -15,7 +15,7 @@ import ( func Module() fxutil.Module { return fxutil.Component( fxutil.ProvideComponentConstructor( - selector.NewCompressorReq, + logscompressionimpl.NewComponent, ), ) } diff --git a/comp/serializer/logscompression/go.mod b/comp/serializer/logscompression/go.mod new file mode 100644 index 0000000000000..98fd210aa14f0 --- /dev/null +++ b/comp/serializer/logscompression/go.mod @@ -0,0 +1,133 @@ +module github.com/DataDog/datadog-agent/comp/serializer/logscompression + +go 1.22.0 + +replace ( + github.com/DataDog/datadog-agent/comp/core/config => ../../core/config + github.com/DataDog/datadog-agent/pkg/util/compression => ../../../pkg/util/compression + github.com/DataDog/datadog-agent/pkg/util/compression/selector => ../../../pkg/util/compression/selector + github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../pkg/util/defaultpaths + github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option +) + +require ( + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect + github.com/DataDog/viper v1.14.0 // indirect + github.com/DataDog/zstd v1.5.6 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // 
indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/ebitengine/purego v0.8.1 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.10.0 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.23.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/text v0.21.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace github.com/DataDog/datadog-agent/comp/def => ../../def + +replace github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log + +replace github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber + +replace github.com/DataDog/datadog-agent/pkg/version => ../../../pkg/version + +replace github.com/DataDog/datadog-agent/comp/api/api/def => ../../api/api/def + +replace github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../core/flare/builder + +replace github.com/DataDog/datadog-agent/comp/core/flare/types => ../../core/flare/types + +replace github.com/DataDog/datadog-agent/comp/core/secrets => ../../core/secrets + +replace github.com/DataDog/datadog-agent/comp/core/telemetry => ../../core/telemetry + +replace github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults + +replace github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env + +replace github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock + +replace github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + +replace github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel + +replace github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + +replace github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig + +replace github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable + +replace github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem + +replace 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate + +replace github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer + +replace github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system + +replace github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../pkg/util/system/socket + +replace github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../pkg/util/testutil + +replace github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../pkg/util/winutil + +replace github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure diff --git a/comp/serializer/compression/go.sum b/comp/serializer/logscompression/go.sum similarity index 95% rename from comp/serializer/compression/go.sum rename to comp/serializer/logscompression/go.sum index 3b405d22ebcc8..9073aac386c01 100644 --- a/comp/serializer/compression/go.sum +++ b/comp/serializer/logscompression/go.sum @@ -74,7 +74,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -112,8 +111,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -140,8 +139,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 
h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -158,8 +157,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -173,8 +172,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -185,8 +184,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -241,8 +240,8 @@ golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -279,8 +278,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -308,8 +307,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/comp/serializer/logscompression/impl/logscompressionimpl.go b/comp/serializer/logscompression/impl/logscompressionimpl.go new file mode 100644 index 0000000000000..27d57f9fa1e1d --- /dev/null +++ b/comp/serializer/logscompression/impl/logscompressionimpl.go @@ -0,0 +1,29 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package logscompressionimpl provides the implementation for the serializer/logscompression component +package logscompressionimpl + +import ( + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + "github.com/DataDog/datadog-agent/pkg/util/compression" + "github.com/DataDog/datadog-agent/pkg/util/compression/selector" +) + +type component struct{} + +// Provides contains the compression component +type Provides struct { + Comp logscompression.Component +} + +func (*component) NewCompressor(kind string, level int) compression.Compressor { + return selector.NewCompressor(kind, level) +} + +// NewComponent creates a new logscompression component. +func NewComponent() logscompression.Component { + return &component{} +} diff --git a/comp/serializer/metricscompression/def/component.go b/comp/serializer/metricscompression/def/component.go new file mode 100644 index 0000000000000..9c1e6a03fa0c5 --- /dev/null +++ b/comp/serializer/metricscompression/def/component.go @@ -0,0 +1,18 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package metricscompression provides the component for metrics compression +package metricscompression + +// team: agent-processing-and-routing + +import ( + "github.com/DataDog/datadog-agent/pkg/util/compression" +) + +// Component is the component type. +type Component interface { + compression.Compressor +} diff --git a/comp/serializer/metricscompression/doc.go b/comp/serializer/metricscompression/doc.go new file mode 100644 index 0000000000000..ea66f65b8953c --- /dev/null +++ b/comp/serializer/metricscompression/doc.go @@ -0,0 +1,7 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package metrics contains the serializer compression component for metrics +package metrics diff --git a/comp/serializer/compression/fx-mock/fx.go b/comp/serializer/metricscompression/fx-mock/fx.go similarity index 67% rename from comp/serializer/compression/fx-mock/fx.go rename to comp/serializer/metricscompression/fx-mock/fx.go index 93c27867bc9c7..f9cdc61f63cd1 100644 --- a/comp/serializer/compression/fx-mock/fx.go +++ b/comp/serializer/metricscompression/fx-mock/fx.go @@ -9,8 +9,9 @@ package fx import ( - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - compressionnoop "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-noop" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" + common "github.com/DataDog/datadog-agent/pkg/util/compression" + "github.com/DataDog/datadog-agent/pkg/util/compression/selector" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -23,7 +24,7 @@ func MockModule() fxutil.Module { ) } -// NewMockCompressor returns a new Mock +// NewMockCompressor returns a noop compressor. 
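
The two new `def/component.go` files draw an explicit distinction: the metrics component simply is a `compression.Compressor`, chosen from configuration at load time, while the logs component is a factory consulted with a kind and level each time an endpoint is set up. A toy contrast of the two call-site shapes is below; the one-method `Compressor` stand-in and the hard-coded kinds are assumptions for illustration.

```go
package main

import "fmt"

// Compressor is a one-method stand-in for pkg/util/compression.Compressor.
type Compressor interface{ ContentEncoding() string }

// fixed is an invented implementation that just reports its encoding.
type fixed string

func (f fixed) ContentEncoding() string { return string(f) }

// MetricsComponent mirrors metricscompression: the component is itself the
// compressor, selected once from the configuration at load time.
type MetricsComponent interface{ Compressor }

// LogsComponent mirrors logscompression: a factory consulted per endpoint.
type LogsComponent interface {
	NewCompressor(kind string, level int) Compressor
}

type logsFactory struct{}

func (logsFactory) NewCompressor(kind string, _ int) Compressor { return fixed(kind) }

func main() {
	// Metrics: every payload goes through the same compressor.
	var metrics MetricsComponent = fixed("zstd")
	fmt.Println("metrics use:", metrics.ContentEncoding())

	// Logs: each endpoint asks for whatever kind/level it was configured with.
	var logs LogsComponent = logsFactory{}
	fmt.Println("logs endpoint A uses:", logs.NewCompressor("zstd", 5).ContentEncoding())
	fmt.Println("logs endpoint B uses:", logs.NewCompressor("zlib", 0).ContentEncoding())
}
```
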
func NewMockCompressor() compression.Component { - return compressionnoop.NewComponent().Comp + return selector.NewCompressor(common.NoneKind, 1) } diff --git a/comp/serializer/compression/fx-zstd/fx.go b/comp/serializer/metricscompression/fx-otel/fx.go similarity index 67% rename from comp/serializer/compression/fx-zstd/fx.go rename to comp/serializer/metricscompression/fx-otel/fx.go index 9c7e35a7a3d90..c383efd244299 100644 --- a/comp/serializer/compression/fx-zstd/fx.go +++ b/comp/serializer/metricscompression/fx-otel/fx.go @@ -3,11 +3,11 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package fx provides the fx module for the serializer/compression component +// Package fx provides the fx module that will be used by otel for the serializer/metricscompression component package fx import ( - compressionimpl "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-zstd" + "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -15,7 +15,7 @@ import ( func Module() fxutil.Module { return fxutil.Component( fxutil.ProvideComponentConstructor( - compressionimpl.NewComponent, + metricscompressionimpl.NewCompressorReqOtel, ), ) } diff --git a/comp/serializer/compression/fx-zlib/fx.go b/comp/serializer/metricscompression/fx/fx.go similarity index 70% rename from comp/serializer/compression/fx-zlib/fx.go rename to comp/serializer/metricscompression/fx/fx.go index f10af59cfc6d8..9dd06857e5b53 100644 --- a/comp/serializer/compression/fx-zlib/fx.go +++ b/comp/serializer/metricscompression/fx/fx.go @@ -3,11 +3,11 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
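
`fxutil.Component(fxutil.ProvideComponentConstructor(...))`, used by every `Module()`/`MockModule()` above, wraps go.uber.org/fx. A bare-fx approximation of what those module definitions buy a consumer is sketched below; the `Compressor` interface and constructor are invented placeholders, and the real fxutil helpers add typing and lifecycle conveniences not shown here.

```go
package main

import (
	"fmt"

	"go.uber.org/fx"
)

// Compressor is an invented placeholder for the component interface a module provides.
type Compressor interface{ ContentEncoding() string }

type noop struct{}

func (noop) ContentEncoding() string { return "identity" }

// NewNoop plays the role of the constructor handed to ProvideComponentConstructor.
func NewNoop() Compressor { return noop{} }

func main() {
	app := fx.New(
		// Roughly what Module()/MockModule() contribute to the app graph.
		fx.Provide(NewNoop),
		// A consumer (e.g. the serializer) declares the interface as a dependency.
		fx.Invoke(func(c Compressor) {
			fmt.Println("injected compressor encoding:", c.ContentEncoding())
		}),
	)
	if err := app.Err(); err != nil {
		panic(err)
	}
}
```
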
-// Package fx provides the fx module for the serializer/compression component +// Package fx provides the fx module for the serializer/metricscompression component package fx import ( - compressionimpl "github.com/DataDog/datadog-agent/comp/serializer/compression/impl-zlib" + "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -15,7 +15,7 @@ import ( func Module() fxutil.Module { return fxutil.Component( fxutil.ProvideComponentConstructor( - compressionimpl.NewComponent, + metricscompressionimpl.NewCompressorReq, ), ) } diff --git a/comp/serializer/compression/go.mod b/comp/serializer/metricscompression/go.mod similarity index 54% rename from comp/serializer/compression/go.mod rename to comp/serializer/metricscompression/go.mod index f1c59cd40d645..5a49803b479de 100644 --- a/comp/serializer/compression/go.mod +++ b/comp/serializer/metricscompression/go.mod @@ -1,30 +1,16 @@ -module github.com/DataDog/datadog-agent/comp/serializer/compression +module github.com/DataDog/datadog-agent/comp/serializer/metricscompression go 1.22.0 replace ( - github.com/DataDog/datadog-agent/comp/api/api/def => ../../api/api/def github.com/DataDog/datadog-agent/comp/core/config => ../../core/config - github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../core/flare/builder - github.com/DataDog/datadog-agent/comp/core/flare/types => ../../core/flare/types - github.com/DataDog/datadog-agent/comp/core/secrets => ../../core/secrets - github.com/DataDog/datadog-agent/comp/core/telemetry => ../../core/telemetry - github.com/DataDog/datadog-agent/comp/def => ../../def - github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults - github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env - github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock - github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel - github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup - github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig - github.com/DataDog/datadog-agent/pkg/telemetry => ../../../pkg/telemetry + github.com/DataDog/datadog-agent/pkg/util/compression => ../../../pkg/util/compression + github.com/DataDog/datadog-agent/pkg/util/compression/impl-zlib => ../../../pkg/util/compression/impl-zlib github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../../pkg/util/defaultpaths - github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable - github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system @@ -34,36 +20,37 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 
- github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 - github.com/DataDog/zstd v1.5.6 + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 ) require ( - github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect - github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect + github.com/DataDog/zstd v1.5.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -74,16 +61,16 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect 
github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -96,13 +83,43 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) -replace github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure +replace github.com/DataDog/datadog-agent/comp/api/api/def => ../../api/api/def + +replace github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../core/flare/builder + +replace github.com/DataDog/datadog-agent/comp/core/flare/types => ../../core/flare/types + +replace github.com/DataDog/datadog-agent/comp/core/secrets => ../../core/secrets + +replace github.com/DataDog/datadog-agent/comp/core/telemetry => ../../core/telemetry + +replace github.com/DataDog/datadog-agent/comp/def => ../../def + +replace github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../../pkg/collector/check/defaults + +replace github.com/DataDog/datadog-agent/pkg/config/env => ../../../pkg/config/env + +replace github.com/DataDog/datadog-agent/pkg/config/mock => ../../../pkg/config/mock + +replace github.com/DataDog/datadog-agent/pkg/config/model => ../../../pkg/config/model + +replace github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => ../../../pkg/config/nodetreemodel + +replace github.com/DataDog/datadog-agent/pkg/config/setup => ../../../pkg/config/setup + +replace github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../../pkg/config/teeconfig + +replace github.com/DataDog/datadog-agent/pkg/util/executable => ../../../pkg/util/executable + +replace github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../../pkg/util/filesystem replace github.com/DataDog/datadog-agent/pkg/version => ../../../pkg/version + +replace github.com/DataDog/datadog-agent/pkg/config/structure => ../../../pkg/config/structure diff --git a/comp/serializer/metricscompression/go.sum b/comp/serializer/metricscompression/go.sum new file mode 100644 index 0000000000000..9073aac386c01 --- /dev/null +++ b/comp/serializer/metricscompression/go.sum @@ -0,0 +1,333 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= +github.com/DataDog/viper v1.14.0/go.mod 
h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE= +github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 
h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM= +github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats 
v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod 
h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod 
h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= +go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/comp/serializer/metricscompression/impl/metricscompression.go b/comp/serializer/metricscompression/impl/metricscompression.go new file mode 100644 index 0000000000000..82aeefaf2b905 --- /dev/null +++ b/comp/serializer/metricscompression/impl/metricscompression.go @@ -0,0 +1,38 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package metricscompressionimpl provides the implementation for the serializer/metricscompression component +package metricscompressionimpl + +import ( + "github.com/DataDog/datadog-agent/comp/core/config" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" + zlib "github.com/DataDog/datadog-agent/pkg/util/compression/impl-zlib" + "github.com/DataDog/datadog-agent/pkg/util/compression/selector" +) + +// Requires contains the config for Compression +type Requires struct { + Cfg config.Component +} + +// NewCompressorReq returns the compression component +func NewCompressorReq(req Requires) Provides { + return Provides{ + selector.FromConfig(req.Cfg), + } +} + +// Provides contains the compression component +type Provides struct { + Comp metricscompression.Component +} + +// NewCompressorReqOtel returns the compression component for Otel +func NewCompressorReqOtel() Provides { + return Provides{ + Comp: zlib.New(), + } +} diff --git a/comp/snmpscan/def/component.go b/comp/snmpscan/def/component.go index 1acd1b6cdb768..04015fd73060f 100644 --- a/comp/snmpscan/def/component.go +++ b/comp/snmpscan/def/component.go @@ -7,6 +7,7 @@ package snmpscan import ( + "github.com/DataDog/datadog-agent/pkg/networkdevice/metadata" "github.com/gosnmp/gosnmp" ) @@ -15,6 +16,7 @@ import ( // Component is the component type. 
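For readers unfamiliar with the Requires/Provides constructor shape used in metricscompressionimpl above, here is a minimal, self-contained Go sketch of the same pattern. The Config and Component types, the zlib/zstd placeholders, and the "serializer_compressor_kind" key below are illustrative stand-ins, not the real datadog-agent interfaces or selection logic.

// Minimal sketch of the Requires/Provides constructor pattern, assuming
// stand-in Config and Component types; not the real datadog-agent APIs.
package main

import "fmt"

// Config is a stand-in for the agent's config component.
type Config interface {
	GetString(key string) string
}

// Component is a stand-in for the metrics compression component.
type Component interface {
	ContentEncoding() string
}

// zlibCompressor is a placeholder implementation selected by default.
type zlibCompressor struct{}

func (zlibCompressor) ContentEncoding() string { return "deflate" }

// zstdCompressor is a placeholder alternative implementation.
type zstdCompressor struct{}

func (zstdCompressor) ContentEncoding() string { return "zstd" }

// Requires lists the dependencies the constructor needs (here, only config).
type Requires struct {
	Cfg Config
}

// Provides lists what the constructor contributes back to the application.
type Provides struct {
	Comp Component
}

// NewCompressorReq mirrors the shape of the constructor in the diff: it picks
// an implementation based on configuration and wraps it in Provides.
func NewCompressorReq(req Requires) Provides {
	if req.Cfg.GetString("serializer_compressor_kind") == "zstd" {
		return Provides{Comp: zstdCompressor{}}
	}
	return Provides{Comp: zlibCompressor{}}
}

// mapConfig is a trivial Config backed by a map, for demonstration only.
type mapConfig map[string]string

func (m mapConfig) GetString(k string) string { return m[k] }

func main() {
	p := NewCompressorReq(Requires{Cfg: mapConfig{"serializer_compressor_kind": "zlib"}})
	fmt.Println(p.Comp.ContentEncoding()) // deflate
}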
type Component interface { // Triggers a device scan - RunDeviceScan(snmpConection *gosnmp.GoSNMP, deviceNamespace string, deviceIPAddress string) error + RunDeviceScan(snmpConection *gosnmp.GoSNMP, deviceNamespace string, deviceID string) error RunSnmpWalk(snmpConection *gosnmp.GoSNMP, firstOid string) error + SendPayload(payload metadata.NetworkDevicesMetadata) error } diff --git a/comp/snmpscan/impl/devicescan.go b/comp/snmpscan/impl/devicescan.go index cef73d6aee845..1de132b39cb0b 100644 --- a/comp/snmpscan/impl/devicescan.go +++ b/comp/snmpscan/impl/devicescan.go @@ -6,23 +6,20 @@ package snmpscanimpl import ( - "encoding/json" "time" - "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" - "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/networkdevice/metadata" "github.com/DataDog/datadog-agent/pkg/snmp/gosnmplib" "github.com/gosnmp/gosnmp" ) -func (s snmpScannerImpl) RunDeviceScan(snmpConnection *gosnmp.GoSNMP, deviceNamespace string, deviceIPAddress string) error { +func (s snmpScannerImpl) RunDeviceScan(snmpConnection *gosnmp.GoSNMP, deviceNamespace string, deviceID string) error { + // execute the scan pdus, err := gatherPDUs(snmpConnection) if err != nil { return err } - deviceID := deviceNamespace + ":" + deviceIPAddress var deviceOids []*metadata.DeviceOID for _, pdu := range pdus { record, err := metadata.DeviceOIDFromPDU(deviceID, pdu) @@ -35,15 +32,8 @@ func (s snmpScannerImpl) RunDeviceScan(snmpConnection *gosnmp.GoSNMP, deviceName metadataPayloads := metadata.BatchDeviceScan(deviceNamespace, time.Now(), metadata.PayloadMetadataBatchSize, deviceOids) for _, payload := range metadataPayloads { - payloadBytes, err := json.Marshal(payload) + err := s.SendPayload(payload) if err != nil { - s.log.Errorf("Error marshalling device metadata: %v", err) - continue - } - m := message.NewMessage(payloadBytes, nil, "", 0) - s.log.Debugf("Device OID metadata payload is %d bytes", len(payloadBytes)) - s.log.Tracef("Device OID metadata payload: %s", string(payloadBytes)) - if err := s.epforwarder.SendEventPlatformEventBlocking(m, eventplatform.EventTypeNetworkDevicesMetadata); err != nil { return err } } diff --git a/comp/snmpscan/impl/sendpayload.go b/comp/snmpscan/impl/sendpayload.go new file mode 100644 index 0000000000000..9d2e767e8a603 --- /dev/null +++ b/comp/snmpscan/impl/sendpayload.go @@ -0,0 +1,28 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
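A brief, self-contained sketch of the delegation this refactor introduces: RunDeviceScan no longer marshals and forwards payloads itself but hands each one to a SendPayload method. The scanner, forwarder, and payload types below are simplified stand-ins (not the agent's real interfaces), and error handling is simplified relative to the actual implementation.

// Sketch of the RunDeviceScan -> SendPayload delegation, using stand-in types.
package main

import (
	"encoding/json"
	"fmt"
)

// payload is a stand-in for metadata.NetworkDevicesMetadata.
type payload struct {
	Namespace string `json:"namespace"`
	DeviceID  string `json:"device_id"`
}

// forwarder is a stand-in for the event-platform forwarder dependency.
type forwarder interface {
	SendBlocking(msg []byte, eventType string) error
}

type scanner struct {
	fwd forwarder
}

// SendPayload owns marshaling and forwarding, mirroring the new method:
// scan logic no longer needs to know about JSON or the event platform.
func (s scanner) SendPayload(p payload) error {
	b, err := json.Marshal(p)
	if err != nil {
		return fmt.Errorf("marshal device metadata: %w", err)
	}
	return s.fwd.SendBlocking(b, "network-devices-metadata")
}

// RunDeviceScan builds payloads and delegates delivery to SendPayload.
func (s scanner) RunDeviceScan(namespace, deviceID string) error {
	for _, p := range []payload{{Namespace: namespace, DeviceID: deviceID}} {
		if err := s.SendPayload(p); err != nil {
			return err
		}
	}
	return nil
}

// printForwarder is a trivial forwarder used only for this example.
type printForwarder struct{}

func (printForwarder) SendBlocking(msg []byte, eventType string) error {
	fmt.Printf("%s: %s\n", eventType, msg)
	return nil
}

func main() {
	s := scanner{fwd: printForwarder{}}
	_ = s.RunDeviceScan("default", "default:10.0.0.1")
}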
+ +package snmpscanimpl + +import ( + "encoding/json" + "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" + "github.com/DataDog/datadog-agent/pkg/logs/message" + "github.com/DataDog/datadog-agent/pkg/networkdevice/metadata" +) + +func (s snmpScannerImpl) SendPayload(payload metadata.NetworkDevicesMetadata) error { + payloadBytes, err := json.Marshal(payload) + if err != nil { + s.log.Errorf("Error marshalling device metadata: %v", err) + return nil + } + m := message.NewMessage(payloadBytes, nil, "", 0) + s.log.Debugf("Device metadata payload is %d bytes", len(payloadBytes)) + s.log.Tracef("Device metadata payload: %s", string(payloadBytes)) + if err := s.epforwarder.SendEventPlatformEventBlocking(m, eventplatform.EventTypeNetworkDevicesMetadata); err != nil { + return err + } + return nil +} diff --git a/comp/snmpscan/mock/mock.go b/comp/snmpscan/mock/mock.go index 3804870a33f40..0387e4e139c24 100644 --- a/comp/snmpscan/mock/mock.go +++ b/comp/snmpscan/mock/mock.go @@ -13,6 +13,7 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" snmpscan "github.com/DataDog/datadog-agent/comp/snmpscan/def" + "github.com/DataDog/datadog-agent/pkg/networkdevice/metadata" "github.com/gosnmp/gosnmp" ) @@ -38,3 +39,6 @@ func (m mock) RunDeviceScan(_ *gosnmp.GoSNMP, _ string, _ string) error { func (m mock) RunSnmpWalk(_ *gosnmp.GoSNMP, _ string) error { return nil } +func (m mock) SendPayload(_ metadata.NetworkDevicesMetadata) error { + return nil +} diff --git a/comp/trace/agent/def/go.mod b/comp/trace/agent/def/go.mod index 1bfc5a3ba75af..da112c37ef5d2 100644 --- a/comp/trace/agent/def/go.mod +++ b/comp/trace/agent/def/go.mod @@ -6,10 +6,12 @@ replace github.com/DataDog/datadog-agent/pkg/proto => ../../../../pkg/proto require ( github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 - go.opentelemetry.io/collector/pdata v1.21.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 + go.opentelemetry.io/collector/pdata v1.24.0 ) +require go.opentelemetry.io/collector/component/componenttest v0.118.0 // indirect + require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -17,19 +19,20 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect - github.com/tinylib/msgp v1.2.4 // indirect - go.opentelemetry.io/collector/component v0.115.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/semconv v0.115.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + github.com/tinylib/msgp v1.2.5 // indirect + go.opentelemetry.io/collector/component v0.118.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/semconv v0.118.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/net v0.34.0 // indirect + 
golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect + google.golang.org/protobuf v1.36.3 // indirect ) diff --git a/comp/trace/agent/def/go.sum b/comp/trace/agent/def/go.sum index 92539a14a3bd1..683e7bc8bcb0e 100644 --- a/comp/trace/agent/def/go.sum +++ b/comp/trace/agent/def/go.sum @@ -1,5 +1,5 @@ -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -39,34 +39,36 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/vmihailenco/msgpack/v4 v4.3.13 h1:A2wsiTbvp63ilDaWmsk2wjx6xZdxQOvpiNlKBGKKXKI= github.com/vmihailenco/msgpack/v4 v4.3.13/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= 
-go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -82,16 +84,16 @@ golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -106,11 +108,11 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/comp/trace/agent/impl/agent.go b/comp/trace/agent/impl/agent.go index de9237dedeea5..f05d2d7b3c310 100644 --- a/comp/trace/agent/impl/agent.go +++ b/comp/trace/agent/impl/agent.go @@ -24,7 +24,6 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" "go.uber.org/fx" - "github.com/DataDog/datadog-agent/comp/api/authtoken" "github.com/DataDog/datadog-agent/comp/core/secrets" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/dogstatsd/statsd" @@ -40,7 +39,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" ddgostatsd "github.com/DataDog/datadog-go/v5/statsd" @@ -62,14 +61,13 @@ type dependencies struct { Shutdowner fx.Shutdowner Config config.Component - Secrets optional.Option[secrets.Component] + Secrets option.Option[secrets.Component] Context context.Context Params *Params TelemetryCollector telemetry.TelemetryCollector Statsd statsd.Component Tagger tagger.Component Compressor compression.Component - At authtoken.Component } var _ traceagent.Component = (*component)(nil) @@ -91,11 +89,10 @@ type component struct { cancel context.CancelFunc config config.Component - secrets optional.Option[secrets.Component] + secrets option.Option[secrets.Component] params *Params tagger tagger.Component telemetryCollector telemetry.TelemetryCollector - at authtoken.Component wg *sync.WaitGroup } @@ -118,7 +115,6 @@ func NewAgent(deps dependencies) (traceagent.Component, error) { params: deps.Params, telemetryCollector: deps.TelemetryCollector, tagger: deps.Tagger, - at: deps.At, wg: &sync.WaitGroup{}, } statsdCl, err := setupMetrics(deps.Statsd, c.config, c.telemetryCollector) diff --git a/comp/trace/agent/impl/run.go b/comp/trace/agent/impl/run.go index f40b36e29ae7f..20ecbc6496a1b 100644 --- a/comp/trace/agent/impl/run.go +++ b/comp/trace/agent/impl/run.go @@ -98,9 +98,6 @@ func runAgentSidekicks(ag component) error { })) } - // Configure the Trace Agent Debug server to use the IPC certificate - ag.Agent.DebugServer.SetTLSConfig(ag.at.GetTLSServerConfig()) - log.Infof("Trace agent running on host %s", tracecfg.Hostname) if pcfg := profilingConfig(tracecfg); pcfg != nil { if err := profiling.Start(*pcfg); err != nil { diff --git a/comp/trace/bundle_test.go b/comp/trace/bundle_test.go index 692e7c255f473..e9874a4b40077 100644 --- a/comp/trace/bundle_test.go +++ b/comp/trace/bundle_test.go @@ -13,8 +13,6 @@ import ( "github.com/stretchr/testify/require" "go.uber.org/fx" - "github.com/DataDog/datadog-agent/comp/api/authtoken/createandfetchimpl" - "github.com/DataDog/datadog-agent/comp/api/authtoken/fetchonlyimpl" "github.com/DataDog/datadog-agent/comp/core" coreconfig "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" @@ -47,7 +45,6 @@ func TestBundleDependencies(t *testing.T) { zstdfx.Module(), taggerfx.Module(tagger.Params{}), fx.Supply(&traceagentimpl.Params{}), - createandfetchimpl.Module(), ) } @@ -78,7 +75,6 @@ func TestMockBundleDependencies(t *testing.T) { fx.Invoke(func(_ traceagent.Component) {}), MockBundle(), taggerfx.Module(tagger.Params{}), - fetchonlyimpl.MockModule(), )) require.NotNil(t, cfg.Object()) diff --git 
a/comp/trace/config/config_test.go b/comp/trace/config/config_test.go index 9706e2c784f4c..3b201cef2495c 100644 --- a/comp/trace/config/config_test.go +++ b/comp/trace/config/config_test.go @@ -9,6 +9,7 @@ import ( "bufio" "bytes" "context" + "crypto/tls" _ "embed" "encoding/json" "errors" @@ -555,6 +556,7 @@ func TestFullYamlConfig(t *testing.T) { assert.Equal(t, "mymachine", cfg.Hostname) assert.Equal(t, "https://user:password@proxy_for_https:1234", cfg.ProxyURL.String()) assert.True(t, cfg.SkipSSLValidation) + assert.Equal(t, uint16(tls.VersionTLS13), cfg.NewHTTPTransport().TLSClientConfig.MinVersion) assert.Equal(t, 18125, cfg.StatsdPort) assert.False(t, cfg.Enabled) assert.Equal(t, "abc", cfg.LogFilePath) @@ -610,6 +612,12 @@ func TestFullYamlConfig(t *testing.T) { Repl: "?", Re: regexp.MustCompile("(?s).*"), }, + { + Name: "exception.stacktrace", + Pattern: "(?s).*", + Repl: "?", + Re: regexp.MustCompile("(?s).*"), + }, }, cfg.ReplaceTags) assert.EqualValues(t, []string{"/health", "/500"}, cfg.Ignore["resource"]) @@ -629,6 +637,7 @@ func TestFullYamlConfig(t *testing.T) { assert.True(t, o.CreditCards.Enabled) assert.True(t, o.CreditCards.Luhn) assert.True(t, o.Cache.Enabled) + assert.Equal(t, int64(5555555), o.Cache.MaxSize) assert.True(t, cfg.InstallSignature.Found) assert.Equal(t, traceconfig.InstallSignatureConfig{ @@ -1776,6 +1785,22 @@ func TestLoadEnv(t *testing.T) { assert.False(t, cfg.Obfuscation.Cache.Enabled) }) + env = "DD_APM_OBFUSCATION_CACHE_MAX_SIZE" + t.Run(env, func(t *testing.T) { + t.Setenv(env, "1234567") + + c := buildConfigComponent(t, true, fx.Replace(corecomp.MockParams{ + Params: corecomp.Params{ConfFilePath: "./testdata/full.yaml"}, + })) + cfg := c.Object() + + assert.NotNil(t, cfg) + actualConfig := pkgconfigsetup.Datadog().GetString("apm_config.obfuscation.cache.max_size") + actualParsed := cfg.Obfuscation.Cache.MaxSize + assert.Equal(t, "1234567", actualConfig) + assert.Equal(t, int64(1234567), actualParsed) + }) + env = "DD_APM_PROFILING_ADDITIONAL_ENDPOINTS" t.Run(env, func(t *testing.T) { t.Setenv(env, `{"url1": ["key1", "key2"], "url2": ["key3"]}`) diff --git a/comp/trace/config/setup.go b/comp/trace/config/setup.go index 06a157dc6418b..7a786aeb6e716 100644 --- a/comp/trace/config/setup.go +++ b/comp/trace/config/setup.go @@ -127,6 +127,9 @@ func prepareConfig(c corecompcfg.Component, tagger tagger.Component) (*config.Ag } cfg.ContainerProcRoot = coreConfigObject.GetString("container_proc_root") cfg.GetAgentAuthToken = apiutil.GetAuthToken + cfg.HTTPTransportFunc = func() *http.Transport { + return httputils.CreateHTTPTransport(coreConfigObject) + } return cfg, nil } @@ -227,6 +230,9 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error if core.IsSet("apm_config.connection_limit") { c.ConnectionLimit = core.GetInt("apm_config.connection_limit") } + if core.IsSet("apm_config.sql_obfuscation_mode") { + c.SQLObfuscationMode = core.GetString("apm_config.sql_obfuscation_mode") + } /** * NOTE: PeerTagsAggregation is on by default as of Q4 2024. 
To get the default experience, @@ -405,110 +411,41 @@ func applyDatadogConfig(c *config.AgentConfig, core corecompcfg.Component) error c.TelemetryConfig.Endpoints = appendEndpoints(c.TelemetryConfig.Endpoints, "apm_config.telemetry.additional_endpoints") } c.Obfuscation = new(config.ObfuscationConfig) - if core.IsSet("apm_config.obfuscation") { - cfg := pkgconfigsetup.Datadog() - var o config.ObfuscationConfig - err := structure.UnmarshalKey(cfg, "apm_config.obfuscation", &o) - if err == nil { - c.Obfuscation = &o - if o.RemoveStackTraces { - if err = addReplaceRule(c, "error.stack", `(?s).*`, "?"); err != nil { - return err - } - } - } - } - { - // Obfuscation of database statements will be ON by default. Any new obfuscators should likely be - // enabled by default as well. This can be explicitly disabled with the agent config. Any changes - // to obfuscation options or defaults must be reflected in the public docs. - c.Obfuscation.ES.Enabled = true - c.Obfuscation.OpenSearch.Enabled = true - c.Obfuscation.Mongo.Enabled = true - c.Obfuscation.Memcached.Enabled = true - c.Obfuscation.Redis.Enabled = true - c.Obfuscation.CreditCards.Enabled = true - c.Obfuscation.Cache.Enabled = true - - // TODO(x): There is an issue with pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation"), probably coming from Viper, - // where it returns false even is "apm_config.obfuscation.credit_cards.enabled" is set via an environment - // variable, so we need a temporary workaround by specifically setting env. var. accessible fields. - if core.IsSet("apm_config.obfuscation.credit_cards.enabled") { - c.Obfuscation.CreditCards.Enabled = core.GetBool("apm_config.obfuscation.credit_cards.enabled") - } - if core.IsSet("apm_config.obfuscation.credit_cards.luhn") { - c.Obfuscation.CreditCards.Luhn = core.GetBool("apm_config.obfuscation.credit_cards.luhn") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.elasticsearch.enabled") { - c.Obfuscation.ES.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.elasticsearch.keep_values") { - c.Obfuscation.ES.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") { - c.Obfuscation.ES.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.opensearch.enabled") { - c.Obfuscation.OpenSearch.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.opensearch.enabled") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.opensearch.keep_values") { - c.Obfuscation.OpenSearch.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.keep_values") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.opensearch.obfuscate_sql_values") { - c.Obfuscation.OpenSearch.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.obfuscate_sql_values") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.http.remove_query_string") { - c.Obfuscation.HTTP.RemoveQueryString = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.http.remove_paths_with_digits") { - c.Obfuscation.HTTP.RemovePathDigits = 
pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_paths_with_digits") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.memcached.enabled") { - c.Obfuscation.Memcached.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.memcached.keep_command") { - c.Obfuscation.Memcached.KeepCommand = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.mongodb.enabled") { - c.Obfuscation.Mongo.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.mongodb.keep_values") { - c.Obfuscation.Mongo.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.mongodb.obfuscate_sql_values") { - c.Obfuscation.Mongo.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.redis.enabled") { - c.Obfuscation.Redis.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.enabled") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.redis.remove_all_args") { - c.Obfuscation.Redis.RemoveAllArgs = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.remove_stack_traces") { - c.Obfuscation.RemoveStackTraces = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.enabled") { - c.Obfuscation.SQLExecPlan.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.keep_values") { - c.Obfuscation.SQLExecPlan.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") { - c.Obfuscation.SQLExecPlan.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.enabled") { - c.Obfuscation.SQLExecPlanNormalize.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") { - c.Obfuscation.SQLExecPlanNormalize.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") - } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") { - c.Obfuscation.SQLExecPlanNormalize.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") + c.Obfuscation.ES.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.elasticsearch.enabled") + c.Obfuscation.ES.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values") + c.Obfuscation.ES.ObfuscateSQLValues = 
pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values") + c.Obfuscation.OpenSearch.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.opensearch.enabled") + c.Obfuscation.OpenSearch.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.keep_values") + c.Obfuscation.OpenSearch.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.opensearch.obfuscate_sql_values") + c.Obfuscation.Mongo.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.mongodb.enabled") + c.Obfuscation.Mongo.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.keep_values") + c.Obfuscation.Mongo.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values") + c.Obfuscation.SQLExecPlan.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan.enabled") + c.Obfuscation.SQLExecPlan.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values") + c.Obfuscation.SQLExecPlan.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values") + c.Obfuscation.SQLExecPlanNormalize.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled") + c.Obfuscation.SQLExecPlanNormalize.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values") + c.Obfuscation.SQLExecPlanNormalize.ObfuscateSQLValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values") + c.Obfuscation.HTTP.RemoveQueryString = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_query_string") + c.Obfuscation.HTTP.RemovePathDigits = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.http.remove_paths_with_digits") + c.Obfuscation.RemoveStackTraces = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.remove_stack_traces") + if c.Obfuscation.RemoveStackTraces { + if err = addReplaceRule(c, "error.stack", `(?s).*`, "?"); err != nil { + return err } - if pkgconfigsetup.Datadog().IsSet("apm_config.obfuscation.cache.enabled") { - c.Obfuscation.Cache.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.cache.enabled") + if err = addReplaceRule(c, "exception.stacktrace", `(?s).*`, "?"); err != nil { + return err } } + c.Obfuscation.Memcached.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.enabled") + c.Obfuscation.Memcached.KeepCommand = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.memcached.keep_command") + c.Obfuscation.Redis.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.enabled") + c.Obfuscation.Redis.RemoveAllArgs = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.redis.remove_all_args") + c.Obfuscation.CreditCards.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.credit_cards.enabled") + c.Obfuscation.CreditCards.Luhn = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.credit_cards.luhn") + c.Obfuscation.CreditCards.KeepValues = pkgconfigsetup.Datadog().GetStringSlice("apm_config.obfuscation.credit_cards.keep_values") + c.Obfuscation.Cache.Enabled = pkgconfigsetup.Datadog().GetBool("apm_config.obfuscation.cache.enabled") + c.Obfuscation.Cache.MaxSize = pkgconfigsetup.Datadog().GetInt64("apm_config.obfuscation.cache.max_size") if 
core.IsSet("apm_config.filter_tags.require") { tags := core.GetStringSlice("apm_config.filter_tags.require") diff --git a/comp/trace/config/testdata/full.yaml b/comp/trace/config/testdata/full.yaml index 0c6535550e615..e1897a1f56b7d 100644 --- a/comp/trace/config/testdata/full.yaml +++ b/comp/trace/config/testdata/full.yaml @@ -7,6 +7,7 @@ proxy: - https://my2.endpoint.eu use_dogstatsd: yes skip_ssl_validation: yes +min_tls_version: "tlsv1.3" dogstatsd_port: 18125 dogstatsd_non_local_traffic: yes log_level: info @@ -91,3 +92,4 @@ apm_config: luhn: true cache: enabled: true + max_size: 5555555 diff --git a/comp/trace/status/statusimpl/status.go b/comp/trace/status/statusimpl/status.go index 00a8730b87da8..e476ee0281d7a 100644 --- a/comp/trace/status/statusimpl/status.go +++ b/comp/trace/status/statusimpl/status.go @@ -95,7 +95,7 @@ func (s statusProvider) populateStatus() map[string]interface{} { port := s.Config.GetInt("apm_config.debug.port") c := client() - url := fmt.Sprintf("https://localhost:%d/debug/vars", port) + url := fmt.Sprintf("http://localhost:%d/debug/vars", port) resp, err := apiutil.DoGet(c, url, apiutil.CloseConnection) if err != nil { return map[string]interface{}{ diff --git a/comp/updater/updater/updaterimpl/updater.go b/comp/updater/updater/updaterimpl/updater.go index 5ed1cbd252375..847061133f10e 100644 --- a/comp/updater/updater/updaterimpl/updater.go +++ b/comp/updater/updater/updaterimpl/updater.go @@ -20,7 +20,7 @@ import ( updatercomp "github.com/DataDog/datadog-agent/comp/updater/updater" "github.com/DataDog/datadog-agent/pkg/fleet/daemon" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) var ( @@ -41,7 +41,7 @@ type dependencies struct { Hostname hostname.Component Log log.Component Config config.Component - RemoteConfig optional.Option[rcservice.Component] + RemoteConfig option.Option[rcservice.Component] } func newUpdaterComponent(lc fx.Lifecycle, dependencies dependencies) (updatercomp.Component, error) { diff --git a/comp/updater/updater/updaterimpl/updater_test.go b/comp/updater/updater/updaterimpl/updater_test.go index 09dee5e100d0e..4232495111938 100644 --- a/comp/updater/updater/updaterimpl/updater_test.go +++ b/comp/updater/updater/updaterimpl/updater_test.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type testDependencies struct { @@ -32,7 +32,7 @@ func TestUpdaterWithoutRemoteConfig(t *testing.T) { deps := fxutil.Test[testDependencies](t, fx.Options( core.MockBundle(), fx.Supply(core.BundleParams{}), - fx.Supply(optional.NewNoneOption[rcservice.Component]()), + fx.Supply(option.None[rcservice.Component]()), Module(), )) _, err := newUpdaterComponent(&mockLifecycle{}, deps.Dependencies) diff --git a/devenv/scripts/Install-DevEnv.ps1 b/devenv/scripts/Install-DevEnv.ps1 index e977850a7e5ec..5d7b7e6a4c43b 100644 --- a/devenv/scripts/Install-DevEnv.ps1 +++ b/devenv/scripts/Install-DevEnv.ps1 @@ -45,7 +45,7 @@ Write-Host -ForegroundColor Yellow -BackgroundColor DarkGreen '- Installing Gola $ErrorActionPreference = 'Stop' $ProgressPreference = 'SilentlyContinue' -$go_version = "1.23.3" +$go_version = "1.23.5" Write-Host -ForegroundColor Green "Installing go $go_version" $gozip = 
"https://dl.google.com/go/go$go_version.windows-amd64.zip" diff --git a/docs/cloud-workload-security/backend_linux.md b/docs/cloud-workload-security/backend_linux.md index c13719c22d141..8d4a78ad43576 100644 --- a/docs/cloud-workload-security/backend_linux.md +++ b/docs/cloud-workload-security/backend_linux.md @@ -65,6 +65,20 @@ CSM Threats event for Linux systems have the following JSON schema: ], "description": "AWSSecurityCredentialsSerializer serializes the security credentials from an AWS IMDS request" }, + "AcceptEvent": { + "properties": { + "addr": { + "$ref": "#/$defs/IPPortFamily", + "description": "Bound address (if any)" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "addr" + ], + "description": "AcceptEventSerializer serializes a bind event to JSON" + }, "AgentContext": { "properties": { "rule_id": { @@ -622,6 +636,43 @@ CSM Threats event for Linux systems have the following JSON schema: ], "description": "FileEventSerializer serializes a file event to JSON" }, + "Flow": { + "properties": { + "l3_protocol": { + "type": "string", + "description": "l3_protocol is the layer 3 protocol name" + }, + "l4_protocol": { + "type": "string", + "description": "l4_protocol is the layer 4 protocol name" + }, + "source": { + "$ref": "#/$defs/IPPort", + "description": "source is the emitter of the network event" + }, + "destination": { + "$ref": "#/$defs/IPPort", + "description": "destination is the receiver of the network event" + }, + "ingress": { + "$ref": "#/$defs/NetworkStats", + "description": "ingress holds the network statistics for ingress traffic" + }, + "egress": { + "$ref": "#/$defs/NetworkStats", + "description": "egress holds the network statistics for egress traffic" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "l3_protocol", + "l4_protocol", + "source", + "destination" + ], + "description": "FlowSerializer defines a new flow serializer" + }, "IMDSEvent": { "properties": { "type": { @@ -906,6 +957,10 @@ CSM Threats event for Linux systems have the following JSON schema: "size": { "type": "integer", "description": "size is the size in bytes of the network event" + }, + "network_direction": { + "type": "string", + "description": "network_direction indicates if the packet was captured on ingress or egress" } }, "additionalProperties": false, @@ -915,7 +970,8 @@ CSM Threats event for Linux systems have the following JSON schema: "l4_protocol", "source", "destination", - "size" + "size", + "network_direction" ], "description": "NetworkContextSerializer serializes the network context to JSON" }, @@ -943,6 +999,39 @@ CSM Threats event for Linux systems have the following JSON schema: ], "description": "NetworkDeviceSerializer serializes the network device context to JSON" }, + "NetworkFlowMonitor": { + "properties": { + "device": { + "$ref": "#/$defs/NetworkDevice", + "description": "device is the network device on which the event was captured" + }, + "flows": { + "items": { + "$ref": "#/$defs/Flow" + }, + "type": "array", + "description": "flows is the list of flows with network statistics that were captured" + } + }, + "additionalProperties": false, + "type": "object", + "description": "NetworkFlowMonitorSerializer defines a network monitor event serializer" + }, + "NetworkStats": { + "properties": { + "data_size": { + "type": "integer", + "description": "data_size is the total count of bytes sent or received" + }, + "packet_count": { + "type": "integer", + "description": "packet_count is the total count of packets 
sent or received" + } + }, + "additionalProperties": false, + "type": "object", + "description": "NetworkStatsSerializer defines a new network stats serializer" + }, "PTraceEvent": { "properties": { "request": { @@ -1376,6 +1465,10 @@ CSM Threats event for Linux systems have the following JSON schema: "type": "integer", "description": "size is the size in bytes of the network event" }, + "network_direction": { + "type": "string", + "description": "network_direction indicates if the packet was captured on ingress or egress" + }, "tls": { "$ref": "#/$defs/TLSContext" } @@ -1387,7 +1480,8 @@ CSM Threats event for Linux systems have the following JSON schema: "l4_protocol", "source", "destination", - "size" + "size", + "network_direction" ], "description": "RawPacketSerializer defines a raw packet serializer" }, @@ -1615,6 +1709,12 @@ CSM Threats event for Linux systems have the following JSON schema: }, "mount": { "$ref": "#/$defs/SyscallArgs" + }, + "mkdir": { + "$ref": "#/$defs/SyscallArgs" + }, + "rmdir": { + "$ref": "#/$defs/SyscallArgs" } }, "additionalProperties": false, @@ -1766,6 +1866,9 @@ CSM Threats event for Linux systems have the following JSON schema: "imds": { "$ref": "#/$defs/IMDSEvent" }, + "accept": { + "$ref": "#/$defs/AcceptEvent" + }, "bind": { "$ref": "#/$defs/BindEvent" }, @@ -1786,6 +1889,9 @@ CSM Threats event for Linux systems have the following JSON schema: }, "packet": { "$ref": "#/$defs/RawPacket" + }, + "network_flow_monitor": { + "$ref": "#/$defs/NetworkFlowMonitor" } }, "additionalProperties": false, @@ -1822,6 +1928,7 @@ CSM Threats event for Linux systems have the following JSON schema: | `splice` | $ref | Please see [SpliceEvent](#spliceevent) | | `dns` | $ref | Please see [DNSEvent](#dnsevent) | | `imds` | $ref | Please see [IMDSEvent](#imdsevent) | +| `accept` | $ref | Please see [AcceptEvent](#acceptevent) | | `bind` | $ref | Please see [BindEvent](#bindevent) | | `connect` | $ref | Please see [ConnectEvent](#connectevent) | | `mount` | $ref | Please see [MountEvent](#mountevent) | @@ -1829,6 +1936,7 @@ CSM Threats event for Linux systems have the following JSON schema: | `usr` | $ref | Please see [UserContext](#usercontext) | | `syscall` | $ref | Please see [SyscallContext](#syscallcontext) | | `packet` | $ref | Please see [RawPacket](#rawpacket) | +| `network_flow_monitor` | $ref | Please see [NetworkFlowMonitor](#networkflowmonitor) | ## `AWSIMDSEvent` @@ -1914,6 +2022,35 @@ CSM Threats event for Linux systems have the following JSON schema: | `expiration` | expiration is the expiration date of the credentials | +## `AcceptEvent` + + +{{< code-block lang="json" collapsible="true" >}} +{ + "properties": { + "addr": { + "$ref": "#/$defs/IPPortFamily", + "description": "Bound address (if any)" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "addr" + ], + "description": "AcceptEventSerializer serializes a bind event to JSON" +} + +{{< /code-block >}} + +| Field | Description | +| ----- | ----------- | +| `addr` | Bound address (if any) | + +| References | +| ---------- | +| [IPPortFamily](#ipportfamily) | + ## `AgentContext` @@ -2741,6 +2878,64 @@ CSM Threats event for Linux systems have the following JSON schema: | ---------- | | [File](#file) | +## `Flow` + + +{{< code-block lang="json" collapsible="true" >}} +{ + "properties": { + "l3_protocol": { + "type": "string", + "description": "l3_protocol is the layer 3 protocol name" + }, + "l4_protocol": { + "type": "string", + "description": "l4_protocol is the layer 4 protocol 
name" + }, + "source": { + "$ref": "#/$defs/IPPort", + "description": "source is the emitter of the network event" + }, + "destination": { + "$ref": "#/$defs/IPPort", + "description": "destination is the receiver of the network event" + }, + "ingress": { + "$ref": "#/$defs/NetworkStats", + "description": "ingress holds the network statistics for ingress traffic" + }, + "egress": { + "$ref": "#/$defs/NetworkStats", + "description": "egress holds the network statistics for egress traffic" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "l3_protocol", + "l4_protocol", + "source", + "destination" + ], + "description": "FlowSerializer defines a new flow serializer" +} + +{{< /code-block >}} + +| Field | Description | +| ----- | ----------- | +| `l3_protocol` | l3_protocol is the layer 3 protocol name | +| `l4_protocol` | l4_protocol is the layer 4 protocol name | +| `source` | source is the emitter of the network event | +| `destination` | destination is the receiver of the network event | +| `ingress` | ingress holds the network statistics for ingress traffic | +| `egress` | egress holds the network statistics for egress traffic | + +| References | +| ---------- | +| [IPPort](#ipport) | +| [NetworkStats](#networkstats) | + ## `IMDSEvent` @@ -3162,6 +3357,10 @@ CSM Threats event for Linux systems have the following JSON schema: "size": { "type": "integer", "description": "size is the size in bytes of the network event" + }, + "network_direction": { + "type": "string", + "description": "network_direction indicates if the packet was captured on ingress or egress" } }, "additionalProperties": false, @@ -3171,7 +3370,8 @@ CSM Threats event for Linux systems have the following JSON schema: "l4_protocol", "source", "destination", - "size" + "size", + "network_direction" ], "description": "NetworkContextSerializer serializes the network context to JSON" } @@ -3186,6 +3386,7 @@ CSM Threats event for Linux systems have the following JSON schema: | `source` | source is the emitter of the network event | | `destination` | destination is the receiver of the network event | | `size` | size is the size in bytes of the network event | +| `network_direction` | network_direction indicates if the packet was captured on ingress or egress | | References | | ---------- | @@ -3230,6 +3431,68 @@ CSM Threats event for Linux systems have the following JSON schema: | `ifname` | ifname is the network interface name | +## `NetworkFlowMonitor` + + +{{< code-block lang="json" collapsible="true" >}} +{ + "properties": { + "device": { + "$ref": "#/$defs/NetworkDevice", + "description": "device is the network device on which the event was captured" + }, + "flows": { + "items": { + "$ref": "#/$defs/Flow" + }, + "type": "array", + "description": "flows is the list of flows with network statistics that were captured" + } + }, + "additionalProperties": false, + "type": "object", + "description": "NetworkFlowMonitorSerializer defines a network monitor event serializer" +} + +{{< /code-block >}} + +| Field | Description | +| ----- | ----------- | +| `device` | device is the network device on which the event was captured | +| `flows` | flows is the list of flows with network statistics that were captured | + +| References | +| ---------- | +| [NetworkDevice](#networkdevice) | + +## `NetworkStats` + + +{{< code-block lang="json" collapsible="true" >}} +{ + "properties": { + "data_size": { + "type": "integer", + "description": "data_size is the total count of bytes sent or received" + }, + 
"packet_count": { + "type": "integer", + "description": "packet_count is the total count of packets sent or received" + } + }, + "additionalProperties": false, + "type": "object", + "description": "NetworkStatsSerializer defines a new network stats serializer" +} + +{{< /code-block >}} + +| Field | Description | +| ----- | ----------- | +| `data_size` | data_size is the total count of bytes sent or received | +| `packet_count` | packet_count is the total count of packets sent or received | + + ## `PTraceEvent` @@ -3811,6 +4074,10 @@ CSM Threats event for Linux systems have the following JSON schema: "type": "integer", "description": "size is the size in bytes of the network event" }, + "network_direction": { + "type": "string", + "description": "network_direction indicates if the packet was captured on ingress or egress" + }, "tls": { "$ref": "#/$defs/TLSContext" } @@ -3822,7 +4089,8 @@ CSM Threats event for Linux systems have the following JSON schema: "l4_protocol", "source", "destination", - "size" + "size", + "network_direction" ], "description": "RawPacketSerializer defines a raw packet serializer" } @@ -3837,6 +4105,7 @@ CSM Threats event for Linux systems have the following JSON schema: | `source` | source is the emitter of the network event | | `destination` | destination is the receiver of the network event | | `size` | size is the size in bytes of the network event | +| `network_direction` | network_direction indicates if the packet was captured on ingress or egress | | References | | ---------- | @@ -4206,6 +4475,12 @@ CSM Threats event for Linux systems have the following JSON schema: }, "mount": { "$ref": "#/$defs/SyscallArgs" + }, + "mkdir": { + "$ref": "#/$defs/SyscallArgs" + }, + "rmdir": { + "$ref": "#/$defs/SyscallArgs" } }, "additionalProperties": false, diff --git a/docs/cloud-workload-security/backend_linux.schema.json b/docs/cloud-workload-security/backend_linux.schema.json index 7b9f9b7e3ea60..b4e6fed965914 100644 --- a/docs/cloud-workload-security/backend_linux.schema.json +++ b/docs/cloud-workload-security/backend_linux.schema.json @@ -54,6 +54,20 @@ ], "description": "AWSSecurityCredentialsSerializer serializes the security credentials from an AWS IMDS request" }, + "AcceptEvent": { + "properties": { + "addr": { + "$ref": "#/$defs/IPPortFamily", + "description": "Bound address (if any)" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "addr" + ], + "description": "AcceptEventSerializer serializes a bind event to JSON" + }, "AgentContext": { "properties": { "rule_id": { @@ -611,6 +625,43 @@ ], "description": "FileEventSerializer serializes a file event to JSON" }, + "Flow": { + "properties": { + "l3_protocol": { + "type": "string", + "description": "l3_protocol is the layer 3 protocol name" + }, + "l4_protocol": { + "type": "string", + "description": "l4_protocol is the layer 4 protocol name" + }, + "source": { + "$ref": "#/$defs/IPPort", + "description": "source is the emitter of the network event" + }, + "destination": { + "$ref": "#/$defs/IPPort", + "description": "destination is the receiver of the network event" + }, + "ingress": { + "$ref": "#/$defs/NetworkStats", + "description": "ingress holds the network statistics for ingress traffic" + }, + "egress": { + "$ref": "#/$defs/NetworkStats", + "description": "egress holds the network statistics for egress traffic" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "l3_protocol", + "l4_protocol", + "source", + "destination" + ], + "description": 
"FlowSerializer defines a new flow serializer" + }, "IMDSEvent": { "properties": { "type": { @@ -895,6 +946,10 @@ "size": { "type": "integer", "description": "size is the size in bytes of the network event" + }, + "network_direction": { + "type": "string", + "description": "network_direction indicates if the packet was captured on ingress or egress" } }, "additionalProperties": false, @@ -904,7 +959,8 @@ "l4_protocol", "source", "destination", - "size" + "size", + "network_direction" ], "description": "NetworkContextSerializer serializes the network context to JSON" }, @@ -932,6 +988,39 @@ ], "description": "NetworkDeviceSerializer serializes the network device context to JSON" }, + "NetworkFlowMonitor": { + "properties": { + "device": { + "$ref": "#/$defs/NetworkDevice", + "description": "device is the network device on which the event was captured" + }, + "flows": { + "items": { + "$ref": "#/$defs/Flow" + }, + "type": "array", + "description": "flows is the list of flows with network statistics that were captured" + } + }, + "additionalProperties": false, + "type": "object", + "description": "NetworkFlowMonitorSerializer defines a network monitor event serializer" + }, + "NetworkStats": { + "properties": { + "data_size": { + "type": "integer", + "description": "data_size is the total count of bytes sent or received" + }, + "packet_count": { + "type": "integer", + "description": "packet_count is the total count of packets sent or received" + } + }, + "additionalProperties": false, + "type": "object", + "description": "NetworkStatsSerializer defines a new network stats serializer" + }, "PTraceEvent": { "properties": { "request": { @@ -1365,6 +1454,10 @@ "type": "integer", "description": "size is the size in bytes of the network event" }, + "network_direction": { + "type": "string", + "description": "network_direction indicates if the packet was captured on ingress or egress" + }, "tls": { "$ref": "#/$defs/TLSContext" } @@ -1376,7 +1469,8 @@ "l4_protocol", "source", "destination", - "size" + "size", + "network_direction" ], "description": "RawPacketSerializer defines a raw packet serializer" }, @@ -1604,6 +1698,12 @@ }, "mount": { "$ref": "#/$defs/SyscallArgs" + }, + "mkdir": { + "$ref": "#/$defs/SyscallArgs" + }, + "rmdir": { + "$ref": "#/$defs/SyscallArgs" } }, "additionalProperties": false, @@ -1755,6 +1855,9 @@ "imds": { "$ref": "#/$defs/IMDSEvent" }, + "accept": { + "$ref": "#/$defs/AcceptEvent" + }, "bind": { "$ref": "#/$defs/BindEvent" }, @@ -1775,6 +1878,9 @@ }, "packet": { "$ref": "#/$defs/RawPacket" + }, + "network_flow_monitor": { + "$ref": "#/$defs/NetworkFlowMonitor" } }, "additionalProperties": false, diff --git a/docs/cloud-workload-security/linux_expressions.md b/docs/cloud-workload-security/linux_expressions.md index 7effc494d89f7..84f7f1b6ebcd7 100644 --- a/docs/cloud-workload-security/linux_expressions.md +++ b/docs/cloud-workload-security/linux_expressions.md @@ -29,6 +29,7 @@ Triggers are events that correspond to types of activity seen by the system. The | SECL Event | Type | Definition | Agent Version | | ---------- | ---- | ---------- | ------------- | +| `accept` | Network | An accept was executed | 7.60 | | `bind` | Network | A bind was executed | 7.37 | | `bpf` | Kernel | A BPF command was executed | 7.33 | | `capset` | Process | A process changed its capacity set | 7.27 | @@ -46,8 +47,9 @@ Triggers are events that correspond to types of activity seen by the system. 
The | `mmap` | Kernel | A mmap command was executed | 7.35 | | `mount` | File | [Experimental] A filesystem was mounted | 7.42 | | `mprotect` | Kernel | A mprotect command was executed | 7.35 | +| `network_flow_monitor` | Network | A network monitor event was sent | 7.63 | | `open` | File | A file was opened | 7.27 | -| `packet` | Network | A raw network packet captured | 7.60 | +| `packet` | Network | A raw network packet was captured | 7.60 | | `ptrace` | Kernel | A ptrace command was executed | 7.35 | | `removexattr` | File | Remove extended attributes | 7.27 | | `rename` | File | A file/directory was renamed | 7.27 | @@ -122,8 +124,8 @@ The *file.rights* attribute can now be used in addition to *file.mode*. *file.mo | [`cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`container.created_at`](#container-created_at-doc) | Timestamp of the creation of the container | | [`container.id`](#container-id-doc) | ID of the container | | [`container.runtime`](#container-runtime-doc) | Runtime managing the container | @@ -146,8 +148,8 @@ The *file.rights* attribute can now be used in addition to *file.mode*. *file.mo | [`process.ancestors.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`process.ancestors.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`process.ancestors.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`process.ancestors.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`process.ancestors.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`process.ancestors.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`process.ancestors.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`process.ancestors.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`process.ancestors.container.id`](#common-process-container-id-doc) | Container ID | | [`process.ancestors.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -229,8 +231,8 @@ The *file.rights* attribute can now be used in addition to *file.mode*. 
*file.mo | [`process.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`process.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`process.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`process.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`process.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`process.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`process.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`process.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`process.container.id`](#common-process-container-id-doc) | Container ID | | [`process.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -302,8 +304,8 @@ The *file.rights* attribute can now be used in addition to *file.mode*. *file.mo | [`process.parent.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`process.parent.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`process.parent.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`process.parent.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`process.parent.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`process.parent.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`process.parent.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`process.parent.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`process.parent.container.id`](#common-process-container-id-doc) | Container ID | | [`process.parent.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -382,6 +384,18 @@ The *file.rights* attribute can now be used in addition to *file.mode*. 
*file.mo | [`process.user_session.k8s_uid`](#common-usersessioncontext-k8s_uid-doc) | Kubernetes UID of the user that executed the process | | [`process.user_session.k8s_username`](#common-usersessioncontext-k8s_username-doc) | Kubernetes username of the user that executed the process | +### Event `accept` + +An accept was executed + +| Property | Definition | +| -------- | ------------- | +| [`accept.addr.family`](#accept-addr-family-doc) | Address family | +| [`accept.addr.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`accept.addr.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | +| [`accept.addr.port`](#common-ipportcontext-port-doc) | Port number | +| [`accept.retval`](#common-syscallevent-retval-doc) | Return value of the syscall | + ### Event `bind` A bind was executed @@ -550,6 +564,7 @@ A DNS request was sent | [`network.device.ifname`](#common-networkdevicecontext-ifname-doc) | Interface ifname | | [`network.l3_protocol`](#common-networkcontext-l3_protocol-doc) | L3 protocol of the network packet | | [`network.l4_protocol`](#common-networkcontext-l4_protocol-doc) | L4 protocol of the network packet | +| [`network.network_direction`](#common-networkcontext-network_direction-doc) | Network direction of the network packet | | [`network.size`](#common-networkcontext-size-doc) | Size in bytes of the network packet | | [`network.source.ip`](#common-ipportcontext-ip-doc) | IP address | | [`network.source.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | @@ -573,8 +588,8 @@ A process was executed or forked | [`exec.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`exec.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`exec.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`exec.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`exec.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`exec.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`exec.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`exec.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`exec.container.id`](#common-process-container-id-doc) | Container ID | | [`exec.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -664,8 +679,8 @@ A process was terminated | [`exit.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`exit.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`exit.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`exit.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`exit.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`exit.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`exit.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`exit.code`](#exit-code-doc) | Exit code of the process or number of the signal that caused the process to terminate | | [`exit.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`exit.container.id`](#common-process-container-id-doc) | Container ID | @@ -756,6 +771,7 @@ An IMDS event was 
captured | [`network.device.ifname`](#common-networkdevicecontext-ifname-doc) | Interface ifname | | [`network.l3_protocol`](#common-networkcontext-l3_protocol-doc) | L3 protocol of the network packet | | [`network.l4_protocol`](#common-networkcontext-l4_protocol-doc) | L4 protocol of the network packet | +| [`network.network_direction`](#common-networkcontext-network_direction-doc) | Network direction of the network packet | | [`network.size`](#common-networkcontext-size-doc) | Size in bytes of the network packet | | [`network.source.ip`](#common-ipportcontext-ip-doc) | IP address | | [`network.source.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | @@ -873,6 +889,8 @@ A directory was created | [`mkdir.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`mkdir.file.user`](#common-filefields-user-doc) | User of the file's owner | | [`mkdir.retval`](#common-syscallevent-retval-doc) | Return value of the syscall | +| [`mkdir.syscall.mode`](#mkdir-syscall-mode-doc) | Mode of the new directory | +| [`mkdir.syscall.path`](#mkdir-syscall-path-doc) | Path argument of the syscall | ### Event `mmap` @@ -931,6 +949,27 @@ A mprotect command was executed | [`mprotect.retval`](#common-syscallevent-retval-doc) | Return value of the syscall | | [`mprotect.vm_protection`](#mprotect-vm_protection-doc) | initial memory segment protection | +### Event `network_flow_monitor` + +A network monitor event was sent + +| Property | Definition | +| -------- | ------------- | +| [`network_flow_monitor.device.ifname`](#common-networkdevicecontext-ifname-doc) | Interface ifname | +| [`network_flow_monitor.flows.destination.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`network_flow_monitor.flows.destination.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | +| [`network_flow_monitor.flows.destination.port`](#common-ipportcontext-port-doc) | Port number | +| [`network_flow_monitor.flows.egress.data_size`](#common-networkstats-data_size-doc) | Amount of data transmitted or received | +| [`network_flow_monitor.flows.egress.packet_count`](#common-networkstats-packet_count-doc) | Count of network packets transmitted or received | +| [`network_flow_monitor.flows.ingress.data_size`](#common-networkstats-data_size-doc) | Amount of data transmitted or received | +| [`network_flow_monitor.flows.ingress.packet_count`](#common-networkstats-packet_count-doc) | Count of network packets transmitted or received | +| [`network_flow_monitor.flows.l3_protocol`](#network_flow_monitor-flows-l3_protocol-doc) | L3 protocol of the network packet | +| [`network_flow_monitor.flows.l4_protocol`](#network_flow_monitor-flows-l4_protocol-doc) | L4 protocol of the network packet | +| [`network_flow_monitor.flows.length`](#common-string-length-doc) | Length of the corresponding element | +| [`network_flow_monitor.flows.source.ip`](#common-ipportcontext-ip-doc) | IP address | +| [`network_flow_monitor.flows.source.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | +| [`network_flow_monitor.flows.source.port`](#common-ipportcontext-port-doc) | Port number | + ### Event `open` A file was opened @@ -966,7 +1005,7 @@ A file was opened ### Event `packet` -A raw network packet captured +A raw network packet was captured | Property | Definition | | -------- | ------------- | @@ -977,6 +1016,7 @@ A raw network packet captured | [`packet.filter`](#packet-filter-doc) | pcap 
filter expression | | [`packet.l3_protocol`](#common-networkcontext-l3_protocol-doc) | L3 protocol of the network packet | | [`packet.l4_protocol`](#common-networkcontext-l4_protocol-doc) | L4 protocol of the network packet | +| [`packet.network_direction`](#common-networkcontext-network_direction-doc) | Network direction of the network packet | | [`packet.size`](#common-networkcontext-size-doc) | Size in bytes of the network packet | | [`packet.source.ip`](#common-ipportcontext-ip-doc) | IP address | | [`packet.source.is_public`](#common-ipportcontext-is_public-doc) | Whether the IP address belongs to a public network | @@ -1003,8 +1043,8 @@ A ptrace command was executed | [`ptrace.tracee.ancestors.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`ptrace.tracee.ancestors.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`ptrace.tracee.ancestors.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`ptrace.tracee.ancestors.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`ptrace.tracee.ancestors.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`ptrace.tracee.ancestors.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`ptrace.tracee.ancestors.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`ptrace.tracee.ancestors.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`ptrace.tracee.ancestors.container.id`](#common-process-container-id-doc) | Container ID | | [`ptrace.tracee.ancestors.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -1086,8 +1126,8 @@ A ptrace command was executed | [`ptrace.tracee.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`ptrace.tracee.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`ptrace.tracee.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`ptrace.tracee.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`ptrace.tracee.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`ptrace.tracee.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`ptrace.tracee.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`ptrace.tracee.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`ptrace.tracee.container.id`](#common-process-container-id-doc) | Container ID | | [`ptrace.tracee.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -1159,8 +1199,8 @@ A ptrace command was executed | [`ptrace.tracee.parent.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`ptrace.tracee.parent.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`ptrace.tracee.parent.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`ptrace.tracee.parent.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`ptrace.tracee.parent.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`ptrace.tracee.parent.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| 
[`ptrace.tracee.parent.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`ptrace.tracee.parent.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`ptrace.tracee.parent.container.id`](#common-process-container-id-doc) | Container ID | | [`ptrace.tracee.parent.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -1346,6 +1386,7 @@ A directory was removed | [`rmdir.file.uid`](#common-filefields-uid-doc) | UID of the file's owner | | [`rmdir.file.user`](#common-filefields-user-doc) | User of the file's owner | | [`rmdir.retval`](#common-syscallevent-retval-doc) | Return value of the syscall | +| [`rmdir.syscall.path`](#rmdir-syscall-path-doc) | Path argument of the syscall | ### Event `selinux` @@ -1434,8 +1475,8 @@ A signal was sent | [`signal.target.ancestors.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`signal.target.ancestors.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`signal.target.ancestors.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`signal.target.ancestors.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`signal.target.ancestors.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`signal.target.ancestors.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`signal.target.ancestors.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`signal.target.ancestors.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`signal.target.ancestors.container.id`](#common-process-container-id-doc) | Container ID | | [`signal.target.ancestors.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -1517,8 +1558,8 @@ A signal was sent | [`signal.target.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`signal.target.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`signal.target.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`signal.target.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`signal.target.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| [`signal.target.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`signal.target.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`signal.target.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`signal.target.container.id`](#common-process-container-id-doc) | Container ID | | [`signal.target.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -1590,8 +1631,8 @@ A signal was sent | [`signal.target.parent.cgroup.file.inode`](#common-pathkey-inode-doc) | Inode of the file | | [`signal.target.parent.cgroup.file.mount_id`](#common-pathkey-mount_id-doc) | Mount ID of the file | | [`signal.target.parent.cgroup.id`](#common-cgroupcontext-id-doc) | ID of the cgroup | -| [`signal.target.parent.cgroup.manager`](#common-cgroupcontext-manager-doc) | Lifecycle manager of the cgroup | -| [`signal.target.parent.cgroup.version`](#common-cgroupcontext-version-doc) | Version of the cgroup API | +| 
[`signal.target.parent.cgroup.manager`](#common-cgroupcontext-manager-doc) | [Experimental] Lifecycle manager of the cgroup | +| [`signal.target.parent.cgroup.version`](#common-cgroupcontext-version-doc) | [Experimental] Version of the cgroup API | | [`signal.target.parent.comm`](#common-process-comm-doc) | Comm attribute of the process | | [`signal.target.parent.container.id`](#common-process-container-id-doc) | Container ID | | [`signal.target.parent.created_at`](#common-process-created_at-doc) | Timestamp of the creation of the process | @@ -1942,6 +1983,15 @@ Definition: Timestamp of the creation of the process `exec` `exit` `process` `process.ancestors` `process.parent` `ptrace.tracee` `ptrace.tracee.ancestors` `ptrace.tracee.parent` `signal.target` `signal.target.ancestors` `signal.target.parent` +### `*.data_size` {#common-networkstats-data_size-doc} +Type: int + +Definition: Amount of data transmitted or received + +`*.data_size` has 2 possible prefixes: +`network_flow_monitor.flows.egress` `network_flow_monitor.flows.ingress` + + ### `*.egid` {#common-credentials-egid-doc} Type: int @@ -2127,8 +2177,8 @@ Type: string Definition: Interface ifname -`*.ifname` has 2 possible prefixes: -`network.device` `packet.device` +`*.ifname` has 3 possible prefixes: +`network.device` `network_flow_monitor.device` `packet.device` ### `*.in_upper_layer` {#common-filefields-in_upper_layer-doc} @@ -2154,8 +2204,8 @@ Type: IP/CIDR Definition: IP address -`*.ip` has 6 possible prefixes: -`bind.addr` `connect.addr` `network.destination` `network.source` `packet.destination` `packet.source` +`*.ip` has 9 possible prefixes: +`accept.addr` `bind.addr` `connect.addr` `network.destination` `network.source` `network_flow_monitor.flows.destination` `network_flow_monitor.flows.source` `packet.destination` `packet.source` ### `*.is_exec` {#common-process-is_exec-doc} @@ -2181,8 +2231,8 @@ Type: bool Definition: Whether the IP address belongs to a public network -`*.is_public` has 6 possible prefixes: -`bind.addr` `connect.addr` `network.destination` `network.source` `packet.destination` `packet.source` +`*.is_public` has 9 possible prefixes: +`accept.addr` `bind.addr` `connect.addr` `network.destination` `network.source` `network_flow_monitor.flows.destination` `network_flow_monitor.flows.source` `packet.destination` `packet.source` ### `*.is_thread` {#common-process-is_thread-doc} @@ -2250,14 +2300,14 @@ Type: int Definition: Length of the corresponding element -`*.length` has 82 possible prefixes: -`chdir.file.name` `chdir.file.path` `chmod.file.name` `chmod.file.path` `chown.file.name` `chown.file.path` `dns.question.name` `exec.file.name` `exec.file.path` `exec.interpreter.file.name` `exec.interpreter.file.path` `exit.file.name` `exit.file.path` `exit.interpreter.file.name` `exit.interpreter.file.path` `link.file.destination.name` `link.file.destination.path` `link.file.name` `link.file.path` `load_module.file.name` `load_module.file.path` `mkdir.file.name` `mkdir.file.path` `mmap.file.name` `mmap.file.path` `open.file.name` `open.file.path` `process.ancestors` `process.ancestors.file.name` `process.ancestors.file.path` `process.ancestors.interpreter.file.name` `process.ancestors.interpreter.file.path` `process.file.name` `process.file.path` `process.interpreter.file.name` `process.interpreter.file.path` `process.parent.file.name` `process.parent.file.path` `process.parent.interpreter.file.name` `process.parent.interpreter.file.path` `ptrace.tracee.ancestors` `ptrace.tracee.ancestors.file.name` 
`ptrace.tracee.ancestors.file.path` `ptrace.tracee.ancestors.interpreter.file.name` `ptrace.tracee.ancestors.interpreter.file.path` `ptrace.tracee.file.name` `ptrace.tracee.file.path` `ptrace.tracee.interpreter.file.name` `ptrace.tracee.interpreter.file.path` `ptrace.tracee.parent.file.name` `ptrace.tracee.parent.file.path` `ptrace.tracee.parent.interpreter.file.name` `ptrace.tracee.parent.interpreter.file.path` `removexattr.file.name` `removexattr.file.path` `rename.file.destination.name` `rename.file.destination.path` `rename.file.name` `rename.file.path` `rmdir.file.name` `rmdir.file.path` `setxattr.file.name` `setxattr.file.path` `signal.target.ancestors` `signal.target.ancestors.file.name` `signal.target.ancestors.file.path` `signal.target.ancestors.interpreter.file.name` `signal.target.ancestors.interpreter.file.path` `signal.target.file.name` `signal.target.file.path` `signal.target.interpreter.file.name` `signal.target.interpreter.file.path` `signal.target.parent.file.name` `signal.target.parent.file.path` `signal.target.parent.interpreter.file.name` `signal.target.parent.interpreter.file.path` `splice.file.name` `splice.file.path` `unlink.file.name` `unlink.file.path` `utimes.file.name` `utimes.file.path` +`*.length` has 83 possible prefixes: +`chdir.file.name` `chdir.file.path` `chmod.file.name` `chmod.file.path` `chown.file.name` `chown.file.path` `dns.question.name` `exec.file.name` `exec.file.path` `exec.interpreter.file.name` `exec.interpreter.file.path` `exit.file.name` `exit.file.path` `exit.interpreter.file.name` `exit.interpreter.file.path` `link.file.destination.name` `link.file.destination.path` `link.file.name` `link.file.path` `load_module.file.name` `load_module.file.path` `mkdir.file.name` `mkdir.file.path` `mmap.file.name` `mmap.file.path` `network_flow_monitor.flows` `open.file.name` `open.file.path` `process.ancestors` `process.ancestors.file.name` `process.ancestors.file.path` `process.ancestors.interpreter.file.name` `process.ancestors.interpreter.file.path` `process.file.name` `process.file.path` `process.interpreter.file.name` `process.interpreter.file.path` `process.parent.file.name` `process.parent.file.path` `process.parent.interpreter.file.name` `process.parent.interpreter.file.path` `ptrace.tracee.ancestors` `ptrace.tracee.ancestors.file.name` `ptrace.tracee.ancestors.file.path` `ptrace.tracee.ancestors.interpreter.file.name` `ptrace.tracee.ancestors.interpreter.file.path` `ptrace.tracee.file.name` `ptrace.tracee.file.path` `ptrace.tracee.interpreter.file.name` `ptrace.tracee.interpreter.file.path` `ptrace.tracee.parent.file.name` `ptrace.tracee.parent.file.path` `ptrace.tracee.parent.interpreter.file.name` `ptrace.tracee.parent.interpreter.file.path` `removexattr.file.name` `removexattr.file.path` `rename.file.destination.name` `rename.file.destination.path` `rename.file.name` `rename.file.path` `rmdir.file.name` `rmdir.file.path` `setxattr.file.name` `setxattr.file.path` `signal.target.ancestors` `signal.target.ancestors.file.name` `signal.target.ancestors.file.path` `signal.target.ancestors.interpreter.file.name` `signal.target.ancestors.interpreter.file.path` `signal.target.file.name` `signal.target.file.path` `signal.target.interpreter.file.name` `signal.target.interpreter.file.path` `signal.target.parent.file.name` `signal.target.parent.file.path` `signal.target.parent.interpreter.file.name` `signal.target.parent.interpreter.file.path` `splice.file.name` `splice.file.path` `unlink.file.name` `unlink.file.path` `utimes.file.name` `utimes.file.path` 
### `*.manager` {#common-cgroupcontext-manager-doc} Type: string -Definition: Lifecycle manager of the cgroup +Definition: [Experimental] Lifecycle manager of the cgroup `*.manager` has 12 possible prefixes: `cgroup` `exec.cgroup` `exit.cgroup` `process.ancestors.cgroup` `process.cgroup` `process.parent.cgroup` `ptrace.tracee.ancestors.cgroup` `ptrace.tracee.cgroup` `ptrace.tracee.parent.cgroup` `signal.target.ancestors.cgroup` `signal.target.cgroup` `signal.target.parent.cgroup` @@ -2311,6 +2361,18 @@ exec.file.name == "apt" Matches the execution of any file named apt. +### `*.network_direction` {#common-networkcontext-network_direction-doc} +Type: int + +Definition: Network direction of the network packet + +`*.network_direction` has 2 possible prefixes: +`network` `packet` + +Constants: [Network directions](#network-directions) + + + ### `*.package.name` {#common-fileevent-package-name-doc} Type: string @@ -2338,6 +2400,15 @@ Definition: [Experimental] Full version of the package that provided this file `chdir.file` `chmod.file` `chown.file` `exec.file` `exec.interpreter.file` `exit.file` `exit.interpreter.file` `link.file` `link.file.destination` `load_module.file` `mkdir.file` `mmap.file` `open.file` `process.ancestors.file` `process.ancestors.interpreter.file` `process.file` `process.interpreter.file` `process.parent.file` `process.parent.interpreter.file` `ptrace.tracee.ancestors.file` `ptrace.tracee.ancestors.interpreter.file` `ptrace.tracee.file` `ptrace.tracee.interpreter.file` `ptrace.tracee.parent.file` `ptrace.tracee.parent.interpreter.file` `removexattr.file` `rename.file` `rename.file.destination` `rmdir.file` `setxattr.file` `signal.target.ancestors.file` `signal.target.ancestors.interpreter.file` `signal.target.file` `signal.target.interpreter.file` `signal.target.parent.file` `signal.target.parent.interpreter.file` `splice.file` `unlink.file` `utimes.file` +### `*.packet_count` {#common-networkstats-packet_count-doc} +Type: int + +Definition: Count of network packets transmitted or received + +`*.packet_count` has 2 possible prefixes: +`network_flow_monitor.flows.egress` `network_flow_monitor.flows.ingress` + + ### `*.path` {#common-fileevent-path-doc} Type: string @@ -2378,8 +2449,8 @@ Type: int Definition: Port number -`*.port` has 6 possible prefixes: -`bind.addr` `connect.addr` `network.destination` `network.source` `packet.destination` `packet.source` +`*.port` has 9 possible prefixes: +`accept.addr` `bind.addr` `connect.addr` `network.destination` `network.source` `network_flow_monitor.flows.destination` `network_flow_monitor.flows.source` `packet.destination` `packet.source` ### `*.ppid` {#common-process-ppid-doc} @@ -2396,8 +2467,8 @@ Type: int Definition: Return value of the syscall -`*.retval` has 23 possible prefixes: -`bind` `bpf` `chdir` `chmod` `chown` `connect` `link` `load_module` `mkdir` `mmap` `mount` `mprotect` `open` `ptrace` `removexattr` `rename` `rmdir` `setxattr` `signal` `splice` `unlink` `unload_module` `utimes` +`*.retval` has 24 possible prefixes: +`accept` `bind` `bpf` `chdir` `chmod` `chown` `connect` `link` `load_module` `mkdir` `mmap` `mount` `mprotect` `open` `ptrace` `removexattr` `rename` `rmdir` `setxattr` `signal` `splice` `unlink` `unload_module` `utimes` Constants: [Error constants](#error-constants) @@ -2490,12 +2561,19 @@ Definition: User of the file's owner ### `*.version` {#common-cgroupcontext-version-doc} Type: int -Definition: Version of the cgroup API +Definition: [Experimental] Version of the cgroup API `*.version` has 12 
possible prefixes: `cgroup` `exec.cgroup` `exit.cgroup` `process.ancestors.cgroup` `process.cgroup` `process.parent.cgroup` `ptrace.tracee.ancestors.cgroup` `ptrace.tracee.cgroup` `ptrace.tracee.parent.cgroup` `signal.target.ancestors.cgroup` `signal.target.cgroup` `signal.target.parent.cgroup` +### `accept.addr.family` {#accept-addr-family-doc} +Type: int + +Definition: Address family + + + ### `bind.addr.family` {#bind-addr-family-doc} Type: int @@ -2969,6 +3047,20 @@ Constants: [File mode constants](#file-mode-constants) +### `mkdir.syscall.mode` {#mkdir-syscall-mode-doc} +Type: int + +Definition: Mode of the new directory + + + +### `mkdir.syscall.path` {#mkdir-syscall-path-doc} +Type: string + +Definition: Path argument of the syscall + + + ### `mmap.flags` {#mmap-flags-doc} Type: int @@ -3058,6 +3150,26 @@ Constants: [Virtual Memory flags](#virtual-memory-flags) +### `network_flow_monitor.flows.l3_protocol` {#network_flow_monitor-flows-l3_protocol-doc} +Type: int + +Definition: L3 protocol of the network packet + + +Constants: [L3 protocols](#l3-protocols) + + + +### `network_flow_monitor.flows.l4_protocol` {#network_flow_monitor-flows-l4_protocol-doc} +Type: int + +Definition: L4 protocol of the network packet + + +Constants: [L4 protocols](#l4-protocols) + + + ### `open.file.destination.mode` {#open-file-destination-mode-doc} Type: int @@ -3137,6 +3249,13 @@ Definition: Path argument of the syscall +### `rmdir.syscall.path` {#rmdir-syscall-path-doc} +Type: string + +Definition: Path argument of the syscall + + + ### `selinux.bool.name` {#selinux-bool-name-doc} Type: string @@ -4236,6 +4355,14 @@ Network Address Family constants are the supported network address families. | `AF_XDP` | all | | `AF_MAX` | all | +### `Network directions` {#network-directions} +Network directions are the supported directions of network packets. + +| Name | Architectures | +| ---- |---------------| +| `INGRESS` | all | +| `EGRESS` | all | + ### `Open flags` {#open-flags} Open flags are the supported flags for the open syscall. 
diff --git a/docs/cloud-workload-security/scripts/requirements-docs.txt b/docs/cloud-workload-security/scripts/requirements-docs.txt index 829907f315820..1809103f5e091 100644 --- a/docs/cloud-workload-security/scripts/requirements-docs.txt +++ b/docs/cloud-workload-security/scripts/requirements-docs.txt @@ -1 +1 @@ -jinja2==3.0.1 +jinja2==3.1.5 diff --git a/docs/cloud-workload-security/secl_linux.json b/docs/cloud-workload-security/secl_linux.json index b4f480879c3ed..850a130b737a4 100644 --- a/docs/cloud-workload-security/secl_linux.json +++ b/docs/cloud-workload-security/secl_linux.json @@ -24,12 +24,12 @@ }, { "name": "cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -144,12 +144,12 @@ }, { "name": "process.ancestors.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "process.ancestors.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -559,12 +559,12 @@ }, { "name": "process.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "process.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -924,12 +924,12 @@ }, { "name": "process.parent.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "process.parent.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -1319,6 +1319,40 @@ } ] }, + { + "name": "accept", + "definition": "An accept was executed", + "type": "Network", + "from_agent_version": "7.60", + "experimental": false, + "properties": [ + { + "name": "accept.addr.family", + "definition": "Address family", + "property_doc_link": "accept-addr-family-doc" + }, + { + "name": "accept.addr.ip", + "definition": "IP address", + "property_doc_link": "common-ipportcontext-ip-doc" + }, + { + "name": "accept.addr.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, + { + "name": "accept.addr.port", + "definition": "Port number", + "property_doc_link": "common-ipportcontext-port-doc" + }, + { + "name": "accept.retval", + "definition": "Return value of the syscall", + "property_doc_link": "common-syscallevent-retval-doc" + } + ] + }, { "name": "bind", "definition": "A bind was executed", @@ -1944,6 +1978,11 @@ "definition": "L4 protocol of the network packet", "property_doc_link": "common-networkcontext-l4_protocol-doc" }, + { + "name": "network.network_direction", + "definition": "Network direction of the network packet", + 
"property_doc_link": "common-networkcontext-network_direction-doc" + }, { "name": "network.size", "definition": "Size in bytes of the network packet", @@ -2035,12 +2074,12 @@ }, { "name": "exec.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "exec.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -2464,12 +2503,12 @@ }, { "name": "exit.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "exit.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -2896,6 +2935,11 @@ "definition": "L4 protocol of the network packet", "property_doc_link": "common-networkcontext-l4_protocol-doc" }, + { + "name": "network.network_direction", + "definition": "Network direction of the network packet", + "property_doc_link": "common-networkcontext-network_direction-doc" + }, { "name": "network.size", "definition": "Size in bytes of the network packet", @@ -3402,6 +3446,16 @@ "name": "mkdir.retval", "definition": "Return value of the syscall", "property_doc_link": "common-syscallevent-retval-doc" + }, + { + "name": "mkdir.syscall.mode", + "definition": "Mode of the new directory", + "property_doc_link": "mkdir-syscall-mode-doc" + }, + { + "name": "mkdir.syscall.path", + "definition": "Path argument of the syscall", + "property_doc_link": "mkdir-syscall-path-doc" } ] }, @@ -3602,6 +3656,85 @@ } ] }, + { + "name": "network_flow_monitor", + "definition": "A network monitor event was sent", + "type": "Network", + "from_agent_version": "7.63", + "experimental": false, + "properties": [ + { + "name": "network_flow_monitor.device.ifname", + "definition": "Interface ifname", + "property_doc_link": "common-networkdevicecontext-ifname-doc" + }, + { + "name": "network_flow_monitor.flows.destination.ip", + "definition": "IP address", + "property_doc_link": "common-ipportcontext-ip-doc" + }, + { + "name": "network_flow_monitor.flows.destination.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, + { + "name": "network_flow_monitor.flows.destination.port", + "definition": "Port number", + "property_doc_link": "common-ipportcontext-port-doc" + }, + { + "name": "network_flow_monitor.flows.egress.data_size", + "definition": "Amount of data transmitted or received", + "property_doc_link": "common-networkstats-data_size-doc" + }, + { + "name": "network_flow_monitor.flows.egress.packet_count", + "definition": "Count of network packets transmitted or received", + "property_doc_link": "common-networkstats-packet_count-doc" + }, + { + "name": "network_flow_monitor.flows.ingress.data_size", + "definition": "Amount of data transmitted or received", + "property_doc_link": "common-networkstats-data_size-doc" + }, + { + "name": "network_flow_monitor.flows.ingress.packet_count", + "definition": "Count of network packets transmitted or received", + "property_doc_link": "common-networkstats-packet_count-doc" + }, + { + "name": "network_flow_monitor.flows.l3_protocol", + "definition": "L3 
protocol of the network packet", + "property_doc_link": "network_flow_monitor-flows-l3_protocol-doc" + }, + { + "name": "network_flow_monitor.flows.l4_protocol", + "definition": "L4 protocol of the network packet", + "property_doc_link": "network_flow_monitor-flows-l4_protocol-doc" + }, + { + "name": "network_flow_monitor.flows.length", + "definition": "Length of the corresponding element", + "property_doc_link": "common-string-length-doc" + }, + { + "name": "network_flow_monitor.flows.source.ip", + "definition": "IP address", + "property_doc_link": "common-ipportcontext-ip-doc" + }, + { + "name": "network_flow_monitor.flows.source.is_public", + "definition": "Whether the IP address belongs to a public network", + "property_doc_link": "common-ipportcontext-is_public-doc" + }, + { + "name": "network_flow_monitor.flows.source.port", + "definition": "Port number", + "property_doc_link": "common-ipportcontext-port-doc" + } + ] + }, { "name": "open", "definition": "A file was opened", @@ -3743,7 +3876,7 @@ }, { "name": "packet", - "definition": "A raw network packet captured", + "definition": "A raw network packet was captured", "type": "Network", "from_agent_version": "7.60", "experimental": false, @@ -3783,6 +3916,11 @@ "definition": "L4 protocol of the network packet", "property_doc_link": "common-networkcontext-l4_protocol-doc" }, + { + "name": "packet.network_direction", + "definition": "Network direction of the network packet", + "property_doc_link": "common-networkcontext-network_direction-doc" + }, { "name": "packet.size", "definition": "Size in bytes of the network packet", @@ -3889,12 +4027,12 @@ }, { "name": "ptrace.tracee.ancestors.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "ptrace.tracee.ancestors.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -4304,12 +4442,12 @@ }, { "name": "ptrace.tracee.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "ptrace.tracee.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -4669,12 +4807,12 @@ }, { "name": "ptrace.tracee.parent.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "ptrace.tracee.parent.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -5523,6 +5661,11 @@ "name": "rmdir.retval", "definition": "Return value of the syscall", "property_doc_link": "common-syscallevent-retval-doc" + }, + { + "name": "rmdir.syscall.path", + "definition": "Path argument of the syscall", + "property_doc_link": "rmdir-syscall-path-doc" } ] }, @@ -5836,12 +5979,12 @@ }, { "name": "signal.target.ancestors.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": 
"signal.target.ancestors.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -6251,12 +6394,12 @@ }, { "name": "signal.target.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "signal.target.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -6616,12 +6759,12 @@ }, { "name": "signal.target.parent.cgroup.manager", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "property_doc_link": "common-cgroupcontext-manager-doc" }, { "name": "signal.target.parent.cgroup.version", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "property_doc_link": "common-cgroupcontext-version-doc" }, { @@ -7752,6 +7895,19 @@ "constants_link": "", "examples": [] }, + { + "name": "*.data_size", + "link": "common-networkstats-data_size-doc", + "type": "int", + "definition": "Amount of data transmitted or received", + "prefixes": [ + "network_flow_monitor.flows.egress", + "network_flow_monitor.flows.ingress" + ], + "constants": "", + "constants_link": "", + "examples": [] + }, { "name": "*.egid", "link": "common-credentials-egid-doc", @@ -8294,6 +8450,7 @@ "definition": "Interface ifname", "prefixes": [ "network.device", + "network_flow_monitor.device", "packet.device" ], "constants": "", @@ -8418,10 +8575,13 @@ "type": "IP/CIDR", "definition": "IP address", "prefixes": [ + "accept.addr", "bind.addr", "connect.addr", "network.destination", "network.source", + "network_flow_monitor.flows.destination", + "network_flow_monitor.flows.source", "packet.destination", "packet.source" ], @@ -8479,10 +8639,13 @@ "type": "bool", "definition": "Whether the IP address belongs to a public network", "prefixes": [ + "accept.addr", "bind.addr", "connect.addr", "network.destination", "network.source", + "network_flow_monitor.flows.destination", + "network_flow_monitor.flows.source", "packet.destination", "packet.source" ], @@ -8635,6 +8798,7 @@ "mkdir.file.path", "mmap.file.name", "mmap.file.path", + "network_flow_monitor.flows", "open.file.name", "open.file.path", "process.ancestors", @@ -8701,7 +8865,7 @@ "name": "*.manager", "link": "common-cgroupcontext-manager-doc", "type": "string", - "definition": "Lifecycle manager of the cgroup", + "definition": "[Experimental] Lifecycle manager of the cgroup", "prefixes": [ "cgroup", "exec.cgroup", @@ -8937,6 +9101,19 @@ } ] }, + { + "name": "*.network_direction", + "link": "common-networkcontext-network_direction-doc", + "type": "int", + "definition": "Network direction of the network packet", + "prefixes": [ + "network", + "packet" + ], + "constants": "Network directions", + "constants_link": "network-directions", + "examples": [] + }, { "name": "*.package.name", "link": "common-fileevent-package-name-doc", @@ -9087,6 +9264,19 @@ "constants_link": "", "examples": [] }, + { + "name": "*.packet_count", + "link": "common-networkstats-packet_count-doc", + "type": "int", + "definition": "Count of network packets transmitted or received", + "prefixes": [ + "network_flow_monitor.flows.egress", + "network_flow_monitor.flows.ingress" + ], + "constants": "", + 
"constants_link": "", + "examples": [] + }, { "name": "*.path", "link": "common-fileevent-path-doc", @@ -9174,10 +9364,13 @@ "type": "int", "definition": "Port number", "prefixes": [ + "accept.addr", "bind.addr", "connect.addr", "network.destination", "network.source", + "network_flow_monitor.flows.destination", + "network_flow_monitor.flows.source", "packet.destination", "packet.source" ], @@ -9213,6 +9406,7 @@ "type": "int", "definition": "Return value of the syscall", "prefixes": [ + "accept", "bind", "bpf", "chdir", @@ -9501,7 +9695,7 @@ "name": "*.version", "link": "common-cgroupcontext-version-doc", "type": "int", - "definition": "Version of the cgroup API", + "definition": "[Experimental] Version of the cgroup API", "prefixes": [ "cgroup", "exec.cgroup", @@ -9520,6 +9714,18 @@ "constants_link": "", "examples": [] }, + { + "name": "accept.addr.family", + "link": "accept-addr-family-doc", + "type": "int", + "definition": "Address family", + "prefixes": [ + "accept" + ], + "constants": "", + "constants_link": "", + "examples": [] + }, { "name": "bind.addr.family", "link": "bind-addr-family-doc", @@ -10264,6 +10470,30 @@ "constants_link": "file-mode-constants", "examples": [] }, + { + "name": "mkdir.syscall.mode", + "link": "mkdir-syscall-mode-doc", + "type": "int", + "definition": "Mode of the new directory", + "prefixes": [ + "mkdir" + ], + "constants": "", + "constants_link": "", + "examples": [] + }, + { + "name": "mkdir.syscall.path", + "link": "mkdir-syscall-path-doc", + "type": "string", + "definition": "Path argument of the syscall", + "prefixes": [ + "mkdir" + ], + "constants": "", + "constants_link": "", + "examples": [] + }, { "name": "mmap.flags", "link": "mmap-flags-doc", @@ -10396,6 +10626,30 @@ "constants_link": "virtual-memory-flags", "examples": [] }, + { + "name": "network_flow_monitor.flows.l3_protocol", + "link": "network_flow_monitor-flows-l3_protocol-doc", + "type": "int", + "definition": "L3 protocol of the network packet", + "prefixes": [ + "network_flow_monitor.flows" + ], + "constants": "L3 protocols", + "constants_link": "l3-protocols", + "examples": [] + }, + { + "name": "network_flow_monitor.flows.l4_protocol", + "link": "network_flow_monitor-flows-l4_protocol-doc", + "type": "int", + "definition": "L4 protocol of the network packet", + "prefixes": [ + "network_flow_monitor.flows" + ], + "constants": "L4 protocols", + "constants_link": "l4-protocols", + "examples": [] + }, { "name": "open.file.destination.mode", "link": "open-file-destination-mode-doc", @@ -10516,6 +10770,18 @@ "constants_link": "", "examples": [] }, + { + "name": "rmdir.syscall.path", + "link": "rmdir-syscall-path-doc", + "type": "string", + "definition": "Path argument of the syscall", + "prefixes": [ + "rmdir" + ], + "constants": "", + "constants_link": "", + "examples": [] + }, { "name": "selinux.bool.name", "link": "selinux-bool-name-doc", @@ -14162,6 +14428,21 @@ } ] }, + { + "name": "Network directions", + "link": "network-directions", + "description": "Network directions are the supported directions of network packets.", + "all": [ + { + "name": "INGRESS", + "architecture": "all" + }, + { + "name": "EGRESS", + "architecture": "all" + } + ] + }, { "name": "Open flags", "link": "open-flags", diff --git a/docs/cloud-workload-security/secl_windows.json b/docs/cloud-workload-security/secl_windows.json index e8e3f1e601c4e..f3d94e1496ec0 100644 --- a/docs/cloud-workload-security/secl_windows.json +++ b/docs/cloud-workload-security/secl_windows.json @@ -2318,6 +2318,21 @@ "architecture": 
"all" } ] + }, + { + "name": "Network directions", + "link": "network-directions", + "description": "Network directions are the supported directions of network packets.", + "all": [ + { + "name": "INGRESS", + "architecture": "all" + }, + { + "name": "EGRESS", + "architecture": "all" + } + ] } ] } \ No newline at end of file diff --git a/docs/cloud-workload-security/windows_expressions.md b/docs/cloud-workload-security/windows_expressions.md index 035c98e380bd8..d79cfbbc743a7 100644 --- a/docs/cloud-workload-security/windows_expressions.md +++ b/docs/cloud-workload-security/windows_expressions.md @@ -892,6 +892,14 @@ L4 protocols are the supported Layer 4 protocols. | `IP_PROTO_MPLS` | all | | `IP_PROTO_RAW` | all | +### `Network directions` {#network-directions} +Network directions are the supported directions of network packets. + +| Name | Architectures | +| ---- |---------------| +| `INGRESS` | all | +| `EGRESS` | all | + {{< partial name="whats-next/whats-next.html" >}} diff --git a/docs/dev/README.md b/docs/dev/README.md index 5fa81eb884a28..8a2464ab5fbfa 100644 --- a/docs/dev/README.md +++ b/docs/dev/README.md @@ -19,6 +19,7 @@ This guide will help you to develop and contribute code to the project. * Advice / Style Guide * * [Using Atomics](atomics.md) * * [Go Imports](imports.md) +* * [Logger best practices](logger.md) ## How to contribute diff --git a/docs/dev/agent_dev_env.md b/docs/dev/agent_dev_env.md index d377808f09bf9..e53dfeb49dc33 100644 --- a/docs/dev/agent_dev_env.md +++ b/docs/dev/agent_dev_env.md @@ -138,7 +138,7 @@ This procedure ensures you not only get the correct version of `invoke`, but als ### Golang -You must [install Golang](https://golang.org/doc/install) version `1.23.3` or +You must [install Golang](https://golang.org/doc/install) version `1.23.5` or later. Make sure that `$GOPATH/bin` is in your `$PATH` otherwise `invoke` cannot use any additional tool it might need. diff --git a/docs/dev/agent_tests.md b/docs/dev/agent_tests.md index 94f5368991a68..ed2fbe3817d41 100644 --- a/docs/dev/agent_tests.md +++ b/docs/dev/agent_tests.md @@ -20,17 +20,9 @@ commit through the CI so the following requirements must be met: ## E2E tests -### Kitchen - For tests that require a fully configured Agent up and running in specific and -repeatable environments there are E2E (End to End) tests that are executed using -Test Kitchen from Chef on the supported platforms. - -### Kubernetes - -There are some end to end tests executed on top of Kubernetes. - -See the dedicated docs about it [here](../../test/e2e/README.md). +repeatable environments there are E2E (End to End) tests that are executed in +Pulumi managed instances. See the dedicated docs about it [here](../../test/new-e2e/README.md). ## System tests diff --git a/docs/dev/logger.md b/docs/dev/logger.md new file mode 100644 index 0000000000000..68c610f22b579 --- /dev/null +++ b/docs/dev/logger.md @@ -0,0 +1,46 @@ +### Logging + +Logging utilizes the [`github.com/cihub/seelog`](https://github.com/cihub/seelog) package as its underlying framework. +You can access logging through `pkg/util/log` and the `comp/core/log` component wrappers. +Using the component wrapper is recommended, as it adheres to [component best practices](https://datadoghq.dev/datadog-agent/components/overview/). + +#### Writing a good log message + +In general, there are a few rules and a few suggestions to follow when it comes to writing +good log messages: + +- Messages must be written in English, preferably in American English. 
+- Use proper spelling and grammar when possible. Because not everyone is a native English speaker, this is an ask, not a hard requirement. +- Identifiers or passages of note should be called out by some means such as wrapping them in + backticks or quotes (single or double). Wrapping with special characters can be helpful in drawing the user's eye to anything of importance. +- If the message is longer than one or two sentences, it's probably better suited as a single sentence briefly + explaining the event, with a link to external documentation that explains things further. + +#### Choosing the right log level + +Choosing the right level is also very important. Appropriate log levels make it easy for users to understand what they should pay attention to. +They also avoid the performance overhead of excess logging, even if the logs are filtered and never show up on the console. + +- **TRACE**: Typically contains a high level of detail for deep/rich debugging. + + Trace logging is typically used when instrumenting algorithms and core pieces of logic. + Avoid adding trace logging to tight loops or commonly used codepaths. + Even when the logs are disabled, logging an event can incur overhead. +- **DEBUG**: Basic information that can be helpful for initially debugging issues. + + Do not use debug logging for things that happen per-event or that scale with event throughput. + You can safely use debug logging for uncommon cases, for example, something that happens every 1000th event. +- **INFO**: Common information about normal processes. + + Info logging is appropriate for logical or temporal events. + Examples include notifications when components are stopped and started, or other high-level events that do not require operator attention. + + **INFO** is primarily used for information that tells an operator that a notable action completed successfully. + +- **WARN** should be used for potentially problematic but non-critical events where the software can continue operating, + potentially in a degraded state, and/or recover from the problem. Do not use **WARN** for events that require the user's immediate attention. + +- The **ERROR** level should be used for events indicating severely problematic issues that require immediate user visibility and remediation. + This includes logging related to events that may lead to data loss, unrecoverable states, and any other situation where a required component is faulty, + causing the software to be unable to remediate the problem on its own. + Error logs should be extremely rare in normally operating software to ensure a high signal-to-noise ratio in observability tooling. diff --git a/docs/public/components/creating-components.md b/docs/public/components/creating-components.md index 2fe844b7175d4..ec000dfd93ee2 100644 --- a/docs/public/components/creating-components.md +++ b/docs/public/components/creating-components.md @@ -53,7 +53,7 @@ This file hierarchy aims to solve a few problems: ## Bootstrapping components -You can use the [invoke](../setup.md#preface) task `deva components.new-component comp/` to generate a scaffold for your new component. +You can use the [command](../setup.md#tooling) `inv components.new-component comp/` to generate a scaffold for your new component. Every public variable, function, struct, and interface of your component **must** be documented. Refer to the [Documentation](#documentation) section below for details.
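To make the level guidance in the new `docs/dev/logger.md` above more concrete, here is a minimal Go sketch. It assumes the `Tracef`/`Debugf`/`Infof`/`Warnf`/`Errorf`/`Flush` helpers exposed by `pkg/util/log` (the package the guide points to); the `processPayload` and `decode` functions, their messages, and the payload format are hypothetical illustrations, not code from this PR.

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/log"
)

// processPayload is a hypothetical worker used only to illustrate level choice.
func processPayload(id string, data []byte) error {
	// TRACE: per-event detail, only useful for deep debugging; avoid in tight loops.
	log.Tracef("decoding payload `%s` (%d bytes)", id, len(data))

	if len(data) == 0 {
		// WARN: problematic but recoverable; the software keeps operating.
		log.Warnf("payload `%s` is empty, skipping", id)
		return nil
	}

	if err := decode(data); err != nil {
		// ERROR: needs immediate visibility and remediation (possible data loss).
		log.Errorf("unable to decode payload `%s`: %v", id, err)
		return err
	}

	// DEBUG: uncommon, low-volume detail that helps initial debugging.
	log.Debugf("payload `%s` decoded successfully", id)
	return nil
}

// decode stands in for real parsing logic.
func decode(data []byte) error {
	if data[0] != '{' {
		return fmt.Errorf("unexpected leading byte %q", data[0])
	}
	return nil
}

func main() {
	// INFO: a notable, high-level action completed successfully.
	log.Infof("payload processor started")
	_ = processPayload("example-1", []byte(`{"ok":true}`))
	log.Flush()
}
```

Note how per-event detail stays at TRACE and DEBUG, while WARN and ERROR are reserved for conditions an operator may need to notice or act on.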
diff --git a/docs/public/components/faq.md b/docs/public/components/faq.md index c4c97f668e338..2ad78c5d7dc88 100644 --- a/docs/public/components/faq.md +++ b/docs/public/components/faq.md @@ -8,7 +8,7 @@ You might need to express the fact that some of your dependencies are optional. components that interact with many other components **if available** (that is, if they were included at compile time). This allows your component to interact with each other without forcing their inclusion in the current binary. -The [optional.Option](https://github.com/DataDog/datadog-agent/tree/main/pkg/util/optional) type answers this need. +The [option.Option](https://github.com/DataDog/datadog-agent/tree/main/pkg/util/option) type answers this need. For examples, consider the metadata components that are included in multiple binaries (`core-agent`, `DogStatsD`, etc.). These components use the `sysprobeconfig` component if it is available. `sysprobeconfig` is available in the diff --git a/docs/public/guidelines/deprecated-components-documentation/defining-bundles.md b/docs/public/guidelines/deprecated-components-documentation/defining-bundles.md index c272bbce5fa83..ace204121fad7 100644 --- a/docs/public/guidelines/deprecated-components-documentation/defining-bundles.md +++ b/docs/public/guidelines/deprecated-components-documentation/defining-bundles.md @@ -13,7 +13,7 @@ A bundle is defined in a dedicated package named `comp/`. The packag Typically, a bundle will automatically instantiate the top-level components that represent the bundle's purpose. For example, the trace-agent bundle `comp/trace` might automatically instantiate `comp/trace/agent`. -You can use the invoke task `deva components.new-bundle comp/` to generate a pre-filled `bundle.go` file for the given bundle. +You can use the `inv components.new-bundle comp/` [command](../../setup.md#tooling) to generate a pre-filled `bundle.go` file for the given bundle. ## Bundle Parameters diff --git a/docs/public/guidelines/docs.md b/docs/public/guidelines/docs.md index f8efe306282a1..171dc9b27116f 100644 --- a/docs/public/guidelines/docs.md +++ b/docs/public/guidelines/docs.md @@ -2,7 +2,7 @@ This site is built by [MkDocs](https://github.com/mkdocs/mkdocs) and uses the [Material for MkDocs](https://squidfunk.github.io/mkdocs-material/) theme. -You can serve documentation locally with the `docs.serve` [invoke task](../setup.md#preface). +You can serve documentation locally with the `inv docs.serve` [command](../setup.md#tooling). ## Organization diff --git a/docs/public/setup.md b/docs/public/setup.md index e9ee454a1fa5e..afddd36623249 100644 --- a/docs/public/setup.md +++ b/docs/public/setup.md @@ -1,5 +1,99 @@ # Set up development environment +## Tooling + +### Installers + +=== "macOS" + === "GUI installer" + 1. In your browser, download the `.pkg` file: [deva-universal.pkg](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-universal.pkg) + 2. Run your downloaded file and follow the on-screen instructions. + 3. Restart your terminal. + 4. To verify that the shell can find and run the `deva` command in your `PATH`, use the following command. + + ``` + $ deva --version + + ``` + === "Command line installer" + 1. Download the file using the `curl` command. The `-o` option specifies the file name that the downloaded package is written to. In this example, the file is written to `deva-universal.pkg` in the current directory. 
+ + ``` + curl -Lo deva-universal.pkg https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-universal.pkg + ``` + 2. Run the standard macOS [`installer`](https://ss64.com/osx/installer.html) program, specifying the downloaded `.pkg` file as the source. Use the `-pkg` parameter to specify the name of the package to install, and the `-target /` parameter for the drive in which to install the package. The files are installed to `/usr/local/deva`, and an entry is created at `/etc/paths.d/deva` that instructs shells to add the `/usr/local/deva` directory to the `PATH`. You must include `sudo` in the command to grant write permissions to those folders. + + ``` + sudo installer -pkg ./deva-universal.pkg -target / + ``` + 3. Restart your terminal. + 4. To verify that the shell can find and run the `deva` command in your `PATH`, use the following command. + + ``` + $ deva --version + + ``` + +=== "Windows" + === "GUI installer" + 1. In your browser, download one of the `.msi` files: + - [deva-x64.msi](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-x64.msi) + 2. Run your downloaded file and follow the on-screen instructions. + 3. Restart your terminal. + 4. To verify that the shell can find and run the `deva` command in your `PATH`, use the following command. + + ``` + $ deva --version + + ``` + === "Command line installer" + 1. Download and run the installer using the standard Windows [`msiexec`](https://learn.microsoft.com/en-us/windows-server/administration/windows-commands/msiexec) program, specifying one of the `.msi` files as the source. Use the `/passive` and `/i` parameters to request an unattended, normal installation. + + === "x64" + ``` + msiexec /passive /i https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-x64.msi + ``` + === "x86" + ``` + msiexec /passive /i https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-x86.msi + ``` + 2. Restart your terminal. + 3. To verify that the shell can find and run the `deva` command in your `PATH`, use the following command. + + ``` + $ deva --version + + ``` + +### Standalone binaries + +After downloading the archive corresponding to your platform and architecture, extract the binary to a directory that is on your PATH and rename it to `deva`. 
+ +=== "macOS" + - [deva-aarch64-apple-darwin.tar.gz](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-aarch64-apple-darwin.tar.gz) + - [deva-x86_64-apple-darwin.tar.gz](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-x86_64-apple-darwin.tar.gz) + +=== "Windows" + - [deva-x86_64-pc-windows-msvc.zip](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-x86_64-pc-windows-msvc.zip) + - [deva-i686-pc-windows-msvc.zip](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-i686-pc-windows-msvc.zip) + +=== "Linux" + - [deva-aarch64-unknown-linux-gnu.tar.gz](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-aarch64-unknown-linux-gnu.tar.gz) + - [deva-x86_64-unknown-linux-gnu.tar.gz](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-x86_64-unknown-linux-gnu.tar.gz) + - [deva-x86_64-unknown-linux-musl.tar.gz](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-x86_64-unknown-linux-musl.tar.gz) + - [deva-powerpc64le-unknown-linux-gnu.tar.gz](https://github.com/DataDog/datadog-agent-dev/releases/latest/download/deva-powerpc64le-unknown-linux-gnu.tar.gz) + +### pip + +deva is available on PyPI and can be installed with [pip](https://github.com/pypa/pip). + +``` +pip install deva +``` + +!!! warning + This method modifies the Python environment in which you choose to install. + ## Windows To build the agent on Windows, see [datadog-agent-buildimages](https://github.com/DataDog/datadog-agent-buildimages/tree/main/windows). @@ -10,60 +104,27 @@ To build the agent on Windows, see [datadog-agent-buildimages](https://github.co The Agent embeds a full-fledged CPython interpreter so it requires the development files to be available in the dev env. The Agent can embed Python 2 and/or Python 3, you will need development files for all versions you want to support. -If you're on OSX/macOS, installing Python 2.7 and/or 3.11 with [Homebrew](https://brew.sh): +If you're on OSX/macOS, installing Python 2.7 and/or 3.12 with [Homebrew](https://brew.sh): ``` brew install python@2 -brew install python@3.11 +brew install python@3.12 ``` On Linux, depending on the distribution, you might need to explicitly install the development files, for example on Ubuntu: ``` sudo apt-get install python2.7-dev -sudo apt-get install python3.11-dev +sudo apt-get install python3.12-dev ``` -On Windows, install Python 2.7 and/or 3.11 via the [official installer](https://www.python.org/downloads/) brings along all the development files needed: +On Windows, install Python 2.7 and/or 3.12 via the [official installer](https://www.python.org/downloads/) brings along all the development files needed: !!! warning - If you don't use one of the Python versions that are explicitly supported, you may have problems running the built Agent's Python checks, especially if using a virtualenv. At this time, only Python 3.11 is confirmed to work as expected in the development environment. + If you don't use one of the Python versions that are explicitly supported, you may have problems running the built Agent's Python checks, especially if using a virtualenv. At this time, only Python 3.12 is confirmed to work as expected in the development environment. #### Python Dependencies -##### Preface - -[Invoke](http://www.pyinvoke.org) is a task runner written in Python that is extensively used in this project to orchestrate builds and test runs. 
To run the tasks, you need to have it installed on your machine. We offer two different ways to run our invoke tasks. - -##### `deva` (recommended) - -The `deva` CLI tool is a single binary that can be used to install and manage the development environment for the Agent, built by the Datadog team. It will install all the necessary Python dependencies for you. The development environment will be completely independent of your system Python installation. This tool leverages [PyApp](https://ofek.dev/pyapp/latest/), a wrapper for Python applications that bootstrap themselves at runtime. In our case, we wrap `invoke` itself and include the dependencies needed to work on the Agent. - -To install `deva`, you'll need to: - -1. Download the binary for your platform from the [releases page](https://github.com/DataDog/datadog-agent-devtools/releases/latest), -2. Make it executable (and optionally add it to your PATH), -3. Run the invoke command you need, using `deva` in place of `invoke` or `inv`. - -The Python environment will automatically be created on the first run. and will be reused for subsequent runs. For example: - -```shell -cd datadog-agent -curl -L -o deva https://github.com/DataDog/datadog-agent-devtools/releases/download/deva-v1.0.0/deva-aarch64-unknown-linux-gnu-1.0.0 -chmod +x deva -./deva linter.go -``` - -Below a live demo of how the tool works: - -![deva_install](./assets/images/deva.gif) - -If you want to uninstall `deva`, you can simply run the `./deva self remove` command, which will remove the virtual environment from your system, and remove the binary. That's it. - -##### Manual Installation - -###### Virtual Environment - To protect and isolate your system-wide python installation, a python virtual environment is _highly_ recommended (though optional). It will help keep a self-contained development environment and ensure a clean system Python. !!! note @@ -82,33 +143,21 @@ To protect and isolate your system-wide python installation, a python virtual en If using virtual environments when running the built Agent, you may need to override the built Agent's search path for Python check packages using the `PYTHONPATH` variable (your target path must have the [pre-requisite core integration packages installed](https://datadoghq.dev/integrations-core/setup/) though). ```sh -PYTHONPATH="./venv/lib/python3.11/site-packages:$PYTHONPATH" ./agent run ... +PYTHONPATH="./venv/lib/python3.12/site-packages:$PYTHONPATH" ./agent run ... ``` See also some notes in [./checks](https://github.com/DataDog/datadog-agent/tree/main/docs/dev/checks) about running custom python checks. -###### Install Invoke and its dependencies - -Our invoke tasks are only compatible with Python 3, thus you will need to use Python 3 to run them. - -Though you may install invoke in a variety of way we suggest you use the provided [requirements](https://github.com/DataDog/datadog-agent/blob/main/requirements.txt) file and `pip`: - -```bash -pip install -r tasks/requirements.txt -``` - -This procedure ensures you not only get the correct version of `invoke`, but also any additional python dependencies our development workflow may require, at their expected versions. It will also pull other handy development tools/deps (`reno`, or `docker`). - ### Golang -You must [install Golang](https://golang.org/doc/install) version `1.23.3` or later. Make sure that `$GOPATH/bin` is in your `$PATH` otherwise `invoke` cannot use any additional tool it might need. 
+You must [install Golang](https://golang.org/doc/install) version `1.23.5` or later. Make sure that `$GOPATH/bin` is in your `$PATH`, otherwise [tooling](#tooling) cannot use any additional tool it might need. !!! note Versions of Golang that aren't an exact match to the version specified in our build images (see e.g. [here](https://github.com/DataDog/datadog-agent-buildimages/blob/c025473ee467ee6d884d532e4c12c7d982ce8fe1/circleci/Dockerfile#L43)) may not be able to build the agent and/or the [rtloader](https://github.com/DataDog/datadog-agent/tree/main/rtloader) binary properly. -### Installing tooling +#### Installing tools -From the root of `datadog-agent`, run `invoke install-tools` to install go tooling. This uses `go` to install the necessary dependencies. +From the root of `datadog-agent`, run `inv install-tools` to install go tooling. This uses `go` to install the necessary dependencies. ### System or Embedded? @@ -152,7 +201,7 @@ If you want to build a Docker image containing the Agent, or if you wan to run [ We use [Doxygen](http://www.doxygen.nl) to generate the documentation for the `rtloader` part of the Agent. -To generate it (using the `invoke rtloader.generate-doc` command), you'll need to have Doxygen installed on your system and available in your `$PATH`. You can compile and install Doxygen from source with the instructions available [here](http://www.doxygen.nl/manual/install.html). Alternatively, you can use already-compiled Doxygen binaries from [here](http://www.doxygen.nl/download.html). +To generate it (using the `inv rtloader.generate-doc` command), you'll need to have Doxygen installed on your system and available in your `$PATH`. You can compile and install Doxygen from source with the instructions available [here](http://www.doxygen.nl/manual/install.html). Alternatively, you can use already-compiled Doxygen binaries from [here](http://www.doxygen.nl/download.html). To get the dependency graphs, you may also need to install the `dot` executable from [graphviz](http://www.graphviz.org/) and add it to your `$PATH`. @@ -200,7 +249,7 @@ See `pre-commit run --help` for further options. To configure the vscode editor to use a container as remote development environment you need to: - Install the [devcontainer plugin](https://code.visualstudio.com/docs/remote/containers) and the [golang language plugin](https://code.visualstudio.com/docs/languages/go). -- Run the following invoke command `deva vscode.setup-devcontainer --image ""`. This command will create the devcontainer configuration file `./devcontainer/devcontainer.json`. +- Run the following command `inv vscode.setup-devcontainer --image ""`. This command will create the devcontainer configuration file `./devcontainer/devcontainer.json`. - Start or restart your vscode editor. - A pop-up should show-up to propose to "reopen in container" your workspace. - The first start, it might propose you to install the golang plugin dependencies/tooling. 
diff --git a/flakes.yaml b/flakes.yaml index 8c95e78ab01e0..da655d3c2da03 100644 --- a/flakes.yaml +++ b/flakes.yaml @@ -12,3 +12,4 @@ test/new-e2e/tests/containers: - TestEKSSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - TestKindSuite/TestCPU/metric___container.cpu.usage{^kube_deployment:stress-ng$,^kube_namespace:workload-cpustress$} - TestKindSuite/TestAdmissionControllerWithAutoDetectedLanguage + - TestEKSSuite/TestAdmissionControllerWithAutoDetectedLanguage diff --git a/go.mod b/go.mod index f1e132f9ac36c..141100038f7c3 100644 --- a/go.mod +++ b/go.mod @@ -18,9 +18,7 @@ retract ( // Internal deps fix version replace ( github.com/cihub/seelog => github.com/cihub/seelog v0.0.0-20151216151435-d2c6e5aa9fbf // v2.6 - github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea github.com/spf13/cast => github.com/DataDog/cast v1.8.0 - github.com/ugorji/go => github.com/ugorji/go v1.1.7 ) replace ( @@ -62,7 +60,8 @@ replace ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor => ./comp/otelcol/otlp/components/processor/infraattributesprocessor github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor => ./comp/otelcol/otlp/components/statsprocessor github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil => ./comp/otelcol/otlp/testutil - github.com/DataDog/datadog-agent/comp/serializer/compression => ./comp/serializer/compression + github.com/DataDog/datadog-agent/comp/serializer/logscompression => ./comp/serializer/logscompression + github.com/DataDog/datadog-agent/comp/serializer/metricscompression => ./comp/serializer/metricscompression github.com/DataDog/datadog-agent/comp/trace/agent/def => ./comp/trace/agent/def github.com/DataDog/datadog-agent/comp/trace/compression/def => ./comp/trace/compression/def github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip => ./comp/trace/compression/impl-gzip @@ -80,6 +79,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/config/teeconfig => ./pkg/config/teeconfig github.com/DataDog/datadog-agent/pkg/config/utils => ./pkg/config/utils/ github.com/DataDog/datadog-agent/pkg/errors => ./pkg/errors + github.com/DataDog/datadog-agent/pkg/fips => ./pkg/fips github.com/DataDog/datadog-agent/pkg/gohai => ./pkg/gohai github.com/DataDog/datadog-agent/pkg/logs/auditor => ./pkg/logs/auditor github.com/DataDog/datadog-agent/pkg/logs/client => ./pkg/logs/client @@ -116,6 +116,10 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/cache => ./pkg/util/cache github.com/DataDog/datadog-agent/pkg/util/cgroups => ./pkg/util/cgroups github.com/DataDog/datadog-agent/pkg/util/common => ./pkg/util/common + github.com/DataDog/datadog-agent/pkg/util/compression => ./pkg/util/compression + github.com/DataDog/datadog-agent/pkg/util/compression/impl-noop => ./pkg/util/compression/impl-noop + github.com/DataDog/datadog-agent/pkg/util/compression/impl-zlib => ./pkg/util/compression/impl-zlib + github.com/DataDog/datadog-agent/pkg/util/compression/selector => ./pkg/util/compression/selector github.com/DataDog/datadog-agent/pkg/util/containers/image => ./pkg/util/containers/image github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ./pkg/util/defaultpaths/ github.com/DataDog/datadog-agent/pkg/util/executable => ./pkg/util/executable @@ -128,7 +132,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/json => ./pkg/util/json github.com/DataDog/datadog-agent/pkg/util/log => ./pkg/util/log 
github.com/DataDog/datadog-agent/pkg/util/log/setup => ./pkg/util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ./pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ./pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ./pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ./pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/sort => ./pkg/util/sort/ @@ -150,38 +154,39 @@ require ( github.com/CycloneDX/cyclonedx-go v0.9.1 github.com/DataDog/appsec-internal-go v1.9.0 github.com/DataDog/datadog-agent/pkg/gohai v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/obfuscate v0.59.0 - github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.59.0 + github.com/DataDog/datadog-agent/pkg/obfuscate v0.61.0 + github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.61.0 github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0 - github.com/DataDog/datadog-agent/pkg/trace v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/cgroups v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 + github.com/DataDog/datadog-agent/pkg/trace v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/cgroups v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/pointer v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.61.0 github.com/DataDog/datadog-go/v5 v5.6.0 - github.com/DataDog/datadog-operator v0.7.1-0.20241219210556-f517775059d1 - github.com/DataDog/ebpf-manager v0.7.6 + // TODO: pin to an operator released version once there is a release that includes the api module + github.com/DataDog/datadog-operator/api v0.0.0-20250114151552-463ab54482b4 + github.com/DataDog/ebpf-manager v0.7.7 github.com/DataDog/gopsutil v1.2.2 github.com/DataDog/nikos v1.12.9 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 github.com/DataDog/sketches-go v1.4.6 github.com/DataDog/viper v1.14.0 - github.com/DataDog/watermarkpodautoscaler v0.5.3-0.20241023200123-ab786c1724cf + // TODO: pin to a WPA released version once there is a release that includes the apis module + github.com/DataDog/watermarkpodautoscaler/apis v0.0.0-20250108152814-82e58d0231d1 github.com/DataDog/zstd v1.5.6 github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f // indirect github.com/Masterminds/semver/v3 v3.3.1 - github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.6.2 github.com/Microsoft/hcsshim v0.12.9 github.com/acobaugh/osrelease v0.1.0 github.com/alecthomas/participle v0.7.1 // indirect github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 - github.com/aquasecurity/trivy-db v0.0.0-20231005141211-4fc651f7ac8d + github.com/aquasecurity/trivy-db v0.0.0-20240910133327-7e0f4d2ed4c1 github.com/avast/retry-go/v4 v4.6.0 github.com/aws/aws-lambda-go v1.37.0 - github.com/aws/aws-sdk-go v1.55.5 // indirect + github.com/aws/aws-sdk-go v1.55.6 // indirect github.com/beevik/ntp v1.4.3 github.com/benbjohnson/clock v1.3.5 github.com/bhmj/jsonslice v0.0.0-20200323023432-92c3edaad8e2 @@ 
-191,14 +196,14 @@ require ( github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/cilium/ebpf v0.16.0 github.com/clbanning/mxj v1.8.4 - github.com/containerd/containerd v1.7.23 + github.com/containerd/containerd v1.7.25 github.com/containernetworking/cni v1.2.3 github.com/coreos/go-semver v0.3.1 - github.com/coreos/go-systemd v22.5.0+incompatible + github.com/coreos/go-systemd/v22 v22.5.0 github.com/cri-o/ocicni v0.4.3 github.com/cyphar/filepath-securejoin v0.3.4 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/docker/docker v27.4.0+incompatible + github.com/docker/docker v27.5.1+incompatible github.com/docker/go-connections v0.5.0 github.com/dustin/go-humanize v1.0.1 github.com/elastic/go-libaudit/v2 v2.5.0 @@ -213,19 +218,18 @@ require ( github.com/gobwas/glob v0.2.3 github.com/gogo/protobuf v1.3.2 github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da - github.com/golang/mock v1.6.0 + github.com/golang/mock v1.7.0-rc.1 github.com/golang/protobuf v1.5.4 github.com/google/go-cmp v0.6.0 - github.com/google/go-containerregistry v0.20.2 + github.com/google/go-containerregistry v0.20.3 github.com/google/gofuzz v1.2.0 github.com/google/gopacket v1.1.19 github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/gorilla/mux v1.8.1 github.com/gosnmp/gosnmp v1.38.0 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/h2non/filetype v1.1.3 - github.com/hashicorp/consul/api v1.30.0 + github.com/hashicorp/consul/api v1.31.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb @@ -247,7 +251,7 @@ require ( github.com/olekukonko/tablewriter v0.0.5 github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 github.com/open-policy-agent/opa v0.70.0 - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0 // indirect github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/image-spec v1.1.0 github.com/opencontainers/runtime-spec v1.2.0 @@ -258,23 +262,23 @@ require ( github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 github.com/prometheus/procfs v0.15.1 - github.com/redis/go-redis/v9 v9.1.0 + github.com/redis/go-redis/v9 v9.5.1 github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 // indirect github.com/robfig/cron/v3 v3.0.1 github.com/samber/lo v1.47.0 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 github.com/sirupsen/logrus v1.9.3 - github.com/skydive-project/go-debouncer v1.0.0 + github.com/skydive-project/go-debouncer v1.0.1 github.com/smira/go-xz v0.1.0 github.com/spf13/afero v1.11.0 - github.com/spf13/cast v1.7.0 + github.com/spf13/cast v1.7.1 github.com/spf13/cobra v1.8.1 github.com/spf13/pflag v1.0.5 github.com/streadway/amqp v1.1.0 github.com/stretchr/testify v1.10.0 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 - github.com/tinylib/msgp v1.2.4 + github.com/tinylib/msgp v1.2.5 github.com/twmb/murmur3 v1.1.8 github.com/uptrace/bun v1.2.5 github.com/uptrace/bun/dialect/pgdialect v1.2.5 @@ -288,17 +292,17 @@ require ( go.etcd.io/bbolt v1.3.11 go.etcd.io/etcd/client/v2 v2.306.0-alpha.0 go.mongodb.org/mongo-driver v1.15.1 - go.opentelemetry.io/collector v0.115.0 // indirect - 
- go.opentelemetry.io/collector/component v0.115.0
- go.opentelemetry.io/collector/confmap v1.21.0
- go.opentelemetry.io/collector/exporter v0.115.0
- go.opentelemetry.io/collector/exporter/debugexporter v0.115.0
- go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0
- go.opentelemetry.io/collector/pdata v1.21.0
- go.opentelemetry.io/collector/processor/batchprocessor v0.115.0
- go.opentelemetry.io/collector/receiver v0.115.0
- go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 // indirect
+ go.opentelemetry.io/collector v0.118.0 // indirect
+ go.opentelemetry.io/collector/component v0.118.0
+ go.opentelemetry.io/collector/confmap v1.24.0
+ go.opentelemetry.io/collector/exporter v0.118.0
+ go.opentelemetry.io/collector/exporter/debugexporter v0.118.0
+ go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0
+ go.opentelemetry.io/collector/pdata v1.24.0
+ go.opentelemetry.io/collector/processor/batchprocessor v0.118.0
+ go.opentelemetry.io/collector/receiver v0.118.0
+ go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.6.0
go.uber.org/dig v1.18.0
@@ -306,19 +310,19 @@ require (
go.uber.org/multierr v1.11.0
go.uber.org/zap v1.27.0
go4.org/netipx v0.0.0-20220812043211-3cc044ffd68d
- golang.org/x/arch v0.12.0
- golang.org/x/exp v0.0.0-20241210194714-1829a127f884
- golang.org/x/net v0.33.0
+ golang.org/x/arch v0.13.0
+ golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
+ golang.org/x/net v0.34.0
golang.org/x/sync v0.10.0
- golang.org/x/sys v0.28.0
+ golang.org/x/sys v0.29.0
golang.org/x/text v0.21.0
- golang.org/x/time v0.8.0
- golang.org/x/tools v0.28.0
+ golang.org/x/time v0.9.0
+ golang.org/x/tools v0.29.0
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect
- google.golang.org/grpc v1.67.1
+ google.golang.org/grpc v1.69.4
google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a
- google.golang.org/protobuf v1.35.2
+ google.golang.org/protobuf v1.36.3
gopkg.in/DataDog/dd-trace-go.v1 v1.69.1
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
@@ -327,7 +331,7 @@ require (
k8s.io/apiextensions-apiserver v0.31.2
k8s.io/apimachinery v0.31.4
k8s.io/apiserver v0.31.2 // indirect
- k8s.io/autoscaler/vertical-pod-autoscaler v0.13.0
+ k8s.io/autoscaler/vertical-pod-autoscaler v1.2.2
k8s.io/client-go v0.31.3
k8s.io/cri-api v0.31.2
k8s.io/klog v1.0.1-0.20200310124935-4ad0115ba9e4 // Min version that includes fix for Windows Nano
@@ -343,7 +347,7 @@ require (
)
require (
- cloud.google.com/go/compute/metadata v0.5.2 // indirect
+ cloud.google.com/go/compute/metadata v0.6.0
code.cloudfoundry.org/cfhttp/v2 v2.0.0 // indirect
code.cloudfoundry.org/clock v1.0.0 // indirect
code.cloudfoundry.org/consuladapter v0.0.0-20200131002136-ac1daf48ba97 // indirect
@@ -355,82 +359,67 @@ require (
github.com/AlekSi/pointer v1.2.0 // indirect
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
github.com/DataDog/aptly v1.5.3 // indirect
- github.com/DataDog/go-tuf v1.1.0-0.5.2
+ github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/gostackparse v0.7.0 // indirect
github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect
github.com/DisposaBoy/JsonConfigReader v0.0.0-20201129172854-99cf318d67e7 // indirect
- github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver v1.5.0
github.com/NYTimes/gziphandler v1.1.1 // indirect
github.com/OneOfOne/xxhash v1.2.8 // indirect
github.com/ProtonMail/go-crypto v1.1.3
github.com/StackExchange/wmi v1.2.1 // indirect
github.com/agnivade/levenshtein v1.2.0 // indirect
- github.com/aquasecurity/go-gem-version v0.0.0-20201115065557-8eed6fe000ce // indirect
- github.com/aquasecurity/go-npm-version v0.0.0-20201110091526-0b796d180798 // indirect
- github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 // indirect
- github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 // indirect
- github.com/aquasecurity/table v1.8.0 // indirect
- github.com/aquasecurity/tml v0.6.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/awalterschulze/gographviz v2.0.3+incompatible // indirect
- github.com/aws/aws-sdk-go-v2 v1.32.6
- github.com/aws/aws-sdk-go-v2/config v1.28.6
- github.com/aws/aws-sdk-go-v2/credentials v1.17.47
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 // indirect
+ github.com/aws/aws-sdk-go-v2 v1.33.0
+ github.com/aws/aws-sdk-go-v2/config v1.29.1
+ github.com/aws/aws-sdk-go-v2/credentials v1.17.54
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/ebs v1.27.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/ec2 v1.190.0
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.33.9 // indirect
github.com/aws/smithy-go v1.22.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 // indirect
github.com/briandowns/spinner v1.23.0 // indirect
github.com/cavaliergopher/grab/v3 v3.0.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/containerd/continuity v0.4.3 // indirect
+ github.com/containerd/continuity v0.4.4 // indirect
github.com/containerd/fifo v1.1.0 // indirect
- github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect
+ github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
github.com/containerd/ttrpc v1.2.5 // indirect
github.com/containernetworking/plugins v1.4.1 // indirect
- github.com/coreos/go-systemd/v22 v22.5.0
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect
github.com/dgryski/go-jump v0.0.0-20211018200510-ba001c3ffce0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
- github.com/docker/cli v27.4.0+incompatible // indirect
+ github.com/docker/cli v27.5.0+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
- github.com/docker/docker-credential-helpers v0.8.1 // indirect
+ github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/ghodss/yaml v1.0.0
- github.com/go-git/go-billy/v5 v5.5.0 // indirect
- github.com/go-git/go-git/v5 v5.12.0 // indirect
github.com/go-logfmt/logfmt v0.6.0 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/errors v0.22.0 // indirect
- github.com/go-openapi/jsonpointer v0.20.2 // indirect
- github.com/go-openapi/jsonreference v0.20.4 // indirect
- github.com/go-openapi/runtime v0.27.1 // indirect
- github.com/go-openapi/strfmt v0.23.0 // indirect
- github.com/go-openapi/swag v0.22.9 // indirect
- github.com/go-openapi/validate v0.23.0 // indirect
+ github.com/go-openapi/jsonpointer v0.21.0 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-redis/redis/v8 v8.11.5 // indirect
github.com/godbus/dbus/v5 v5.1.0
- github.com/golang/glog v1.2.2 // indirect
+ github.com/golang/glog v1.2.4 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.6.0
github.com/google/wire v0.6.0 // indirect
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.6.3 // indirect
@@ -440,8 +429,6 @@ require (
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/hcl v1.0.1-vault-5 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
- github.com/huandu/xstrings v1.5.0 // indirect
- github.com/in-toto/in-toto-golang v0.9.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/itchyny/timefmt-go v0.1.6 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
@@ -454,22 +441,16 @@ require (
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
github.com/kjk/lzma v0.0.0-20161016003348-3fd93898850d // indirect
github.com/klauspost/compress v1.17.11 // indirect
- github.com/klauspost/pgzip v1.2.5 // indirect
- github.com/knqyf263/go-apk-version v0.0.0-20200609155635-041fdbb8563f // indirect
+ github.com/klauspost/pgzip v1.2.6 // indirect
github.com/knqyf263/go-deb-version v0.0.0-20230223133812-3ed183d23422 // indirect
github.com/knqyf263/go-rpm-version v0.0.0-20220614171824-631e686d1075 // indirect
github.com/knqyf263/go-rpmdb v0.1.1
- github.com/knqyf263/nested v0.0.1 // indirect
- github.com/liamg/jfather v0.0.7 // indirect
github.com/libp2p/go-reuseport v0.2.0 // indirect
- github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
+ github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect
github.com/magiconair/properties v1.8.7 // indirect
- github.com/masahiro331/go-disk v0.0.0-20220919035250-c8da316f91ac // indirect
- github.com/masahiro331/go-ebs-file v0.0.0-20240112135404-d5fbb1d46323 // indirect
- github.com/masahiro331/go-ext4-filesystem v0.0.0-20231208112839-4339555a0cd4 // indirect
- github.com/masahiro331/go-mvn-version v0.0.0-20210429150710-d3157d602a08 // indirect
- github.com/masahiro331/go-vmdk-parser v0.0.0-20221225061455-612096e4bbbd // indirect
- github.com/masahiro331/go-xfs-filesystem v0.0.0-20230608043311-a335f4599b70 // indirect
+ github.com/masahiro331/go-disk v0.0.0-20240625071113-56c933208fee // indirect
+ github.com/masahiro331/go-ext4-filesystem v0.0.0-20240620024024-ca14e6327bbd // indirect
+ github.com/masahiro331/go-xfs-filesystem v0.0.0-20231205045356-1b22259a6c44 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect
@@ -479,9 +460,8 @@ require (
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/mkrautz/goar v0.0.0-20150919110319-282caa8bd9da // indirect
- github.com/moby/buildkit v0.12.5 // indirect
github.com/moby/locker v1.0.1 // indirect
- github.com/moby/sys/signal v0.7.0 // indirect
+ github.com/moby/sys/signal v0.7.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/montanaflynn/stats v0.7.0 // indirect
@@ -489,32 +469,24 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
- github.com/oklog/ulid v1.3.1 // indirect
github.com/opencontainers/selinux v1.11.0 // indirect
- github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
- github.com/owenrumney/go-sarif/v2 v2.3.0 // indirect
- github.com/package-url/packageurl-go v0.1.2 // indirect
+ github.com/package-url/packageurl-go v0.1.3 // indirect
github.com/pborman/uuid v1.2.1 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect
- github.com/pierrec/lz4/v4 v4.1.21
+ github.com/pierrec/lz4/v4 v4.1.22
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
- github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
- github.com/prometheus/common v0.60.1
+ github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
+ github.com/prometheus/common v0.62.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rs/cors v1.11.1 // indirect
github.com/safchain/baloum v0.0.0-20241120122234-f22c9bd19f3b
- github.com/saracen/walker v0.1.3 // indirect
github.com/sassoftware/go-rpmutils v0.4.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
- github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
- github.com/shibumi/go-pathspec v1.3.0 // indirect
- github.com/shopspring/decimal v1.4.0 // indirect
github.com/smira/go-ftp-protocol v0.0.0-20140829150050-066b75c2b70d // indirect
- github.com/spdx/tools-golang v0.5.4-0.20231108154018-0c0f394b5e1a // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
@@ -530,7 +502,7 @@ require (
github.com/twmb/franz-go/pkg/kmsg v1.8.0
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
- github.com/vbatts/tar-split v0.11.5 // indirect
+ github.com/vbatts/tar-split v0.11.6 // indirect
github.com/vito/go-sse v1.0.0 // indirect
github.com/vmihailenco/msgpack/v5 v5.4.1
github.com/vmihailenco/tagparser v0.1.2 // indirect
@@ -541,7 +513,6 @@ require (
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8
- github.com/xlab/treeprint v1.2.0 // indirect
github.com/xor-gate/ar v0.0.0-20170530204233-5c72ae81e2b7 // indirect
github.com/yashtewari/glob-intersection v0.2.0 // indirect
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d // indirect
@@ -551,36 +522,35 @@ require (
go.etcd.io/etcd/client/v3 v3.6.0-alpha.0 // indirect
go.etcd.io/etcd/server/v3 v3.6.0-alpha.0.0.20220522111935-c3bc4116dcd1 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/collector/consumer v1.21.0 // indirect
- go.opentelemetry.io/collector/featuregate v1.21.0
- go.opentelemetry.io/collector/semconv v0.115.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 // indirect
+ go.opentelemetry.io/collector/consumer v1.24.0 // indirect
+ go.opentelemetry.io/collector/featuregate v1.24.0
+ go.opentelemetry.io/collector/semconv v0.118.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/contrib/propagators/b3 v1.31.0 // indirect
- go.opentelemetry.io/otel v1.32.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0
- go.opentelemetry.io/otel/exporters/prometheus v0.54.0 // indirect
- go.opentelemetry.io/otel/metric v1.32.0 // indirect
- go.opentelemetry.io/otel/sdk v1.32.0
- go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect
- go.opentelemetry.io/otel/trace v1.32.0
- go.opentelemetry.io/proto/otlp v1.3.1 // indirect
- golang.org/x/crypto v0.31.0 // indirect
+ go.opentelemetry.io/otel v1.33.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0
+ go.opentelemetry.io/otel/exporters/prometheus v0.55.0 // indirect
+ go.opentelemetry.io/otel/metric v1.33.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.33.0
+ go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect
+ go.opentelemetry.io/otel/trace v1.33.0
+ go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+ golang.org/x/crypto v0.32.0 // indirect
golang.org/x/mod v0.22.0
- golang.org/x/oauth2 v0.23.0 // indirect
- golang.org/x/term v0.27.0 // indirect
+ golang.org/x/oauth2 v0.25.0 // indirect
+ golang.org/x/term v0.28.0 // indirect
gonum.org/v1/gonum v0.15.1 // indirect
google.golang.org/api v0.199.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
gopkg.in/Knetic/govaluate.v3 v3.0.0 // indirect
- gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
k8s.io/component-base v0.31.2
mellium.im/sasl v0.3.2 // indirect
modernc.org/libc v1.55.3 // indirect
- modernc.org/mathutil v1.6.0
+ modernc.org/mathutil v1.6.0 // indirect
modernc.org/memory v1.8.0 // indirect
modernc.org/sqlite v1.34.1
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 // indirect
@@ -591,13 +561,14 @@ require (
)
require (
- github.com/DataDog/datadog-agent/comp/api/authtoken v0.0.0-00010101000000-000000000000
- github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0
+ github.com/DataDog/datadog-agent/comp/api/authtoken v0.61.0
+ github.com/DataDog/datadog-agent/comp/core/flare/builder v0.61.0
github.com/DataDog/datadog-agent/comp/core/tagger/utils v0.59.0
github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/def v0.59.0-rc.6
- github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.0.0-00010101000000-000000000000
- github.com/DataDog/datadog-agent/pkg/config/structure v0.60.0-devel
- github.com/DataDog/datadog-agent/pkg/util/defaultpaths v0.0.0-00010101000000-000000000000
+ github.com/DataDog/datadog-agent/comp/otelcol/ddflareextension/impl v0.61.0
+ github.com/DataDog/datadog-agent/pkg/config/structure v0.61.0
+ github.com/DataDog/datadog-agent/pkg/fips v0.0.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/defaultpaths v0.61.0
github.com/DataDog/datadog-agent/pkg/util/utilizationtracker v0.0.0
github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.11
github.com/NVIDIA/go-nvml v0.12.4-0
@@ -605,144 +576,155 @@ require (
github.com/containerd/containerd/api v1.8.0
github.com/containerd/errdefs v1.0.0
github.com/distribution/reference v0.6.0
- github.com/expr-lang/expr v1.16.9
+ github.com/expr-lang/expr v1.16.9 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1
github.com/jellydator/ttlcache/v3 v3.3.0
github.com/kouhin/envflag v0.0.0-20150818174321-0e9a86061649
github.com/lorenzosaino/go-sysctl v0.3.1
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0
- go.opentelemetry.io/collector/config/configtelemetry v0.115.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0
+ go.opentelemetry.io/collector/config/configtelemetry v0.118.0
)
require (
- github.com/shirou/gopsutil/v4 v4.24.11
- go.opentelemetry.io/collector/component/componenttest v0.115.0
+ github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3
+ github.com/Masterminds/sprig/v3 v3.3.0
+ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e
+ github.com/shirou/gopsutil/v4 v4.24.12
+ go.opentelemetry.io/collector/component/componenttest v0.118.0
)
require (
- go.opentelemetry.io/collector/extension/extensiontest v0.115.0 // indirect
- go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0 // indirect
- go.opentelemetry.io/collector/scraper v0.115.0 // indirect
+ go.opentelemetry.io/collector/connector/xconnector v0.118.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0 // indirect
+ go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect
+ go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0 // indirect
+ go.opentelemetry.io/collector/exporter/xexporter v0.118.0 // indirect
+ go.opentelemetry.io/collector/extension/extensiontest v0.118.0 // indirect
+ go.opentelemetry.io/collector/extension/xextension v0.118.0 // indirect
+ go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 // indirect
+ go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.118.0 // indirect
+ go.opentelemetry.io/collector/processor/xprocessor v0.118.0 // indirect
+ go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 // indirect
+ go.opentelemetry.io/collector/scraper v0.118.0 // indirect
+ go.opentelemetry.io/collector/scraper/scraperhelper v0.118.0 // indirect
)
require (
- go.opentelemetry.io/collector/connector/connectortest v0.115.0 // indirect
- go.opentelemetry.io/collector/consumer/consumererror v0.115.0 // indirect
- go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 // indirect
- go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 // indirect
- go.opentelemetry.io/collector/exporter/exportertest v0.115.0 // indirect
- go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 // indirect
- go.opentelemetry.io/collector/internal/memorylimiter v0.115.0 // indirect
- go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0 // indirect
- go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 // indirect
- go.opentelemetry.io/collector/processor/processortest v0.115.0 // indirect
- go.opentelemetry.io/collector/receiver/receivertest v0.115.0 // indirect
+ go.opentelemetry.io/collector/connector/connectortest v0.118.0 // indirect
+ go.opentelemetry.io/collector/consumer/consumererror v0.118.0 // indirect
+ go.opentelemetry.io/collector/exporter/exportertest v0.118.0 // indirect
+ go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 // indirect
+ go.opentelemetry.io/collector/internal/memorylimiter v0.118.0 // indirect
+ go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0 // indirect
+ go.opentelemetry.io/collector/processor/processortest v0.118.0 // indirect
+ go.opentelemetry.io/collector/receiver/receivertest v0.118.0 // indirect
go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 // indirect
)
require (
github.com/DATA-DOG/go-sqlmock v1.5.2
- github.com/DataDog/agent-payload/v5 v5.0.138
+ github.com/DataDog/agent-payload/v5 v5.0.141
github.com/DataDog/datadog-agent/comp/api/api/def v0.56.0-rc.3
- github.com/DataDog/datadog-agent/comp/core/config v0.59.0
- github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0
- github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.59.0
- github.com/DataDog/datadog-agent/comp/core/log/def v0.59.0
+ github.com/DataDog/datadog-agent/comp/core/config v0.61.0
+ github.com/DataDog/datadog-agent/comp/core/flare/types v0.61.0
+ github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.61.0
+ github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0
github.com/DataDog/datadog-agent/comp/core/log/impl v0.59.0
github.com/DataDog/datadog-agent/comp/core/log/impl-trace v0.59.0
- github.com/DataDog/datadog-agent/comp/core/log/mock v0.58.0-devel
- github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0
+ github.com/DataDog/datadog-agent/comp/core/log/mock v0.61.0
+ github.com/DataDog/datadog-agent/comp/core/secrets v0.61.0
github.com/DataDog/datadog-agent/comp/core/status v0.59.0-rc.6
github.com/DataDog/datadog-agent/comp/core/status/statusimpl v0.56.0-rc.3
- github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.0.0-20241217122454-175edb6c74f2
- github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.0.0-00010101000000-000000000000
+ github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.1
+ github.com/DataDog/datadog-agent/comp/core/tagger/tags v0.61.0
github.com/DataDog/datadog-agent/comp/core/tagger/types v0.59.0
- github.com/DataDog/datadog-agent/comp/core/telemetry v0.59.0
- github.com/DataDog/datadog-agent/comp/def v0.59.0
+ github.com/DataDog/datadog-agent/comp/core/telemetry v0.61.0
+ github.com/DataDog/datadog-agent/comp/def v0.61.0
github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3
- github.com/DataDog/datadog-agent/comp/logs/agent/config v0.59.0
+ github.com/DataDog/datadog-agent/comp/logs/agent/config v0.61.0
github.com/DataDog/datadog-agent/comp/netflow/payload v0.56.0-rc.3
- github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.56.0-rc.3
+ github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/def v0.61.0
github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/otelcol/converter/def v0.56.0-rc.3
github.com/DataDog/datadog-agent/comp/otelcol/converter/impl v0.58.0
- github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.59.0
- github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl v0.59.0
+ github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline v0.61.0
+ github.com/DataDog/datadog-agent/comp/otelcol/logsagentpipeline/logsagentpipelineimpl v0.61.0
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/datadogexporter v0.59.0
- github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.61.0-devel.0.20241118141418-5b899217c342
+ github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/logsagentexporter v0.62.0-devel.0.20241213165407-f95df913d2b7
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/exporter/serializerexporter v0.59.0-rc.6
- github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.59.0
+ github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/metricsclient v0.61.0
github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/processor/infraattributesprocessor v0.59.0
github.com/DataDog/datadog-agent/comp/otelcol/otlp/testutil v0.57.0-devel.0.20240718200853-81bf3b2e412d
- github.com/DataDog/datadog-agent/comp/serializer/compression v0.59.0-rc.6
+ github.com/DataDog/datadog-agent/comp/serializer/logscompression v0.61.0
+ github.com/DataDog/datadog-agent/comp/serializer/metricscompression v0.59.0-rc.6
github.com/DataDog/datadog-agent/comp/trace/agent/def v0.59.0-rc.6
- github.com/DataDog/datadog-agent/comp/trace/compression/def v0.59.0
- github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.59.0
+ github.com/DataDog/datadog-agent/comp/trace/compression/def v0.61.0
+ github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.61.0
github.com/DataDog/datadog-agent/comp/trace/compression/impl-zstd v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.59.0-rc.6
github.com/DataDog/datadog-agent/pkg/api v0.59.0
- github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0
- github.com/DataDog/datadog-agent/pkg/config/env v0.59.0
- github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0
- github.com/DataDog/datadog-agent/pkg/config/model v0.59.0
+ github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.61.0
+ github.com/DataDog/datadog-agent/pkg/config/env v0.61.0
+ github.com/DataDog/datadog-agent/pkg/config/mock v0.61.0
+ github.com/DataDog/datadog-agent/pkg/config/model v0.61.0
github.com/DataDog/datadog-agent/pkg/config/remote v0.59.0-rc.5
- github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0
- github.com/DataDog/datadog-agent/pkg/config/utils v0.59.0
+ github.com/DataDog/datadog-agent/pkg/config/setup v0.61.0
+ github.com/DataDog/datadog-agent/pkg/config/utils v0.61.0
github.com/DataDog/datadog-agent/pkg/errors v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/logs/auditor v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/client v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/message v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/metrics v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/processor v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/sds v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/sender v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/sources v0.59.0
- github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.59.0
+ github.com/DataDog/datadog-agent/pkg/logs/auditor v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/client v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/message v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/metrics v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/pipeline v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/processor v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/sds v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/sender v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/sources v0.61.0
+ github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.61.0
github.com/DataDog/datadog-agent/pkg/logs/util/testutils v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/metrics v0.59.0-rc.6
github.com/DataDog/datadog-agent/pkg/networkdevice/profile v0.56.0-rc.3
github.com/DataDog/datadog-agent/pkg/orchestrator/model v0.59.0
github.com/DataDog/datadog-agent/pkg/process/util/api v0.59.0
- github.com/DataDog/datadog-agent/pkg/proto v0.59.0
+ github.com/DataDog/datadog-agent/pkg/proto v0.63.0-devel
github.com/DataDog/datadog-agent/pkg/security/seclwin v0.56.0
github.com/DataDog/datadog-agent/pkg/serializer v0.59.0
- github.com/DataDog/datadog-agent/pkg/status/health v0.59.0
+ github.com/DataDog/datadog-agent/pkg/status/health v0.61.0
github.com/DataDog/datadog-agent/pkg/tagger/types v0.59.0
github.com/DataDog/datadog-agent/pkg/tagset v0.59.0
- github.com/DataDog/datadog-agent/pkg/telemetry v0.59.0
- github.com/DataDog/datadog-agent/pkg/util/backoff v0.59.0
+ github.com/DataDog/datadog-agent/pkg/telemetry v0.61.0
+ github.com/DataDog/datadog-agent/pkg/util/backoff v0.61.0
github.com/DataDog/datadog-agent/pkg/util/cache v0.59.0-rc.5
github.com/DataDog/datadog-agent/pkg/util/common v0.59.0
github.com/DataDog/datadog-agent/pkg/util/containers/image v0.56.2
- github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0
- github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0
+ github.com/DataDog/datadog-agent/pkg/util/executable v0.61.0
+ github.com/DataDog/datadog-agent/pkg/util/filesystem v0.61.0
github.com/DataDog/datadog-agent/pkg/util/flavor v0.56.0-rc.3
- github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0
+ github.com/DataDog/datadog-agent/pkg/util/fxutil v0.61.0
github.com/DataDog/datadog-agent/pkg/util/grpc v0.59.0
- github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0
- github.com/DataDog/datadog-agent/pkg/util/http v0.59.0
+ github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.61.0
+ github.com/DataDog/datadog-agent/pkg/util/http v0.61.0
github.com/DataDog/datadog-agent/pkg/util/json v0.59.0
github.com/DataDog/datadog-agent/pkg/util/log/setup v1.0.0
- github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0
+ github.com/DataDog/datadog-agent/pkg/util/option v0.59.0
github.com/DataDog/datadog-agent/pkg/util/sort v0.59.0
- github.com/DataDog/datadog-agent/pkg/util/startstop v0.59.0
- github.com/DataDog/datadog-agent/pkg/util/system v0.59.0
+ github.com/DataDog/datadog-agent/pkg/util/startstop v0.61.0
+ github.com/DataDog/datadog-agent/pkg/util/system v0.61.0
github.com/DataDog/datadog-agent/pkg/util/testutil v0.59.0
github.com/DataDog/datadog-agent/pkg/util/uuid v0.59.0
- github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1
- github.com/DataDog/datadog-agent/pkg/version v0.59.1
+ github.com/DataDog/datadog-agent/pkg/util/winutil v0.61.0
+ github.com/DataDog/datadog-agent/pkg/version v0.61.0
github.com/DataDog/go-libddwaf/v3 v3.5.1
- github.com/DataDog/go-sqllexer v0.0.17
+ github.com/DataDog/go-sqllexer v0.0.20
github.com/Datadog/dublin-traceroute v0.0.2
github.com/aquasecurity/trivy v0.49.2-0.20240227072422-e1ea02c7b80d
github.com/aws/aws-sdk-go-v2/service/kms v1.37.6
github.com/aws/aws-sdk-go-v2/service/rds v1.90.0
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.6
github.com/cloudfoundry-community/go-cfclient/v2 v2.0.1-0.20230503155151-3d15366c5820
- github.com/containerd/cgroups/v3 v3.0.4
+ github.com/containerd/cgroups/v3 v3.0.5
github.com/containerd/typeurl/v2 v2.2.3
github.com/dvsekhvalnov/jose2go v1.7.0
github.com/elastic/go-seccomp-bpf v1.5.0
@@ -755,7 +737,7 @@ require (
github.com/judwhite/go-svc v1.2.1
github.com/kr/pretty v0.3.1
// todo: update datadog connector with breaking changes from https://github.com/DataDog/datadog-agent/pull/26347.
- github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.115.0
+ github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.118.0
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
github.com/prometheus-community/pro-bing v0.4.1
github.com/rickar/props v1.0.0
@@ -763,15 +745,15 @@ require (
github.com/swaggest/jsonschema-go v0.3.70
github.com/valyala/fastjson v1.6.4
github.com/vibrantbyte/go-antpath v1.1.1
- go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0
- go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0
- go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0
- go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0
- go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0
- go.opentelemetry.io/collector/extension v0.115.0
- go.opentelemetry.io/collector/otelcol v0.115.0
- go.opentelemetry.io/collector/processor v0.115.0
- go.opentelemetry.io/collector/service v0.115.0
+ go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0
+ go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0
+ go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0
+ go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0
+ go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0
+ go.opentelemetry.io/collector/extension v0.118.0
+ go.opentelemetry.io/collector/otelcol v0.118.0
+ go.opentelemetry.io/collector/processor v0.118.0
+ go.opentelemetry.io/collector/service v0.118.0
go4.org/intern v0.0.0-20230525184215-6c62f75575cb
go4.org/mem v0.0.0-20220726221520-4f986261bf13
k8s.io/cli-runtime v0.31.2
@@ -788,27 +770,28 @@ require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 // indirect
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 // indirect
- github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 // indirect
+ github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/compute/armcompute/v5 v5.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0 // indirect
github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 // indirect
github.com/Code-Hex/go-generics-cache v1.5.1 // indirect
- github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.59.0 // indirect
- github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect
- github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect
- github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.59.0 // indirect
+ github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.61.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.61.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.61.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect
- github.com/DataDog/datadog-agent/pkg/util/statstracker v0.59.0 // indirect
- github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect
- github.com/DataDog/datadog-api-client-go/v2 v2.33.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/statstracker v0.61.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/util/system/socket v0.61.0 // indirect
+ github.com/DataDog/datadog-api-client-go/v2 v2.34.0 // indirect
github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0 // indirect
- github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0 // indirect
+ github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
github.com/Intevation/gval v1.3.0 // indirect
github.com/Intevation/jsonpath v0.2.1 // indirect
+ github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Showmax/go-fqdn v1.0.0 // indirect
github.com/VividCortex/ewma v1.2.0 // indirect
github.com/agext/levenshtein v1.2.3 // indirect
@@ -816,18 +799,27 @@ require (
github.com/alecthomas/participle/v2 v2.1.1 // indirect
github.com/alecthomas/repr v0.4.0 // indirect
github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 // indirect
- github.com/antchfx/xmlquery v1.4.2 // indirect
- github.com/antchfx/xpath v1.3.2 // indirect
+ github.com/antchfx/xmlquery v1.4.3 // indirect
+ github.com/antchfx/xpath v1.3.3 // indirect
github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
github.com/apache/thrift v0.21.0 // indirect
+ github.com/aquasecurity/go-gem-version v0.0.0-20201115065557-8eed6fe000ce // indirect
+ github.com/aquasecurity/go-npm-version v0.0.0-20201110091526-0b796d180798 // indirect
+ github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 // indirect
+ github.com/aquasecurity/go-version v0.0.0-20240603093900-cf8a8d29271d // indirect
+ github.com/aquasecurity/table v1.8.0 // indirect
+ github.com/aquasecurity/tml v0.6.1 // indirect
github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ebs v1.22.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ecr v1.36.7 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 // indirect
github.com/bahlo/generic-list-go v0.2.0 // indirect
github.com/bitnami/go-version v0.0.0-20231130084017-bb00604d650c // indirect
+ github.com/blang/semver v3.5.1+incompatible // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
- github.com/bmatcuk/doublestar/v4 v4.7.1 // indirect
+ github.com/bmatcuk/doublestar/v4 v4.8.0 // indirect
github.com/buger/jsonparser v1.1.1 // indirect
- github.com/cheggaaa/pb/v3 v3.1.4 // indirect
+ github.com/cheggaaa/pb/v3 v3.1.5 // indirect
github.com/chrusty/protoc-gen-jsonschema v0.0.0-20240212064413-73d5723042b8 // indirect
github.com/cloudflare/circl v1.3.8 // indirect
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
@@ -835,8 +827,11 @@ require (
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/csaf-poc/csaf_distribution/v3 v3.0.0 // indirect
+ github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 // indirect
github.com/dennwc/varint v1.0.0 // indirect
github.com/digitalocean/godo v1.118.0 // indirect
+ github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 // indirect
+ github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 // indirect
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
github.com/ebitengine/purego v0.8.1 // indirect
github.com/elastic/go-grok v0.3.1 // indirect
@@ -844,26 +839,34 @@ require (
github.com/elastic/lunes v0.1.0 // indirect
github.com/emicklei/go-restful/v3 v3.12.1 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
- github.com/envoyproxy/go-control-plane v0.13.0 // indirect
+ github.com/envoyproxy/go-control-plane v0.13.1 // indirect
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/glebarez/go-sqlite v1.22.0 // indirect
+ github.com/go-chi/chi v4.1.2+incompatible // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
+ github.com/go-git/go-billy/v5 v5.6.0 // indirect
+ github.com/go-git/go-git/v5 v5.13.0 // indirect
github.com/go-kit/log v0.2.1 // indirect
- github.com/go-openapi/analysis v0.22.2 // indirect
- github.com/go-openapi/loads v0.21.5 // indirect
- github.com/go-openapi/spec v0.20.14 // indirect
+ github.com/go-openapi/analysis v0.23.0 // indirect
+ github.com/go-openapi/errors v0.22.0 // indirect
+ github.com/go-openapi/loads v0.22.0 // indirect
+ github.com/go-openapi/runtime v0.28.0 // indirect
+ github.com/go-openapi/spec v0.21.0 // indirect
+ github.com/go-openapi/strfmt v0.23.0 // indirect
+ github.com/go-openapi/validate v0.24.0 // indirect
github.com/go-resty/resty/v2 v2.13.1 // indirect
- github.com/go-test/deep v1.1.0 // indirect
github.com/go-zookeeper/zk v1.0.3 // indirect
- github.com/goccy/go-json v0.10.3 // indirect
+ github.com/gobuffalo/flect v1.0.2 // indirect
+ github.com/goccy/go-json v0.10.4 // indirect
github.com/goccy/go-yaml v1.11.0 // indirect
github.com/godror/knownpb v0.1.0 // indirect
github.com/gogo/googleapis v1.4.1 // indirect
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
github.com/google/cel-go v0.20.1 // indirect
- github.com/google/flatbuffers v24.3.25+incompatible // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
+ github.com/google/certificate-transparency-go v1.1.8 // indirect
+ github.com/google/gnostic-models v0.6.9 // indirect
+ github.com/google/go-github/v62 v62.0.0 // indirect
github.com/google/go-querystring v1.1.0 // indirect
github.com/google/s2a-go v0.1.8 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
@@ -878,30 +881,41 @@ require (
github.com/hashicorp/go-sockaddr v1.0.6 // indirect
github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 // indirect
github.com/hetznercloud/hcloud-go/v2 v2.10.2 // indirect
+ github.com/huandu/xstrings v1.5.0 // indirect
github.com/iancoleman/strcase v0.3.0 // indirect
+ github.com/in-toto/in-toto-golang v0.9.0 // indirect
github.com/ionos-cloud/sdk-go/v6 v6.1.11 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
- github.com/jaegertracing/jaeger v1.62.0 // indirect
+ github.com/jaegertracing/jaeger v1.65.0 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
+ github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 // indirect
github.com/jonboulle/clockwork v0.4.0 // indirect
github.com/jpillora/backoff v1.0.0 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/knadh/koanf/maps v0.1.0 // indirect
github.com/knadh/koanf/providers/confmap v0.1.0-dev0 // indirect
github.com/knadh/koanf/v2 v2.1.2 // indirect
+ github.com/knqyf263/go-apk-version v0.0.0-20200609155635-041fdbb8563f // indirect
+ github.com/knqyf263/nested v0.0.1 // indirect
github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect
github.com/kr/text v0.2.0 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/leodido/go-syslog/v4 v4.2.0 // indirect
github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b // indirect
+ github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 // indirect
+ github.com/liamg/jfather v0.0.7 // indirect
github.com/lightstep/go-expohisto v1.0.0 // indirect
github.com/linode/linodego v1.37.0 // indirect
github.com/lunixbochs/struc v0.0.0-20200707160740-784aaebc1d40 // indirect
github.com/magefile/mage v1.15.0 // indirect
+ github.com/masahiro331/go-ebs-file v0.0.0-20240917043618-e6d2bea5c32e // indirect
+ github.com/masahiro331/go-mvn-version v0.0.0-20210429150710-d3157d602a08 // indirect
+ github.com/masahiro331/go-vmdk-parser v0.0.0-20221225061455-612096e4bbbd // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect
github.com/microsoft/go-rustaudit v0.0.0-20220808201409-204dfee52032 // indirect
+ github.com/moby/buildkit v0.16.0 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/spdystream v0.4.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
@@ -909,57 +923,62 @@ require (
github.com/moby/sys/userns v0.1.0 // indirect
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.115.0 // indirect
- github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0 // indirect
+ github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 // indirect
+ github.com/oklog/ulid v1.3.1 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.118.0 // indirect
+ github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0 // indirect
github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142 // indirect
+ github.com/opentracing/opentracing-go v1.2.0 // indirect
+ github.com/openvex/discovery v0.1.1-0.20240802171711-7c54efc57553 // indirect
github.com/openvex/go-vex v0.2.5 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
github.com/ovh/go-ovh v1.6.0 // indirect
+ github.com/owenrumney/go-sarif/v2 v2.3.3 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pjbgf/sha1cd v0.3.0 // indirect
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect
@@ -973,58 +992,66 @@ require (
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 // indirect
+ github.com/sassoftware/relic v7.2.1+incompatible // indirect
github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 // indirect
+ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
+ github.com/shibumi/go-pathspec v1.3.0 // indirect
+ github.com/shopspring/decimal v1.4.0 // indirect
github.com/signalfx/sapm-proto v0.17.0 // indirect
- github.com/sigstore/rekor v1.2.2 // indirect
- github.com/skeema/knownhosts v1.2.2 // indirect
+ github.com/sigstore/cosign/v2 v2.2.4 // indirect
+ github.com/sigstore/rekor v1.3.6 // indirect
+ github.com/sigstore/sigstore v1.8.3 // indirect
+ github.com/sigstore/timestamp-authority v1.2.2 // indirect
+ github.com/skeema/knownhosts v1.3.0 // indirect
github.com/smartystreets/assertions v1.1.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
+ github.com/spdx/tools-golang v0.5.5 // indirect
github.com/spf13/viper v1.19.0 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/stormcat24/protodep v0.1.8 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/swaggest/refl v1.3.0 // indirect
- github.com/tetratelabs/wazero v1.7.0 // indirect
+ github.com/tetratelabs/wazero v1.8.0 // indirect
+ github.com/theupdateframework/go-tuf v0.7.0 // indirect
github.com/tidwall/gjson v1.18.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/sjson v1.2.5 // indirect
+ github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 // indirect
+ github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 // indirect
+ github.com/transparency-dev/merkle v0.0.2 // indirect
github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6 // indirect
github.com/vultr/govultr/v2 v2.17.2 // indirect
github.com/wk8/go-ordered-map/v2 v2.1.8 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
- go.opentelemetry.io/collector/client v1.21.0 // indirect
- go.opentelemetry.io/collector/component/componentstatus v0.115.0 // indirect
- go.opentelemetry.io/collector/config/configauth v0.115.0 // indirect
- go.opentelemetry.io/collector/config/configcompression v1.21.0 // indirect
- go.opentelemetry.io/collector/config/configgrpc v0.115.0 // indirect
- go.opentelemetry.io/collector/config/confighttp v0.115.0 // indirect
- go.opentelemetry.io/collector/config/confignet v1.21.0 // indirect
- go.opentelemetry.io/collector/config/configopaque v1.21.0 // indirect
- go.opentelemetry.io/collector/config/configretry v1.21.0 // indirect
- go.opentelemetry.io/collector/config/configtls v1.21.0 // indirect
- go.opentelemetry.io/collector/config/internal v0.115.0 // indirect
- go.opentelemetry.io/collector/connector v0.115.0 // indirect
- go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 // indirect
- go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect
- go.opentelemetry.io/collector/consumer/consumertest v0.115.0 // indirect
- go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 // indirect
- go.opentelemetry.io/collector/exporter/nopexporter v0.115.0 // indirect
- go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0 // indirect
- go.opentelemetry.io/collector/extension/auth v0.115.0 // indirect
- go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 // indirect
- go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 // indirect
- go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 // indirect
- go.opentelemetry.io/collector/filter v0.115.0 // indirect
go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/testdata v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 // indirect - go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 // indirect + github.com/xlab/treeprint v1.2.0 // indirect + github.com/zorkian/go-datadog-api v2.30.0+incompatible // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/client v1.24.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.118.0 // indirect + go.opentelemetry.io/collector/config/configauth v0.118.0 // indirect + go.opentelemetry.io/collector/config/configcompression v1.24.0 // indirect + go.opentelemetry.io/collector/config/configgrpc v0.118.0 // indirect + go.opentelemetry.io/collector/config/confighttp v0.118.0 // indirect + go.opentelemetry.io/collector/config/confignet v1.24.0 // indirect + go.opentelemetry.io/collector/config/configopaque v1.24.0 // indirect + go.opentelemetry.io/collector/config/configretry v1.24.0 // indirect + go.opentelemetry.io/collector/config/configtls v1.24.0 // indirect + go.opentelemetry.io/collector/connector v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/nopexporter v0.118.0 // indirect + go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 // indirect + go.opentelemetry.io/collector/extension/auth v0.118.0 // indirect + go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 // indirect + go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 // indirect + go.opentelemetry.io/collector/filter v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.118.0 // indirect + go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0 // indirect go.opentelemetry.io/contrib/config v0.10.0 // indirect go.opentelemetry.io/contrib/zpages v0.56.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 // indirect @@ -1032,15 +1059,17 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 // indirect go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 // indirect - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0 // indirect go.opentelemetry.io/otel/log v0.8.0 // indirect go.opentelemetry.io/otel/sdk/log v0.7.0 // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/lint v0.0.0-20241112194109-818c5a804067 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
+	gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect
	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
+	gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
	honnef.co/go/tools v0.5.1 // indirect
@@ -1049,6 +1078,7 @@ require (
	modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect
	modernc.org/strutil v1.2.0 // indirect
	modernc.org/token v1.1.0 // indirect
+	rsc.io/binaryregexp v0.2.0 // indirect
)

replace github.com/pahanini/go-grpc-bidirectional-streaming-example v0.0.0-20211027164128-cc6111af44be => github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe
@@ -1061,24 +1091,14 @@ replace github.com/vishvananda/netlink => github.com/DataDog/netlink v1.0.1-0.20
// Use custom Trivy fork to reduce binary size
// Pull in replacements needed by upstream Trivy
replace (
-	// Maps to Trivy fork https://github.com/DataDog/trivy/commits/use-fs-main-dd/
-	github.com/aquasecurity/trivy => github.com/DataDog/trivy v0.0.0-20241216135157-95e0e96002ee
+	// Maps to Trivy fork https://github.com/DataDog/trivy/commits/lebauce/container-artifact
+	github.com/aquasecurity/trivy => github.com/DataDog/trivy v0.0.0-20241223234648-d2ac813bf11b
	github.com/saracen/walker => github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950
-	// testcontainers-go has a bug with versions v0.25.0 and v0.26.0
-	// ref: https://github.com/testcontainers/testcontainers-go/issues/1782
-	github.com/testcontainers/testcontainers-go => github.com/testcontainers/testcontainers-go v0.23.0
)

-// Temporarely use forks of trivy libraries to use lazy initialization of zap loggers.
-// Patch was pushed upstream but maintainers would prefer moving to slog once 1.22 is out
-replace github.com/aquasecurity/trivy-db => github.com/datadog/trivy-db v0.0.0-20240228172000-42caffdaee3f
-
// Fixes CVE-2023-1732, imported by nikos
replace github.com/cloudflare/circl => github.com/cloudflare/circl v1.3.7

-// Fixes CVE-2023-26054, imported by trivy
-replace github.com/moby/buildkit => github.com/moby/buildkit v0.13.0
-
// Exclude specific versions of knadh/koanf to fix building with a `go.work`, following
// https://github.com/open-telemetry/opentelemetry-collector/issues/8127
exclude (
@@ -1093,10 +1113,13 @@ replace github.com/ProtonMail/go-crypto => github.com/ProtonMail/go-crypto v1.0.
// Prevent a false-positive detection by the Google and Ikarus security vendors on VirusTotal exclude go.opentelemetry.io/proto/otlp v1.1.0 -replace github.com/google/gopacket v1.1.19 => github.com/DataDog/gopacket v0.0.0-20240626205202-4ac4cee31f14 +replace github.com/google/gopacket v1.1.19 => github.com/DataDog/gopacket v0.0.0-20250121143817-e1e3480abefb // Remove once https://github.com/kubernetes/kube-state-metrics/pull/2553 is merged -replace k8s.io/kube-state-metrics/v2 v2.13.1-0.20241025121156-110f03d7331f => github.com/L3n41c/kube-state-metrics/v2 v2.13.1-0.20241108192007-8859a4289d92 +replace k8s.io/kube-state-metrics/v2 v2.13.1-0.20241025121156-110f03d7331f => github.com/L3n41c/kube-state-metrics/v2 v2.13.1-0.20241119155242-07761b9fe9a0 // Remove once https://github.com/Iceber/iouring-go/pull/31 or equivalent is merged replace github.com/iceber/iouring-go => github.com/paulcacheux/iouring-go v0.0.0-20241115154236-2c7785c40a0f + +// github.com/golang/mock is unmaintained and archived, v1.6.0 is the last released version +replace github.com/golang/mock => github.com/golang/mock v1.6.0 diff --git a/go.sum b/go.sum index 5b5fca721ebcc..42048041ac1f8 100644 --- a/go.sum +++ b/go.sum @@ -13,6 +13,8 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw= cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM= cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= @@ -23,10 +25,16 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= +cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= +cloud.google.com/go/kms v1.19.0 h1:x0OVJDl6UH1BSX4THKlMfdcFWoE4ruh90ZHuilZekrU= +cloud.google.com/go/kms v1.19.0/go.mod h1:e4imokuPJUc17Trz2s6lEXFDt8bgDmvpVynH39bdrHM= +cloud.google.com/go/longrunning v0.6.0 h1:mM1ZmaNsQsnb+5n1DNPeL0KwQd9jQRqSqSDEkBZr+aI= +cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= 
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -64,6 +72,10 @@ code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78 h1:mrZQaZmuDIPh code.cloudfoundry.org/rfc5424 v0.0.0-20201103192249-000122071b78/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= code.cloudfoundry.org/tlsconfig v0.0.0-20200131000646-bbe0f8da39b3 h1:2Qal+q+tw/DmDOoJBWwDCPE3lIJNj/1o7oMkkb2c5SI= code.cloudfoundry.org/tlsconfig v0.0.0-20200131000646-bbe0f8da39b3/go.mod h1:eTbFJpyXRGuFVyg5+oaj9B2eIbIc+0/kZjH8ftbtdew= +cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e h1:GwCVItFUPxwdsEYnlUcJ6PJxOjTeFFCKOh6QWg4oAzQ= +cuelabs.dev/go/oci/ociregistry v0.0.0-20240314152124-224736b49f2e/go.mod h1:ApHceQLLwcOkCEXM1+DyCXTHEJhNGDpJ2kmV6axsx24= +cuelang.org/go v0.8.1 h1:VFYsxIFSPY5KgSaH1jQ2GxHOrbu6Ga3kEI70yCZwnOg= +cuelang.org/go v0.8.1/go.mod h1:CoDbYolfMms4BhWUlhD+t5ORnihR7wvjcfgyO9lL5FI= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -73,12 +85,18 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9 github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0 h1:59MxjQVfjXsBpLy+dbd2/ELV5ofnUkUZBvWSC85sheA= github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d h1:zjqpY4C7H15HjRPEenkS4SAn3Jy2eRRjkjZbGR30TOg= +github.com/AdamKorcz/go-fuzz-headers-1 v0.0.0-20230919221257-8b5d3ce2d11d/go.mod h1:XNqJ7hv2kY++g8XEHREpi+JqZo3+0l+CH2egBVN4yqM= github.com/AlekSi/pointer v1.0.0/go.mod h1:1kjywbfcPFCmncIxtk6fIEub6LKrfMz3gc5QKVOSOA8= github.com/AlekSi/pointer v1.2.0 h1:glcy/gc4h8HnG2Z3ZECSzZ1IX1x2JxRVuDzaJwQE0+w= github.com/AlekSi/pointer v1.2.0/go.mod h1:gZGfd3dpW4vEc/UlyfKKi1roIqcCgwOIvb0tSNSBle0= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0 h1:8+4G8JaejP8Xa6W46PzJEwisNgBXMvFcz78N6zG/ARw= +github.com/AliyunContainerService/ack-ram-tool/pkg/credentials/alibabacloudsdkgo/helper v0.2.0/go.mod h1:GgeIE+1be8Ivm7Sh4RgwI42aTtC9qrcj+Y9Y6CjJhJs= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0 h1:GJHeeA2N7xrG3q30L2UXDyuWRzDM900/65j70wcM4Ww= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.13.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 h1:nyQWyZvwGTvunIMxi1Y9uXkcyr+I7TeNrr/foo4Kpk8= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0/go.mod h1:l38EPgmsp71HHLq9j7De57JcKOWPyhrsW1Awm1JS6K0= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 h1:tfLQ34V6F7tVSwoTf/4lH5sE0o6eCJuNDTmH09nDpbc= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0/go.mod h1:9kIvujWAA58nmPmWB1m23fyWic1kYZMxD9CxaWn4Qpg= 
github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 h1:ywEEhmNahHBihViHepv3xPBn1663uRv2t2q/ESv9seY= @@ -91,17 +109,33 @@ github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3 github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v4 v4.3.0/go.mod h1:Y/HgrePTmGy9HjdSGTqZNa+apUpTVIEVKXJyARP2lrk= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1 h1:7CBQ+Ei8SP2c6ydQTGCCrS35bDxgTMfoP2miAwK++OU= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.1.1/go.mod h1:c/wcGeGx5FUPbM/JltUYHZcKmigwyVLJlDq+4HdtXaw= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0 h1:DRiANoJTiW6obBQe3SqZizkuV1PEgfiiGivmVocDy64= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azkeys v1.1.0/go.mod h1:qLIye2hwb/ZouqhpSD9Zn3SJipvpEnz1Ywl3VUk9Y0s= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0 h1:D3occbWoio4EBLkbkevetNMAVX197GkzbUMtqjGWn80= +github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.0.0/go.mod h1:bTSOgj05NGRuHHhQwAdPnYr9TOdNmKlZTgGLL6nyAdI= github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8= +github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= +github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod 
h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2 h1:XHOnouVk1mxXfQidrMEnLlPk9UMeRtyBTnEFtxkV0kU= github.com/AzureAD/microsoft-authentication-library-for-go v1.2.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= @@ -115,41 +149,39 @@ github.com/CycloneDX/cyclonedx-go v0.9.1 h1:yffaWOZsv77oTJa/SdVZYdgAgFioCeycBUKk github.com/CycloneDX/cyclonedx-go v0.9.1/go.mod h1:NE/EWvzELOFlG6+ljX/QeMlVt9VKcTwu8u0ccsACEsw= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= github.com/DataDog/appsec-internal-go v1.9.0 h1:cGOneFsg0JTRzWl5U2+og5dbtyW3N8XaYwc5nXe39Vw= github.com/DataDog/appsec-internal-go v1.9.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= github.com/DataDog/aptly v1.5.3 h1:oLsRvjuXSVM4ia0N83dU3KiQeiJ6BaszYbTZOkSfDlw= github.com/DataDog/aptly v1.5.3/go.mod h1:ZL5TfCso+z4enH03N+s3z8tYUJHhL6DlxIvnnP2TbY4= github.com/DataDog/cast v1.8.0 h1:uooY8bMzq+cjgiNP1VTquCWve5emgk8fRspZojJwQa8= github.com/DataDog/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/DataDog/datadog-agent/comp/core/log v0.56.2 h1:qvBT+FfjKGqimyEvmsNHCZKbTfBJAdUZSVy2IZQ8HS4= -github.com/DataDog/datadog-agent/comp/core/log v0.56.2/go.mod h1:ivJ/RMZjTNkoPPNDX+v/nnBwABLCiMv1vQA5tk/HCR4= -github.com/DataDog/datadog-api-client-go/v2 v2.33.0 h1:OI6kDnJeQmkjfGzxmP0XUQUxMD4tp6oAPXnnJ4VpgUM= -github.com/DataDog/datadog-api-client-go/v2 v2.33.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U= +github.com/DataDog/datadog-api-client-go/v2 v2.34.0 h1:0VVmv8uZg8vdBuEpiF2nBGUezl2QITrxdEsLgh38j8M= +github.com/DataDog/datadog-api-client-go/v2 v2.34.0/go.mod h1:d3tOEgUd2kfsr9uuHQdY+nXrWp4uikgTgVCPdKNK30U= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/datadog-operator v0.7.1-0.20241219210556-f517775059d1 h1:EFGXjo7iSZO1f0ZpLE2H2UIcPH2x4yhxQnKd8UKrs3k= -github.com/DataDog/datadog-operator v0.7.1-0.20241219210556-f517775059d1/go.mod h1:mD+3PWR0wOSVJGaXjkpzsYEK/7PhqjOipx2usgfsxM0= +github.com/DataDog/datadog-operator/api v0.0.0-20250114151552-463ab54482b4 h1:Lb06hh5dOz327LZZIfCu2/Kcxstf9ml7c0B2ZSm9Y5k= +github.com/DataDog/datadog-operator/api v0.0.0-20250114151552-463ab54482b4/go.mod h1:Ef4llzn4c4p6FPZNjeYgIQFHa2va2JPC8Wf/kivrF2E= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.11 h1:6vwU//TjBIghQKMgIP9UyIRhN/LWS1y8tYzvRnu8JZw= github.com/DataDog/dd-trace-go/v2 v2.0.0-beta.11/go.mod h1:woPHoAOfAIM7kl4GauR+qrWui7teNg44Um0verg2rzQ= -github.com/DataDog/ebpf-manager v0.7.6 
h1:EPH1VKeK7DuDRGFiOAmtTqTozdi2/SppTyLW0O1Zsa0= -github.com/DataDog/ebpf-manager v0.7.6/go.mod h1:F3ezth0x5IE6hE6p5mhG005TdExhu60fAWiB8cRMsP8= +github.com/DataDog/ebpf-manager v0.7.7 h1:4m+hZr0VyvvUxCVLI7g4iIQqUAijTgk6e1WKKtw4znE= +github.com/DataDog/ebpf-manager v0.7.7/go.mod h1:F3ezth0x5IE6hE6p5mhG005TdExhu60fAWiB8cRMsP8= github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe h1:RO40ywnX/vZLi4Pb4jRuFGgQQBYGIIoQ6u+P2MIgFOA= github.com/DataDog/go-grpc-bidirectional-streaming-example v0.0.0-20221024060302-b9cf785c02fe/go.mod h1:90sqV0j7E8wYCyqIp5d9HmYWLTFQttqPFFtNYDyAybQ= github.com/DataDog/go-libddwaf/v3 v3.5.1 h1:GWA4ln4DlLxiXm+X7HA/oj0ZLcdCwOS81KQitegRTyY= github.com/DataDog/go-libddwaf/v3 v3.5.1/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4= -github.com/DataDog/go-sqllexer v0.0.17 h1:u47fJAVg/+5DA74ZW3w0Qu+3qXHd3GtnA8ZBYixdPrM= -github.com/DataDog/go-sqllexer v0.0.17/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.20 h1:0fBknHo42yuhawZS3GtuQSdqcwaiojWjYNT6OdsZRfI= +github.com/DataDog/go-sqllexer v0.0.20/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee h1:tXibLZk3G6HncIFJKaNItsdzcrk4YqILNDZlXPTNt4k= github.com/DataDog/gohai v0.0.0-20230524154621-4316413895ee/go.mod h1:nTot/Iy0kW16bXgXr6blEc8gFeAS7vTqYlhAxh+dbc0= -github.com/DataDog/gopacket v0.0.0-20240626205202-4ac4cee31f14 h1:t34NfJA77KgFZsh8kcNFW57LZLa0kW2YSUs4MvLKRxU= -github.com/DataDog/gopacket v0.0.0-20240626205202-4ac4cee31f14/go.mod h1:riddUzxTSBpJXk3qBHtYr4qOhFhT6k/1c0E3qkQjQpA= +github.com/DataDog/gopacket v0.0.0-20250121143817-e1e3480abefb h1:DWP0inw/CyCUdBh/913y3B2NK+Suu1VDC90Hrm9qtxE= +github.com/DataDog/gopacket v0.0.0-20250121143817-e1e3480abefb/go.mod h1:riddUzxTSBpJXk3qBHtYr4qOhFhT6k/1c0E3qkQjQpA= github.com/DataDog/gopsutil v1.2.2 h1:8lmthwyyCXa1NKiYcHlrtl9AAFdfbNI2gPcioCJcBPU= github.com/DataDog/gopsutil v1.2.2/go.mod h1:glkxNt/qRu9lnpmUEQwOIAXW+COWDTBOTEAHqbgBPts= github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= @@ -160,28 +192,26 @@ github.com/DataDog/netlink v1.0.1-0.20240223195320-c7a4f832a3d1 h1:HnvrdC79xJ+RP github.com/DataDog/netlink v1.0.1-0.20240223195320-c7a4f832a3d1/go.mod h1:whJevzBpTrid75eZy99s3DqCmy05NfibNaF2Ol5Ox5A= github.com/DataDog/nikos v1.12.9 h1:mE8UUY+9iWI1+V71xJmRy7X8K8wXkO+UboI4he8sMK4= github.com/DataDog/nikos v1.12.9/go.mod h1:ZIwrmoyRId2qWpkREwQPWT4cjrui0d4yYM5pbRVizNo= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0 h1:r1Dx2cRHCBWkVluSZA41i4eoI/nOGbcrrZdkqWjoFCc= -github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.22.0/go.mod h1:+/dkO8ZiMa8rfm4SmtTF6qPUdBbBcvsWWKaO4xPKAIk= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0 h1:cXcKVEU1D0HlguR7GunnvuI70TghkarCa9DApqzMY94= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0/go.mod h1:ES00EXfyEKgUkjd93tAXCxJA6i0seeOhZoS5Cj2qzzg= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0 h1:Zqj8YUZ/ualUhM8GDCQX6xKnUJKEiG0eYdFGWmIDG30= 
-github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.22.0/go.mod h1:lpr4q6g2TB0BHeLHaz/XleKm8YXQjuxiQEb9Q9HXXE0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0 h1:w9+ngZDYUMLW+GSRA8x1DvVbuMR+cwlGb8VLwZfgBGs= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.22.0/go.mod h1:UsfqLgiD6Sjhpjkg+YzAd+TdKUZ2m6ZZ8t+tEkLNTMA= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 h1:63SzQz9Ab8XJj8fQKQz6UZNBhOm8rucwzbDfwTVF6dQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0/go.mod h1:E/PY/aQ6S/N5hBPHXZRGmovs5b1BSi4RHGNcB4yP/Z0= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0 h1:Fth9wZCAVbIUvlKq/QXT7QINza+epFaKtIvy1qqybbg= +github.com/DataDog/opentelemetry-mapping-go/pkg/inframetadata v0.24.0/go.mod h1:7D+x/7CIdzklC9spgB3lrg8GUvIW52Y8SMONrBCiPbw= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0 h1:ttW3C3IN8p1goqyvaVpT4Blzg3lQ+sh4MTtB33BbpdE= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0/go.mod h1:FpUbxBqKdi16CDJnRifUzmkETaEYR75xvh2Vo8vvJN0= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0 h1:wZaNTYVo2WIHzvn8GBAH4FNbXac5A+hfETeK0YxYYnw= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/logs v0.24.0/go.mod h1:0JvUXmUWULz1XU0RTaNPLgces6LJvI/FinPO5suiJOo= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0 h1:dG1rn794tdEpU+fqHumwx/Ngcc7uVPlJT/xt/4L1lmQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/metrics v0.24.0/go.mod h1:UWDxETdZ0XK3lpVJ4JYa16oYhu5H6IluXPrDtyvMIwU= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 h1:Uha4TTkbCcYTvUbkbfvUjUmxtPaPKCOtwwl91erkRRg= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0/go.mod h1:RWoMSFb2Q+L0FSRYctEt8Wp0em+InUg+Oe+BU30e7gA= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= -github.com/DataDog/trivy v0.0.0-20241216135157-95e0e96002ee h1:taj22FDHhWs9QkVJypOYMhdgJTnTb5y4SHiW7YcJJms= -github.com/DataDog/trivy v0.0.0-20241216135157-95e0e96002ee/go.mod h1:hv4bqUzcHXSfVft8E9DUlu9d+szHrxYSh+WykZU70Dk= +github.com/DataDog/trivy v0.0.0-20241223234648-d2ac813bf11b h1:wCKboWBVsnpnFnBKGhQ3jeQOVDPQkMRTLWcs2bxRjss= +github.com/DataDog/trivy v0.0.0-20241223234648-d2ac813bf11b/go.mod h1:qJj5iHmlvtSbgmRWJDANpAFmmxcXuKGQfucp9VtJfR8= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= github.com/DataDog/viper v1.14.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= -github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950 h1:2imDajw3V85w1iqHsuXN+hUBZQVF+r9eME8tsPq/HpA= -github.com/DataDog/walker v0.0.0-20230418153152-7f29bb2dc950/go.mod h1:FU+7qU8DeQQgSZDmmThMJi93kPkLFgy0oVAcLxurjIk= -github.com/DataDog/watermarkpodautoscaler v0.5.3-0.20241023200123-ab786c1724cf h1:nbsZ9srTWTTlHzWDGkVE6R5hnqENXTK9N8doMC2YPps= -github.com/DataDog/watermarkpodautoscaler v0.5.3-0.20241023200123-ab786c1724cf/go.mod h1:ay+v2Blaq9nA5YngtqXJe6Z9JOdeSmOEyBkmKs1yyQQ= +github.com/DataDog/watermarkpodautoscaler/apis v0.0.0-20250108152814-82e58d0231d1 h1:9hiwoIk8FOsXDkdcdgNJ48iYaPfn+/7bXEwAnnfKjTc= +github.com/DataDog/watermarkpodautoscaler/apis 
v0.0.0-20250108152814-82e58d0231d1/go.mod h1:57ytxiQR5KMYNeDgNqKn9y1LJMoRamBKHj3nvURhAdQ= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f h1:5Vuo4niPKFkfwW55jV4vY0ih3VQ9RaQqeqY67fvRn8A= @@ -199,8 +229,8 @@ github.com/Intevation/gval v1.3.0 h1:+Ze5sft5MmGbZrHj06NVUbcxCb67l9RaPTLMNr37mjw github.com/Intevation/gval v1.3.0/go.mod h1:xmGyGpP5be12EL0P12h+dqiYG8qn2j3PJxIgkoOHO5o= github.com/Intevation/jsonpath v0.2.1 h1:rINNQJ0Pts5XTFEG+zamtdL7l9uuE1z0FBA+r55Sw+A= github.com/Intevation/jsonpath v0.2.1/go.mod h1:WnZ8weMmwAx/fAO3SutjYFU+v7DFreNYnibV7CiaYIw= -github.com/L3n41c/kube-state-metrics/v2 v2.13.1-0.20241108192007-8859a4289d92 h1:l8Sk6DeEeAyCjjleYu6Kr/530dH7V2WDNaeJLB9CADE= -github.com/L3n41c/kube-state-metrics/v2 v2.13.1-0.20241108192007-8859a4289d92/go.mod h1:sGt/NFkZkA4hqb4cVd/xG2G17dzZ72TQXqSpHn8rF/U= +github.com/L3n41c/kube-state-metrics/v2 v2.13.1-0.20241119155242-07761b9fe9a0 h1:xrmnY+qafvx1mB2uKBDPBzL4VWea0nC7gTaELFHNjg0= +github.com/L3n41c/kube-state-metrics/v2 v2.13.1-0.20241119155242-07761b9fe9a0/go.mod h1:sGt/NFkZkA4hqb4cVd/xG2G17dzZ72TQXqSpHn8rF/U= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -233,6 +263,8 @@ github.com/Showmax/go-fqdn v1.0.0/go.mod h1:SfrFBzmDCtCGrnHhoDjuvFnKsWjEQX/Q9ARZ github.com/StackExchange/wmi v0.0.0-20181212234831-e0a55b97c705/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= +github.com/ThalesIgnite/crypto11 v1.2.5 h1:1IiIIEqYmBvUYFeMnHqRft4bwf/O36jryEUpY+9ef8E= +github.com/ThalesIgnite/crypto11 v1.2.5/go.mod h1:ILDKtnCKiQ7zRoNxcp36Y1ZR8LBPmR2E23+wTQe/MlE= github.com/VividCortex/ewma v1.2.0 h1:f58SaIzcDXrSy3kWaHNvuJgJ3Nmz59Zji6XoJR/q1ow= github.com/VividCortex/ewma v1.2.0/go.mod h1:nz4BbCtbLyFDeC9SUHbtcT5644juEuWfUAUnGx7j5l4= github.com/acobaugh/osrelease v0.1.0 h1:Yb59HQDGGNhCj4suHaFQQfBps5wyoKLSSX/J/+UifRE= @@ -258,18 +290,42 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30 h1:t3eaIm0rUkzbrIewtiFmMK5RXHej2XnoXNhxVsAYUfg= github.com/alecthomas/units v0.0.0-20240626203959-61d1e3462e30/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alessio/shellescape v1.4.1 h1:V7yhSDDn8LP4lc4jS8pFkt0zCnzVJlG5JXy9BVKJUX0= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4 h1:iC9YFYKDGEy3n/FtqJnOkZsene9olVspKmkX5A2YBEo= +github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.4/go.mod h1:sCavSAvdzOjul4cEqeVtvlSaSScfNsTQ+46HwlTL1hc= +github.com/alibabacloud-go/cr-20160607 v1.0.1 h1:WEnP1iPFKJU74ryUKh/YDPHoxMZawqlPajOymyNAkts= +github.com/alibabacloud-go/cr-20160607 v1.0.1/go.mod h1:QHeKZtZ3F3FOE+/uIXCBAp8POwnUYekpLwr1dtQa5r0= +github.com/alibabacloud-go/cr-20181201 v1.0.10 h1:B60f6S1imsgn2fgC6X6FrVNrONDrbCT0NwYhsJ0C9/c= +github.com/alibabacloud-go/cr-20181201 
v1.0.10/go.mod h1:VN9orB/w5G20FjytoSpZROqu9ZqxwycASmGqYUJSoDc= +github.com/alibabacloud-go/darabonba-openapi v0.2.1 h1:WyzxxKvhdVDlwpAMOHgAiCJ+NXa6g5ZWPFEzaK/ewwY= +github.com/alibabacloud-go/darabonba-openapi v0.2.1/go.mod h1:zXOqLbpIqq543oioL9IuuZYOQgHQ5B8/n5OPrnko8aY= +github.com/alibabacloud-go/debug v1.0.0 h1:3eIEQWfay1fB24PQIEzXAswlVJtdQok8f3EVN5VrBnA= +github.com/alibabacloud-go/debug v1.0.0/go.mod h1:8gfgZCCAC3+SCzjWtY053FrOcd4/qlH6IHTI4QyICOc= +github.com/alibabacloud-go/endpoint-util v1.1.1 h1:ZkBv2/jnghxtU0p+upSU0GGzW1VL9GQdZO3mcSUTUy8= +github.com/alibabacloud-go/endpoint-util v1.1.1/go.mod h1:O5FuCALmCKs2Ff7JFJMudHs0I5EBgecXXxZRyswlEjE= +github.com/alibabacloud-go/openapi-util v0.1.0 h1:0z75cIULkDrdEhkLWgi9tnLe+KhAFE/r5Pb3312/eAY= +github.com/alibabacloud-go/openapi-util v0.1.0/go.mod h1:sQuElr4ywwFRlCCberQwKRFhRzIyG4QTP/P4y1CJ6Ws= +github.com/alibabacloud-go/tea v1.2.1 h1:rFF1LnrAdhaiPmKwH5xwYOKlMh66CqRwPUTzIK74ask= +github.com/alibabacloud-go/tea v1.2.1/go.mod h1:qbzof29bM/IFhLMtJPrgTGK3eauV5J2wSyEUo4OEmnA= +github.com/alibabacloud-go/tea-utils v1.4.5 h1:h0/6Xd2f3bPE4XHTvkpjwxowIwRCJAJOqY6Eq8f3zfA= +github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5aYp9U67G+E3R8fcQw= +github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0= +github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis/v2 v2.31.1 h1:7XAt0uUg3DtwEKW5ZAGa+K7FZV2DdKQo5K/6TTnfX8Y= -github.com/alicebob/miniredis/v2 v2.31.1/go.mod h1:UB/T2Uztp7MlFSDakaX1sTXUv5CASoprx0wulRT6HBg= +github.com/alicebob/miniredis/v2 v2.33.0 h1:uvTF0EDeu9RLnUEG27Db5I68ESoIxTiXbNUiji6lZrA= +github.com/alicebob/miniredis/v2 v2.33.0/go.mod h1:MhP4a3EU7aENRi9aO+tHfTBZicLqQevyi/DJpoj6mi0= +github.com/aliyun/credentials-go v1.3.1 h1:uq/0v7kWrxmoLGpqjx7vtQ/s03f0zR//0br/xWDTE28= +github.com/aliyun/credentials-go v1.3.1/go.mod h1:8jKYhQuDawt8x2+fusqa1Y6mPxemTsBEN04dgcAcYz0= github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092 h1:aM1rlcoLz8y5B2r4tTLMiVTrMtpfY0O8EScKJxaSaEc= github.com/anchore/go-struct-converter v0.0.0-20221118182256-c68fdcfa2092/go.mod h1:rYqSE9HbjzpHTI74vwPvae4ZVYZd1lue2ta6xHPdblA= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8= github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= -github.com/antchfx/xmlquery v1.4.2 h1:MZKd9+wblwxfQ1zd1AdrTsqVaMjMCwow3IqkCSe00KA= -github.com/antchfx/xmlquery v1.4.2/go.mod h1:QXhvf5ldTuGqhd1SHNvvtlhhdQLks4dD0awIVhXIDTA= -github.com/antchfx/xpath v1.3.2 h1:LNjzlsSjinu3bQpw9hWMY9ocB80oLOWuQqFvO6xt51U= -github.com/antchfx/xpath v1.3.2/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/antchfx/xmlquery v1.4.3 h1:f6jhxCzANrWfa93O+NmRWvieVyLs+R2Szfpy+YrZaww= +github.com/antchfx/xmlquery v1.4.3/go.mod h1:AEPEEPYE9GnA2mj5Ur2L5Q5/2PycJ0N9Fusrx9b12fc= +github.com/antchfx/xpath v1.3.3 h1:tmuPQa1Uye0Ym1Zn65vxPgfltWb/Lxu2jeqIGteJSRs= +github.com/antchfx/xpath v1.3.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= @@ -286,14 +342,17 @@ github.com/aquasecurity/go-npm-version v0.0.0-20201110091526-0b796d180798/go.mod github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 h1:vmXNl+HDfqqXgr0uY1UgK1GAhps8nbAAtqHNBcgyf+4= github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46/go.mod h1:olhPNdiiAAMiSujemd1O/sc6GcyePr23f/6uGKtthNg= github.com/aquasecurity/go-version v0.0.0-20201107203531-5e48ac5d022a/go.mod h1:9Beu8XsUNNfzml7WBf3QmyPToP1wm1Gj/Vc5UJKqTzU= -github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 h1:rcEG5HI490FF0a7zuvxOxen52ddygCfNVjP0XOCMl+M= github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492/go.mod h1:9Beu8XsUNNfzml7WBf3QmyPToP1wm1Gj/Vc5UJKqTzU= +github.com/aquasecurity/go-version v0.0.0-20240603093900-cf8a8d29271d h1:4zour5Sh9chOg+IqIinIcJ3qtr3cIf8FdFY6aArlXBw= +github.com/aquasecurity/go-version v0.0.0-20240603093900-cf8a8d29271d/go.mod h1:1cPOp4BaQZ1G2F5fnw4dFz6pkOyXJI9KTuak8ghIl3U= github.com/aquasecurity/table v1.8.0 h1:9ntpSwrUfjrM6/YviArlx/ZBGd6ix8W+MtojQcM7tv0= github.com/aquasecurity/table v1.8.0/go.mod h1:eqOmvjjB7AhXFgFqpJUEE/ietg7RrMSJZXyTN8E/wZw= -github.com/aquasecurity/testdocker v0.0.0-20230111101738-e741bda259da h1:pj/adfN0Wbzc0H8YkI1nX5K92wOU5/1/1TRuuc0y5Nw= -github.com/aquasecurity/testdocker v0.0.0-20230111101738-e741bda259da/go.mod h1:852lbQLpK2nCwlR4ZLYIccxYCfoQao6q9Nl6tjz54v8= +github.com/aquasecurity/testdocker v0.0.0-20240730042311-4642e94c7fc8 h1:b43UVqYjz7qDqK+cVOtF2Lk6CxjytYItP6Pgf3wGsNE= +github.com/aquasecurity/testdocker v0.0.0-20240730042311-4642e94c7fc8/go.mod h1:wXA9k3uuaxY3yu7gxrxZDPo/04FEMJtwyecdAlYrEIo= github.com/aquasecurity/tml v0.6.1 h1:y2ZlGSfrhnn7t4ZJ/0rotuH+v5Jgv6BDDO5jB6A9gwo= github.com/aquasecurity/tml v0.6.1/go.mod h1:OnYMWY5lvI9ejU7yH9LCberWaaTBW7hBFsITiIMY2yY= +github.com/aquasecurity/trivy-db v0.0.0-20240910133327-7e0f4d2ed4c1 h1:G0gnacAORRUqz2Tm5MqivSpldY2GZ74ijhJcMsae+sA= +github.com/aquasecurity/trivy-db v0.0.0-20240910133327-7e0f4d2ed4c1/go.mod h1:PYkSRx4dlgFATEt+okGwibvbxVEtqsOdH+vX/saACYE= github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48 h1:JVgBIuIYbwG+ekC5lUHUpGJboPYiCcxiz06RCtz8neI= github.com/aquasecurity/trivy-java-db v0.0.0-20240109071736-184bd7481d48/go.mod h1:Ldya37FLi0e/5Cjq2T5Bty7cFkzUDwTcPeQua+2M8i8= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= @@ -318,42 +377,46 @@ github.com/awalterschulze/gographviz v2.0.3+incompatible/go.mod h1:GEV5wmg4YquNw github.com/aws/aws-lambda-go v1.37.0 h1:WXkQ/xhIcXZZ2P5ZBEw+bbAKeCEcb5NtiYpSwVVzIXg= github.com/aws/aws-lambda-go v1.37.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= -github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= +github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk= +github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= github.com/aws/aws-sdk-go-v2 v1.21.2/go.mod h1:ErQhvNuEMhJjweavOYhxVkn2RUx7kQXVATHrjKtxIpM= -github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= -github.com/aws/aws-sdk-go-v2 v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= 
+github.com/aws/aws-sdk-go-v2 v1.33.0 h1:Evgm4DI9imD81V0WwD+TN4DCwjUMdc94TrduMLbgZJs= +github.com/aws/aws-sdk-go-v2 v1.33.0/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.4.14/go.mod h1:9NCTOURS8OpxvoAVHq79LK81/zC78hfRWFn+aL0SPcY= github.com/aws/aws-sdk-go-v2/config v1.19.0/go.mod h1:ZwDUgFnQgsazQTnWfeLWk5GjeqTQTL8lMkoE1UXzxdE= -github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= -github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/config v1.29.1 h1:JZhGawAyZ/EuJeBtbQYnaoftczcb2drR2Iq36Wgz4sQ= +github.com/aws/aws-sdk-go-v2/config v1.29.1/go.mod h1:7bR2YD5euaxBhzt2y/oDkt3uNRb6tjFp98GlTFueRwk= github.com/aws/aws-sdk-go-v2/credentials v1.13.43/go.mod h1:zWJBz1Yf1ZtX5NGax9ZdNjhhI4rgjfgsyk6vTY1yfVg= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= -github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/credentials v1.17.54 h1:4UmqeOqJPvdvASZWrKlhzpRahAulBfyTJQUaYy4+hEI= +github.com/aws/aws-sdk-go-v2/credentials v1.17.54/go.mod h1:RTdfo0P0hbbTxIhmQrOsC/PquBZGabEPnCaxxKRPSnI= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.13.13/go.mod h1:f/Ib/qYjhV2/qdsf79H3QP/eRE4AkVyEf6sk7XfZ1tg= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24 h1:5grmdTdMsovn9kPZPI23Hhvp0ZyNm5cRO+IZFIYiAfw= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.24/go.mod h1:zqi7TVKTswH3Ozq28PkmBmgzG1tona7mo9G2IJg4Cis= github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.43/go.mod h1:auo+PiyLl0n1l8A0e8RIeR8tOzYPfZZH/JNlrJ8igTQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28 h1:igORFSiH3bfq4lxKFkTSYDhJEUCYo6C8VKiWJjYwQuQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.28/go.mod h1:3So8EA/aAYm36L7XIvCVwLa0s5N0P7o2b1oqnx/2R4g= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.37/go.mod h1:Qe+2KtKml+FEsQF/DHmDV+xjtche/hwoF75EG4UlHW8= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28 h1:1mOW9zAUMhTSrMDssEHS/ajx8JcAj/IcftzcmNlmVLI= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.28/go.mod h1:kGlXVIWDfvt2Ox5zEaNglmq0hXPHgQFNMix33Tw22jA= github.com/aws/aws-sdk-go-v2/internal/ini v1.3.45/go.mod h1:lD5M20o09/LCuQ2mE62Mb/iSdSlCNuj6H5ci7tW7OsE= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.1.6/go.mod h1:Q0Hq2X/NuL7z8b1Dww8rmOFl+jzusKEcyvkKspwdpyc= -github.com/aws/aws-sdk-go-v2/service/ebs v1.27.0 h1:4zuGQITyy9O+GlSGcs+aUz3+SmlvnYFc1/o4lRBs5Bw= -github.com/aws/aws-sdk-go-v2/service/ebs v1.27.0/go.mod 
h1:T0t6q7wBD2P11xwVcc6GvwmuDT3i6ZJgZ+13ziQUUnA= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.190.0 h1:k97fGog9Tl0woxTiSIHN14Qs5ehqK6GXejUwkhJYyL0= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.190.0/go.mod h1:mzj8EEjIHSN2oZRXiw1Dd+uB4HZTl7hC8nBzX9IZMWw= +github.com/aws/aws-sdk-go-v2/service/ebs v1.22.1 h1:SeDJWG4pmye+/aO6k+zt9clPTUy1MXqUmkW8rbAddQg= +github.com/aws/aws-sdk-go-v2/service/ebs v1.22.1/go.mod h1:wRzaW0v9GGQS0h//wpsVDw3Hah5gs5UP+NxoyGeZIGM= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0 h1:3hH6o7Z2WeE1twvz44Aitn6Qz8DZN3Dh5IB4Eh2xq7s= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.200.0/go.mod h1:I76S7jN0nfsYTBtuTgTsJtK2Q8yJVDgrLr5eLN64wMA= +github.com/aws/aws-sdk-go-v2/service/ecr v1.36.7 h1:R+5XKIJga2K9Dkj0/iQ6fD/MBGo02oxGGFTc512lK/Q= +github.com/aws/aws-sdk-go-v2/service/ecr v1.36.7/go.mod h1:fDPQV/6ONOQOjvtKhtypIy1wcGLcKYtoK/lvZ9fyDGQ= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2 h1:PpbXaecV3sLAS6rjQiaKw4/jyq3Z8gNzmoJupHAoBp0= +github.com/aws/aws-sdk-go-v2/service/ecrpublic v1.18.2/go.mod h1:fUHpGXr4DrXkEDpGAjClPsviWf+Bszeb0daKE0blxv8= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.9.15/go.mod h1:26SQUPcTNgV1Tapwdt4a1rOsYRsnBsJHLMPoxK2b0d8= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.1.38/go.mod h1:epIZoRSSbRIwLPJU5F+OldHhwZPBdpDeQkRdCeY3+00= github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.37/go.mod h1:vBmDnwWXWxNPFRMmG2m/3MKOe+xEcMDo1tanpaWCcck= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9 h1:TQmKDyETFGiXVhZfQ/I0cCFziqqX58pi4tKJGYGFSz0= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.9/go.mod h1:HVLPK2iHQBUx7HfZeOQSEu3v2ubZaAY2YPbAm5/WUyY= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.15.6/go.mod h1:lnc2taBsR9nTlz9meD+lhFZZ9EWY712QHrRflWpTcOA= github.com/aws/aws-sdk-go-v2/service/kms v1.37.6 h1:CZImQdb1QbU9sGgJ9IswhVkxAcjkkD1eQTMA1KHWk+E= github.com/aws/aws-sdk-go-v2/service/kms v1.37.6/go.mod h1:YJDdlK0zsyxVBxGU48AR/Mi8DMrGdc1E3Yij4fNrONA= @@ -363,17 +426,19 @@ github.com/aws/aws-sdk-go-v2/service/s3 v1.40.2/go.mod h1:Zjfqt7KhQK+PO1bbOsFNzK github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.6 h1:1KDMKvOKNrpD667ORbZ/+4OgvUoaok1gg/MLzrHF9fw= github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.6/go.mod h1:DmtyfCfONhOyVAJ6ZMTrDSFIeyCBlEO93Qkfhxwbxu0= github.com/aws/aws-sdk-go-v2/service/sso v1.15.2/go.mod h1:gsL4keucRCgW+xA85ALBpRFfdSLH4kHOVSnLMSuBECo= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= -github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.11 h1:kuIyu4fTT38Kj7YCC7ouNbVZSSpqkZ+LzIfhCr6Dg+I= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.11/go.mod h1:Ro744S4fKiCCuZECXgOi760TiYylUM8ZBf6OGiZzJtY= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.17.3/go.mod h1:a7bHA82fyUXOm+ZSWKU6PIoBxrjSprdLoM8xPYvzYVg= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6 h1:JnhTZR3PiYDNKlXy50/pNeix9aGMo6lLpXwJ1mw8MD4= 
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.6/go.mod h1:URronUEGfXZN1VpdktPSD1EkAL9mfrV+2F4sjH38qOY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10 h1:l+dgv/64iVlQ3WsBbnn+JSbkj01jIi+SM0wYsj3y/hY= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.28.10/go.mod h1:Fzsj6lZEb8AkTE5S68OhcbBqeWPsR8RnGuKPr8Todl8= github.com/aws/aws-sdk-go-v2/service/sts v1.23.2/go.mod h1:Eows6e1uQEsc4ZaHANmsPRzAKcVDrcmjjWiih2+HUUQ= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= -github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.9 h1:BRVDbewN6VZcwr+FBOszDKvYeXY1kJ+GGMCcpghlw0U= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.9/go.mod h1:f6vjfZER1M17Fokn0IzssOTMT2N8ZSq+7jnNF0tArvw= github.com/aws/smithy-go v1.15.0/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8 h1:SoFYaT9UyGkR0+nogNyD/Lj+bsixB+SNuAS4ABlEs6M= +github.com/awslabs/amazon-ecr-credential-helper/ecr-login v0.0.0-20231024185945-8841054dbdb8/go.mod h1:2JF49jcDOrLStIXN/j/K1EKRq8a8R2qRnlZA6/o/c7c= github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk= github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg= github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3 h1:6df1vn4bBlDDo4tARvBm7l6KA9iVMnE3NWizDeWSrps= @@ -394,10 +459,12 @@ github.com/bitnami/go-version v0.0.0-20231130084017-bb00604d650c h1:C4UZIaS+HAw+ github.com/bitnami/go-version v0.0.0-20231130084017-bb00604d650c/go.mod h1:9iglf1GG4oNRJ39bZ5AZrjgAFD2RwQbXw6Qf7Cs47wo= github.com/blabber/go-freebsd-sysctl v0.0.0-20201130114544-503969f39d8f h1:gMH+lz/KRpSqdoL+IQjgd91bP1LB8vrVEfNxr47GYC8= github.com/blabber/go-freebsd-sysctl v0.0.0-20201130114544-503969f39d8f/go.mod h1:cTRyHktEaXkKTTEyZ0hAgS7H4V0AVoKhB8Dx0tVr/tY= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bmatcuk/doublestar/v4 v4.7.1 h1:fdDeAqgT47acgwd9bd9HxJRDmc9UAmPpc+2m0CXv75Q= -github.com/bmatcuk/doublestar/v4 v4.7.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= +github.com/bmatcuk/doublestar/v4 v4.8.0 h1:DSXtrypQddoug1459viM9X9D3dp1Z7993fw36I2kNcQ= +github.com/bmatcuk/doublestar/v4 v4.8.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40 h1:y4B3+GPxKlrigF1ha5FFErxK+sr6sWxQovRMzwMhejo= github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c= github.com/bool64/dev v0.2.34 h1:P9n315P8LdpxusnYQ0X7MP1CZXwBK5ae5RZrd+GdSZE= @@ -408,12 +475,18 @@ github.com/bradleyjkemp/cupaloy/v2 v2.8.0 h1:any4BmKE+jGIaMpnU8YgH/I2LPiLBufr6oM github.com/bradleyjkemp/cupaloy/v2 v2.8.0/go.mod h1:bm7JXdkRd4BHJk9HpwqAI8BoAY1lps46Enkdqw6aRX0= github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= github.com/briandowns/spinner v1.23.0/go.mod 
h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= -github.com/bsm/ginkgo/v2 v2.9.5 h1:rtVBYPs3+TC5iLUVOis1B9tjLTup7Cj5IfzosKtvTJ0= -github.com/bsm/ginkgo/v2 v2.9.5/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= -github.com/bsm/gomega v1.26.0 h1:LhQm+AFcgV2M0WyKroMASzAzCAJVpAxQXv4SaI9a69Y= -github.com/bsm/gomega v1.26.0/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= +github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs= +github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c= +github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA= +github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0= github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs= github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= +github.com/buildkite/agent/v3 v3.62.0 h1:yvzSjI8Lgifw883I8m9u8/L/Thxt4cLFd5aWPn3gg70= +github.com/buildkite/agent/v3 v3.62.0/go.mod h1:jN6SokGXrVNNIpI0BGQ+j5aWeI3gin8F+3zwA5Q6gqM= +github.com/buildkite/go-pipeline v0.3.2 h1:SW4EaXNwfjow7xDRPGgX0Rcx+dPj5C1kV9LKCLjWGtM= +github.com/buildkite/go-pipeline v0.3.2/go.mod h1:iY5jzs3Afc8yHg6KDUcu3EJVkfaUkd9x/v/OH98qyUA= +github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251 h1:k6UDF1uPYOs0iy1HPeotNa155qXRWrzKnqAaGXHLZCE= +github.com/buildkite/interpolate v0.0.0-20200526001904-07f35b4ae251/go.mod h1:gbPR1gPu9dB96mucYIR7T3B7p/78hRVSOuzIWLHK2Y4= github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2 h1:3uZCA/BLTIu+DqCfguByNMJa2HVHpXvjfy0Dy7g6fuA= github.com/bytecodealliance/wasmtime-go/v3 v3.0.2/go.mod h1:RnUjnIXxEJcL6BgCvNyzCCRzZcxCgsZCi+RNlvYor5Q= @@ -421,6 +494,8 @@ github.com/cavaliergopher/grab/v3 v3.0.1 h1:4z7TkBfmPjmLAAmkkAZNX/6QJ1nNFdv3SdIH github.com/cavaliergopher/grab/v3 v3.0.1/go.mod h1:1U/KNnD+Ft6JJiYoYBAimKH2XrYptb8Kl3DFGmsjpq4= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M= +github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -433,8 +508,10 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cheggaaa/pb v1.0.10/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo= github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= -github.com/cheggaaa/pb/v3 v3.1.4 h1:DN8j4TVVdKu3WxVwcRKu0sG00IIU6FewoABZzXbRQeo= -github.com/cheggaaa/pb/v3 v3.1.4/go.mod h1:6wVjILNBaXMs8c21qRiaUM8BR82erfgau1DQ4iUXmSA= +github.com/cheggaaa/pb/v3 v3.1.5 h1:QuuUzeM2WsAqG2gMqtzaWithDJv0i+i6UlnwSCI4QLk= +github.com/cheggaaa/pb/v3 v3.1.5/go.mod h1:CrxkeghYTXi1lQBEI7jSn+3svI3cuc19haAj6jM60XI= +github.com/chrismellard/docker-credential-acr-env v0.0.0-20230304212654-82a0ddb27589 h1:krfRl01rzPzxSxyLyrChD+U+MzsBXbm0OwYYB67uF+4= +github.com/chrismellard/docker-credential-acr-env 
v0.0.0-20230304212654-82a0ddb27589/go.mod h1:OuDyvmLnMCwa2ep4Jkm6nyA0ocJuZlGyk2gGseVzERM= github.com/chrusty/protoc-gen-jsonschema v0.0.0-20240212064413-73d5723042b8 h1:/Jd/W5vhPuLHSNOzK15RkiVd9zhFHGuxbOV+mRGFYQ0= github.com/chrusty/protoc-gen-jsonschema v0.0.0-20240212064413-73d5723042b8/go.mod h1:VC7bNYA8cg2IWaZDBNfrqUBcUMEuJ/RRkus0frzTQb4= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -448,6 +525,8 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4 h1:HuhwZtbyvyOw+3Z1AowPkU87JkJUSv751ELWaiTpj8I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= +github.com/clbanning/mxj/v2 v2.7.0 h1:WA/La7UGCanFe5NpHF0Q3DNtnCsVoxbPKuyBNHWRyME= +github.com/clbanning/mxj/v2 v2.7.0/go.mod h1:hNiWqW14h+kc+MdF9C6/YoRfjEJoR3ou6tn/Qo+ve2s= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/cbpfc v0.0.0-20240920015331-ff978e94500b h1:EgR1t4Lnq6uP6QxJQ+oIFtENOHUY3/7gMOE76vL0KcA= github.com/cloudflare/cbpfc v0.0.0-20240920015331-ff978e94500b/go.mod h1:X/9cHz8JVzKlvoZyKBgMgrogKZlLf+pWjmm5gSUm5dI= @@ -460,18 +539,22 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI= github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cockroachdb/apd/v3 v3.2.1 h1:U+8j7t0axsIgvQUqthuNm82HIrYXodOV2iWLWtEaIwg= +github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE= github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4= -github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= -github.com/containerd/containerd v1.7.23 h1:H2CClyUkmpKAGlhQp95g2WXHfLYc7whAuvZGBNYOOwQ= -github.com/containerd/containerd v1.7.23/go.mod h1:7QUzfURqZWCZV7RLNEn1XjUCQLEf0bkaK4GjUaZehxw= +github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be h1:J5BL2kskAlV9ckgEsNQXscjIaLiOYiZ75d4e94E6dcQ= +github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be/go.mod h1:mk5IQ+Y0ZeO87b858TlA645sVcEcbiX6YqP98kt+7+w= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/containerd v1.7.25 h1:khEQOAXOEJalRO228yzVsuASLH42vT7DIo9Ss+9SMFQ= +github.com/containerd/containerd v1.7.25/go.mod h1:tWfHzVI0azhw4CT2vaIjsb2CoV4LJ9PrMPaULAr21Ok= github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0= github.com/containerd/containerd/api v1.8.0/go.mod 
h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= -github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8= -github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ= +github.com/containerd/continuity v0.4.4 h1:/fNVfTJ7wIl/YPMHjf+5H32uFhl63JucB34PlCpMKII= +github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= @@ -482,8 +565,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= -github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= -github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= +github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8= +github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU= github.com/containerd/ttrpc v1.2.5 h1:IFckT1EFQoFBMG4c3sMdT8EP3/aKfumK1msY+Ze4oLU= github.com/containerd/ttrpc v1.2.5/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= @@ -497,11 +580,14 @@ github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkE github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= +github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU= +github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea h1:IHPWgevPcOUjTvj3n7Qgm+nie6xs/xV8dmO5MddNTpc= -github.com/coreos/go-systemd v0.0.0-20180202092358-40e2722dffea/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= @@ -516,10 +602,12 @@ github.com/cri-o/ocicni v0.4.3 
h1:BfnrZrtr/F+o+b+yOguB1o6I4OzjieF3k3dN4MrsCJA= github.com/cri-o/ocicni v0.4.3/go.mod h1:RzIKSln5AT65hyyfGj3/gsfCpjiY1Y6rVK51Uc5YNzk= github.com/csaf-poc/csaf_distribution/v3 v3.0.0 h1:ob9+Fmpff0YWgTP3dYaw7G2hKQ9cegh9l3zksc+q3sM= github.com/csaf-poc/csaf_distribution/v3 v3.0.0/go.mod h1:uilCTiNKivq+6zrDvjtZaUeLk70oe21iwKivo6ILwlQ= +github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46 h1:2Dx4IHfC1yHWI12AxQDJM1QbRCDfk6M+blLzlZCXdrc= +github.com/cyberphone/json-canonicalization v0.0.0-20231011164504-785e29786b46/go.mod h1:uzvlm1mxhHkdfqitSA92i7Se+S9ksOn3a3qmv/kyOCw= github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= -github.com/datadog/trivy-db v0.0.0-20240228172000-42caffdaee3f h1:IFB3J+f0m2e7nZjPTqvzLrrb6dVU6BQrsGx/7Tmm8Xk= -github.com/datadog/trivy-db v0.0.0-20240228172000-42caffdaee3f/go.mod h1:cj9/QmD9N3OZnKQMp+/DvdV+ym3HyIkd4e+F0ZM3ZGs= +github.com/danieljoos/wincred v1.2.1 h1:dl9cBrupW8+r5250DYkYxocLeZ1Y4vB1kxgtjxw8GQs= +github.com/danieljoos/wincred v1.2.1/go.mod h1:uGaFL9fDn3OLTvzCGulzE+SzjEe5NGlh5FdCcyfPwps= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -528,8 +616,8 @@ github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= github.com/dgraph-io/badger/v3 v3.2103.5 h1:ylPa6qzbjYRQMU6jokoj4wzcaweHylt//CH0AKt0akg= github.com/dgraph-io/badger/v3 v3.2103.5/go.mod h1:4MPiseMeDQ3FNCYwRbbcBOGJLf5jsE0PPFzRiKjtcdw= -github.com/dgraph-io/ristretto v0.1.2-0.20240116140435-c67e07994f91 h1:Pux6+xANi0I7RRo5E1gflI4EZ2yx3BGZ75JkAIvGEOA= -github.com/dgraph-io/ristretto v0.1.2-0.20240116140435-c67e07994f91/go.mod h1:swkazRqnUf1N62d0Nutz7KIj2UKqsm/H8tD0nBJAXqM= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= @@ -542,18 +630,25 @@ github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7c github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/digitalocean/godo v1.118.0 h1:lkzGFQmACrVCp7UqH1sAi4JK/PWwlc5aaxubgorKmC4= github.com/digitalocean/godo v1.118.0/go.mod h1:Vk0vpCot2HOAJwc5WE8wljZGtJ3ZtWIc8MQ8rF38sdo= +github.com/digitorus/pkcs7 v0.0.0-20230713084857-e76b763bdc49/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352 h1:ge14PCmCvPjpMQMIAH7uKg0lrtNSOdpYsRXlwk3QbaE= +github.com/digitorus/pkcs7 v0.0.0-20230818184609-3a137a874352/go.mod h1:SKVExuS+vpu2l9IoOc0RwqE7NYnb0JlcFHFnEJkVDzc= +github.com/digitorus/timestamp v0.0.0-20231217203849-220c5c2851b7 h1:lxmTCgmHE1GUYL7P0MlNa00M67axePTq+9nBSGddR8I= +github.com/digitorus/timestamp 
v0.0.0-20231217203849-220c5c2851b7/go.mod h1:GvWntX9qiTlOud0WkQ6ewFm0LPy5JUR1Xo0Ngbd1w6Y= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/cli v27.4.0+incompatible h1:/nJzWkcI1MDMN+U+px/YXnQWJqnu4J+QKGTfD6ptiTc= -github.com/docker/cli v27.4.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v27.5.0+incompatible h1:aMphQkcGtpHixwwhAXJT1rrK/detk2JIvDaFkLctbGM= +github.com/docker/cli v27.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v27.4.0+incompatible h1:I9z7sQ5qyzO0BfAb9IMOawRkAGxhYsidKiTMcm0DU+A= -github.com/docker/docker v27.4.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= -github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= +github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo= +github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8= @@ -582,12 +677,14 @@ github.com/elastic/go-seccomp-bpf v1.5.0/go.mod h1:umdhQ/3aybliBF2jjiZwS492I/TOK github.com/elastic/lunes v0.1.0 h1:amRtLPjwkWtzDF/RKzcEPMvSsSseLDLW+bnhfNSLRe4= github.com/elastic/lunes v0.1.0/go.mod h1:xGphYIt3XdZRtyWosHQTErsQTd4OP1p9wsbVoHelrd4= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elazarl/goproxy v1.2.1 h1:njjgvO6cRG9rIqN2ebkqy6cQz2Njkx7Fsfv/zIZqgug= +github.com/elazarl/goproxy v1.2.1/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/proto v1.12.1 
h1:6n/Z2pZAnBwuhU66Gs8160B8rrrYKo7h2F2sCOnNceE= +github.com/emicklei/proto v1.12.1/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -595,8 +692,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.10.1/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= @@ -640,23 +737,29 @@ github.com/glaslos/ssdeep v0.4.0 h1:w9PtY1HpXbWLYgrL/rvAVkj2ZAMOtDxoGKcBHcUFCLs= github.com/glaslos/ssdeep v0.4.0/go.mod h1:il4NniltMO8eBtU7dqoN+HVJ02gXxbpbUfkcyUvNtG0= github.com/glebarez/go-sqlite v1.22.0 h1:uAcMJhaA6r3LHMTFgP0SifzgXg46yJkgxqyuyec+ruQ= github.com/glebarez/go-sqlite v1.22.0/go.mod h1:PlBIdHe0+aUEFn+r2/uthrWq4FxbzugL0L8Li6yQJbc= -github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= -github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-delve/delve v1.23.1 h1:MtZ13ppptttkqSuvVnwJ5CPhIAzDiOwRrYuCk3ES7fU= github.com/go-delve/delve v1.23.1/go.mod h1:S3SLuEE2mn7wipKilTvk1p9HdTMnXXElcEpiZ+VcuqU= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-billy/v5 v5.6.0 h1:w2hPNtoehvJIxR00Vb4xX94qHQi/ApZfX+nBE2Cjio8= +github.com/go-git/go-billy/v5 v5.6.0/go.mod h1:sFDq7xD3fn3E0GOwUSZqHo9lrkmx8xJhA0ZrfvjBRGM= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= -github.com/go-git/go-git/v5 v5.12.0 
h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= -github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/go-git/go-git/v5 v5.13.0 h1:vLn5wlGIh/X78El6r3Jr+30W16Blk0CTcxTYcYPWi5E= +github.com/go-git/go-git/v5 v5.13.0/go.mod h1:Wjo7/JyVKtQgUNdXYXIepzWfJQkUEIGvkvVkiXRR/zw= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.3 h1:fFKWeig/irsp7XD2zBxvnmA/XaRWp5V3CBsZXJF7G7k= +github.com/go-jose/go-jose/v3 v3.0.3/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-jose/go-jose/v4 v4.0.1 h1:QVEPDE3OluqXBQZDcnNvQrInro2h0e4eqNbnZSWqS6U= +github.com/go-jose/go-jose/v4 v4.0.1/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -684,34 +787,36 @@ github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= -github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= -github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= -github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= -github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= -github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= -github.com/go-openapi/runtime v0.27.1 
h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto= -github.com/go-openapi/runtime v0.27.1/go.mod h1:fijeJEiEclyS8BRurYE1DE5TLb9/KZl6eAdbzjsrlLU= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ= +github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= -github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= -github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= -github.com/go-openapi/validate v0.23.0 h1:2l7PJLzCis4YUGEoW6eoQw3WhyM65WSIcjX6SQnlfDw= -github.com/go-openapi/validate v0.23.0/go.mod h1:EeiAZ5bmpSIOJV1WLfyYF9qp/B1ZgSaEpHTJHtN5cbE= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= +github.com/go-piv/piv-go v1.11.0 h1:5vAaCdRTFSIW4PeqMbnsDlUZ7odMYWnHBDGdmtU/Zhg= +github.com/go-piv/piv-go v1.11.0/go.mod h1:NZ2zmjVkfFaL/CF8cVQ/pXdXtuj110zEKGdJM6fJZZM= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= @@ -738,10 +843,12 @@ github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIx github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/go-zookeeper/zk v1.0.3 h1:7M2kwOsc//9VeeFiPtf+uSJlVpU66x9Ba5+8XK7/TDg= github.com/go-zookeeper/zk v1.0.3/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= +github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA= +github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= 
-github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-yaml v1.11.0 h1:n7Z+zx8S9f9KgzG6KtQKf+kwqXZlLNR2F6018Dgau54= github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng= github.com/gocomply/scap v0.1.2-0.20230531064509-55a00f73e8d6 h1:u1QKTc+GgWnBO1Mo0CwQ/4DXElFmSvNKRspxAr+AJuY= @@ -770,21 +877,14 @@ github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= -github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/glog v1.2.4 h1:CNNw5U8lSiiBk7druxtSHHTsRWcxKoac6kZKm2peBBc= +github.com/golang/glog v1.2.4/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -815,10 +915,12 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= +github.com/google/certificate-transparency-go v1.1.8 h1:LGYKkgZF7satzgTak9R4yzfJXEeYVAjV6/EAEJOf1to= +github.com/google/certificate-transparency-go v1.1.8/go.mod h1:bV/o8r0TBKRf1X//iiiSgWrvII4d7/8OiA+3vG26gI8= github.com/google/flatbuffers v24.3.25+incompatible h1:CX395cjN9Kke9mmalRoL3d81AtFUxJM+yDthflgJGkI= github.com/google/flatbuffers v24.3.25+incompatible/go.mod 
h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -834,8 +936,12 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo= -github.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8= +github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI= +github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI= +github.com/google/go-github/v55 v55.0.0 h1:4pp/1tNMB9X/LuAhs5i0KQAE40NmiR/y6prLNb9x9cg= +github.com/google/go-github/v55 v55.0.0/go.mod h1:JLahOTA1DnXzhxEymmFF5PP2tSS9JVNj68mSZNDwskA= +github.com/google/go-github/v62 v62.0.0 h1:/6mGCaRywZz9MuHyw9gD1CwsbmBX8GWsbFkwMmHdhl4= +github.com/google/go-github/v62 v62.0.0/go.mod h1:EMxeUqGJq2xRu9DYBMwel/mr7kZrzUOfQmmpYrZn2a4= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -858,6 +964,10 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4 github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= +github.com/google/tink/go v1.7.0 h1:6Eox8zONGebBFcCBqkVmt60LaWZa6xg1cl/DwAh/J1w= +github.com/google/tink/go v1.7.0/go.mod h1:GAUOd+QE3pgj9q8VKIGTCP33c/B7eb4NhxLcgTJZStM= +github.com/google/trillian v1.6.0 h1:jMBeDBIkINFvS2n6oV5maDqfRlxREAc6CW9QYWQ0qT4= +github.com/google/trillian v1.6.0/go.mod h1:Yu3nIMITzNhhMJEHjAtp6xKiu+H/iHu2Oq5FjV2mCWI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -895,21 +1005,21 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmg github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 h1:pRhl55Yx1eC7BZ1N+BBWwnKaMyD8uC+34TLdndZMAKk= 
-github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0/go.mod h1:XKMd7iuf/RGPSMJ/U4HP0zS2Z9Fh8Ps9a+6X26m/tmI= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0 h1:kQ0NI7W1B3HwiN5gAYtY+XFItDPbLBwYRxAqbFTyDes= +github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.2.0/go.mod h1:zrT2dxOAjNFPRGjTUe2Xmb4q4YdUwVvQFV6xiCSf+z0= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= github.com/h2non/filetype v1.0.5/go.mod h1:isekKqOuhMj+s/7r3rIeTErIRy4Rub5uBWHfvMusLMU= github.com/h2non/filetype v1.1.3 h1:FKkx9QbD7HR/zjK1Ia5XiBsq9zdLi5Kf3zGyFTAFkGg= github.com/h2non/filetype v1.1.3/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= -github.com/hashicorp/consul/api v1.30.0 h1:ArHVMMILb1nQv8vZSGIwwQd2gtc+oSQZ6CalyiyH2XQ= -github.com/hashicorp/consul/api v1.30.0/go.mod h1:B2uGchvaXVW2JhFoS8nqTxMD5PBykr4ebY4JWHTTeLM= +github.com/hashicorp/consul/api v1.31.0 h1:32BUNLembeSRek0G/ZAM6WNfdEwYdYo8oQ4+JoqGkNQ= +github.com/hashicorp/consul/api v1.31.0/go.mod h1:2ZGIiXM3A610NmDULmCHd/aqBJj8CkMfOhswhOafxRg= github.com/hashicorp/consul/sdk v0.16.1 h1:V8TxTnImoPD5cj0U9Spl0TUxcytjcbbJeADFF07KdHg= github.com/hashicorp/consul/sdk v0.16.1/go.mod h1:fSXvwxB2hmh1FMZCNl6PwX0Q/1wdWtHJcZ7Ea5tns0s= github.com/hashicorp/cronexpr v1.1.2 h1:wG/ZYIKT+RT3QkOdgYc+xsKWVRgnxJ1OJtjjy84fJ9A= @@ -969,6 +1079,8 @@ github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3 h1:fgVfQ4AC1av github.com/hashicorp/nomad/api v0.0.0-20240717122358-3d93bd3778f3/go.mod h1:svtxn6QnrQ69P23VvIWMR34tg3vmwLz4UdUzm1dSCgE= github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= +github.com/hashicorp/vault/api v1.12.2 h1:7YkCTE5Ni90TcmYHDBExdt4WGJxhpzaHqR6uGbQb/rE= +github.com/hashicorp/vault/api v1.12.2/go.mod h1:LSGf1NGT1BnvFFnKVtnvcaLBM2Lz+gJdpL6HUYed8KE= github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM= github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= github.com/hetznercloud/hcloud-go/v2 v2.10.2 h1:9gyTUPhfNbfbS40Spgij5mV5k37bOZgt8iHKCbfGs5I= @@ -976,6 +1088,8 @@ github.com/hetznercloud/hcloud-go/v2 v2.10.2/go.mod h1:xQ+8KhIS62W0D78Dpi57jsufW github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= +github.com/howeyc/gopass 
v0.0.0-20210920133722-c8aef6fb66ef h1:A9HsByNhogrvm9cWb28sjiS3i7tcKCkflWFEkHfuAgM= +github.com/howeyc/gopass v0.0.0-20210920133722-c8aef6fb66ef/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= @@ -1011,12 +1125,14 @@ github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= -github.com/jaegertracing/jaeger v1.62.0 h1:YoaJ2e8oVz5sqGGlVAKSUCED8DzJ1q7PojBmZFNKoJA= -github.com/jaegertracing/jaeger v1.62.0/go.mod h1:jhEIHazwyb+a6xlRBi+p96BAvTYTSmGkghcwdQfV7FM= +github.com/jaegertracing/jaeger v1.65.0 h1:phDrZzaPUbomlN8VfxGWuPwkipYh7cU6V9q6Obf+7Fc= +github.com/jaegertracing/jaeger v1.65.0/go.mod h1:EkEqyIzI0xCjexVHURWJmZZxjswTUKSriW57eVG44yo= github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267 h1:TMtDYDHKYY15rFihtRfck/bfFqNfvcabqvXAFQfAUpY= +github.com/jedisct1/go-minisign v0.0.0-20230811132847-661be99b8267/go.mod h1:h1nSAbGFqGVzn6Jyl1R/iCcBUHN4g+gW1u9CoBTrb9E= github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -1028,6 +1144,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmhodges/clock v1.2.0 h1:eq4kys+NI0PLngzaHEe7AmPT90XMGIEySD1JfV1PDIs= +github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI= github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= @@ -1075,8 +1193,9 @@ github.com/kjk/lzma v0.0.0-20161016003348-3fd93898850d/go.mod h1:phT/jsRPBAEqjAi github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/pgzip v1.2.6 
h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/knadh/koanf/maps v0.1.0 h1:fYqBfuNiqSu7xkSJDuRdRCyx/iVjyxVInCIIZS2BrdI= github.com/knadh/koanf/maps v0.1.0/go.mod h1:npD/QZY3V6ghQDdcQzl1W4ICNVTkohC8E73eI2xW4yI= github.com/knadh/koanf/providers/confmap v0.1.0-dev0 h1:ocRN64rGNrgf+UAPeFzFSfpZwdAzWEV5BUvn6i0PyUU= @@ -1121,8 +1240,12 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b h1:11UHH39z1RhZ5dc4y4r/4koJo6IYFgTRMe/LlwRTEw0= github.com/leodido/ragel-machinery v0.0.0-20190525184631-5f46317e436b/go.mod h1:WZxr2/6a/Ar9bMDc2rN/LJrE/hF6bXE4LPyDSIxwAfg= +github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491 h1:WGrKdjHtWC67RX96eTkYD2f53NDHhrq/7robWTAfk4s= +github.com/letsencrypt/boulder v0.0.0-20231026200631-000cd05d5491/go.mod h1:o158RFmdEbYyIZmXAbrvmJWesbyxlLKee6X64VPVuOc= github.com/liamg/jfather v0.0.7 h1:Xf78zS263yfT+xr2VSo6+kyAy4ROlCacRqJG7s5jt4k= github.com/liamg/jfather v0.0.7/go.mod h1:xXBGiBoiZ6tmHhfy5Jzw8sugzajwYdi6VosIpB3/cPM= +github.com/liamg/memoryfs v1.6.0 h1:jAFec2HI1PgMTem5gR7UT8zi9u4BfG5jorCRlLH06W8= +github.com/liamg/memoryfs v1.6.0/go.mod h1:z7mfqXFQS8eSeBBsFjYLlxYRMRyiPktytvYCYTb3BSk= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-reuseport v0.2.0 h1:18PRvIMlpY6ZK85nIAicSBuXXvrYoSw3dsBAR7zc560= @@ -1133,8 +1256,8 @@ github.com/linode/linodego v1.37.0 h1:B/2Spzv9jYXzKA+p+GD8fVCNJ7Wuw6P91ZDD9eCkks github.com/linode/linodego v1.37.0/go.mod h1:L7GXKFD3PoN2xSEtFc04wIXP5WK65O10jYQx0PQISWQ= github.com/lorenzosaino/go-sysctl v0.3.1 h1:3phX80tdITw2fJjZlwbXQnDWs4S30beNcMbw0cn0HtY= github.com/lorenzosaino/go-sysctl v0.3.1/go.mod h1:5grcsBRpspKknNS1qzt1eIeRDLrhpKZAtz8Fcuvs1Rc= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/lunixbochs/struc v0.0.0-20200707160740-784aaebc1d40 h1:EnfXoSqDfSNJv0VBNqY/88RNnhSGYkrHaO0mmFGbVsc= github.com/lunixbochs/struc v0.0.0-20200707160740-784aaebc1d40/go.mod h1:vy1vK6wD6j7xX6O6hXe621WabdtNkou2h7uRtTfRMyg= github.com/lxn/walk v0.0.0-20210112085537-c389da54e794 h1:NVRJ0Uy0SOFcXSKLsS65OmI1sgCCfiDUPj+cwnH7GZw= @@ -1154,18 +1277,18 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= -github.com/masahiro331/go-disk v0.0.0-20220919035250-c8da316f91ac h1:QyRucnGOLHJag1eB9CtuZwZk+/LpvTSYr5mnFLLFlgA= -github.com/masahiro331/go-disk v0.0.0-20220919035250-c8da316f91ac/go.mod h1:J7Vb0sf0JzOhT0uHTeCqO6dqP/ELVcQvQ6yQ/56ZRGw= -github.com/masahiro331/go-ebs-file 
v0.0.0-20240112135404-d5fbb1d46323 h1:uQubA711SeYStvStohMLrdvRTTohdPHrEPFzerLcY9I= -github.com/masahiro331/go-ebs-file v0.0.0-20240112135404-d5fbb1d46323/go.mod h1:OdtzwqTtu49Gh5RFkNEU1SbcihIuVTtUipwHflqxckE= -github.com/masahiro331/go-ext4-filesystem v0.0.0-20231208112839-4339555a0cd4 h1:uHO44vOunB0oEtk+r8ifBbFOD0mr6+fmoyFNCgLE66k= -github.com/masahiro331/go-ext4-filesystem v0.0.0-20231208112839-4339555a0cd4/go.mod h1:3XMMY1M486mWGTD13WPItg6FsgflQR72ZMAkd+gsyoQ= +github.com/masahiro331/go-disk v0.0.0-20240625071113-56c933208fee h1:cgm8mE25x5XXX2oyvJDlyJ72K+rDu/4ZCYce2worNb8= +github.com/masahiro331/go-disk v0.0.0-20240625071113-56c933208fee/go.mod h1:rojbW5tVhH1cuVYFKZS+QX+VGXK45JVsRO+jW92kkKM= +github.com/masahiro331/go-ebs-file v0.0.0-20240917043618-e6d2bea5c32e h1:nCgF1JEYIS8KNuJtIeUrmjjhktIMKWNmASZqwK2ynu0= +github.com/masahiro331/go-ebs-file v0.0.0-20240917043618-e6d2bea5c32e/go.mod h1:XFWPTlAcEL733RUjbr0QBybdt6oK2DH7LZk8id2qtd4= +github.com/masahiro331/go-ext4-filesystem v0.0.0-20240620024024-ca14e6327bbd h1:JEIW94K3spsvBI5Xb9PGhKSIza9/jxO1lF30tPCAJlA= +github.com/masahiro331/go-ext4-filesystem v0.0.0-20240620024024-ca14e6327bbd/go.mod h1:3XMMY1M486mWGTD13WPItg6FsgflQR72ZMAkd+gsyoQ= github.com/masahiro331/go-mvn-version v0.0.0-20210429150710-d3157d602a08 h1:AevUBW4cc99rAF8q8vmddIP8qd/0J5s/UyltGbp66dg= github.com/masahiro331/go-mvn-version v0.0.0-20210429150710-d3157d602a08/go.mod h1:JOkBRrE1HvgTyjk6diFtNGgr8XJMtIfiBzkL5krqzVk= github.com/masahiro331/go-vmdk-parser v0.0.0-20221225061455-612096e4bbbd h1:Y30EzvuoVp97b0unb/GOFXzBUKRXZXUN2e0wYmvC+ic= github.com/masahiro331/go-vmdk-parser v0.0.0-20221225061455-612096e4bbbd/go.mod h1:5f7mCJGW9cJb8SDn3z8qodGxpMCOo8d/2nls/tiwRrw= -github.com/masahiro331/go-xfs-filesystem v0.0.0-20230608043311-a335f4599b70 h1:X6W6raTo07X0q4pvSI/68Pj/Ic4iIU2CfQU65OH0Zhc= -github.com/masahiro331/go-xfs-filesystem v0.0.0-20230608043311-a335f4599b70/go.mod h1:QKBZqdn6teT0LK3QhAf3K6xakItd1LonOShOEC44idQ= +github.com/masahiro331/go-xfs-filesystem v0.0.0-20231205045356-1b22259a6c44 h1:VmSjn0UCyfXUNdePDr7uM/uZTnGSp+mKD5+cYkEoLx4= +github.com/masahiro331/go-xfs-filesystem v0.0.0-20231205045356-1b22259a6c44/go.mod h1:QKBZqdn6teT0LK3QhAf3K6xakItd1LonOShOEC44idQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -1205,6 +1328,8 @@ github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKju github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= +github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= +github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= @@ -1212,6 +1337,8 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.14.1 
h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= @@ -1222,8 +1349,8 @@ github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zx github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mkrautz/goar v0.0.0-20150919110319-282caa8bd9da h1:Iu5QFXIMK/YrHJ0NgUnK0rqYTTyb0ldt/rqNenAj39U= github.com/mkrautz/goar v0.0.0-20150919110319-282caa8bd9da/go.mod h1:NfnmoBY0gGkr3/NmI+DP/UXbZvOCurCUYAzOdYJjlOc= -github.com/moby/buildkit v0.13.0 h1:reVR1Y+rbNIUQ9jf0Q1YZVH5a/nhOixZsl+HJ9qQEGI= -github.com/moby/buildkit v0.13.0/go.mod h1:aNmNQKLBFYAOFuzQjR3VA27/FijlvtBD1pjNwTSN37k= +github.com/moby/buildkit v0.16.0 h1:wOVBj1o5YNVad/txPQNXUXdelm7Hs/i0PUFjzbK0VKE= +github.com/moby/buildkit v0.16.0/go.mod h1:Xqx/5GlrqE1yIRORk0NSCVDFpQAU1WjlT6KHYZdisIQ= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= @@ -1237,8 +1364,8 @@ github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9Kou github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= -github.com/moby/sys/signal v0.7.0 h1:25RW3d5TnQEoKvRbEKUGay6DCQ46IxAVTT9CUMgmsSI= -github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= +github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0= +github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8= github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= @@ -1260,6 +1387,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mostynb/go-grpc-compression v1.2.3 h1:42/BKWMy0KEJGSdWvzqIyOZ95YcR9mLPqKctH7Uo//I= github.com/mostynb/go-grpc-compression v1.2.3/go.mod h1:AghIxF3P57umzqM9yz795+y1Vjs47Km/Y2FE6ouQ7Lg= +github.com/mozillazg/docker-credential-acr-helper v0.3.0 h1:DVWFZ3/O8BP6Ue3iS/Olw+G07u1hCq1EOVCDZZjCIBI= +github.com/mozillazg/docker-credential-acr-helper v0.3.0/go.mod h1:cZlu3tof523ujmLuiNUb6JsjtHcNA70u1jitrrdnuyA= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1274,16 +1403,19 @@ github.com/ncw/swift v1.0.30/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ github.com/netsampler/goflow2 v1.3.3 h1:uheCMgWwbaHnVdsvc2bqbdQe93E73pVF77WGu/kPE7U= github.com/netsampler/goflow2 v1.3.3/go.mod h1:mUjr4ERDTtNUAVtf2EomWHmr6Xvz2N9DahhFkhNnFkQ= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= +github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ= github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= -github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= -github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/oklog/ulid/v2 v2.0.2 h1:r4fFzBm+bv0wNKNh5eXTwU7i85y5x+uwkxCUTNVQqLc= github.com/oklog/ulid/v2 v2.0.2/go.mod h1:mtBL0Qe/0HAx6/a4Z30qxVIAL1eQDweXq5lxOEiwQ68= +github.com/oleiade/reflections v1.0.1 h1:D1XO3LVEYroYskEsoSiGItp9RUxG6jWnCVvrqH0HHQM= +github.com/oleiade/reflections v1.0.1/go.mod h1:rdFxbxq4QXVZWj0F+e9jqjDkc7dbp97vkRixKo2JR60= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/oliveagle/jsonpath v0.0.0-20180606110733-2e52cf6e6852 h1:Yl0tPBa8QPjGmesFh1D0rDy+q1Twx6FyU7VWHi8wZbI= @@ -1314,120 +1446,120 @@ github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= github.com/open-policy-agent/opa v0.70.0 h1:B3cqCN2iQAyKxK6+GI+N40uqkin+wzIrM7YA60t9x1U= github.com/open-policy-agent/opa v0.70.0/go.mod h1:Y/nm5NY0BX0BqjBriKUiV81sCl8XOjjvqQG7dXrggtI= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.115.0 h1:Xkfl44ZRgkz1EoCCYgwPomQkV+BrYOPvv9v1Kd1gZE4= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.115.0/go.mod h1:Sr/upBdJeJ7nxDfmCFCl9iHosXiPoQCPHkCJslDyoUA= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0 h1:sO4fPw0NRUibgBVvQVTqPBCBRFh0I+ODIr3HAwcWezI= -github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0/go.mod h1:HqzCXJ4rxXzWNYaUtCqJzXyTsCGEKSa/d+tHcyeRDY0= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.115.0 h1:qtct9PsKONY6YOMc+QGBE/uGs8KMBcF6mvYJbyFHFt8= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.115.0/go.mod h1:OR9DKWrSRpfc3+CxwsL2QTOuHD03S9w0Jubi3EhTcy4= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.115.0 h1:u7Ht+E1ghQESffcjyaxWrXGsfSWa1VE9LKC4f2PPx84= 
-github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.115.0/go.mod h1:r3iS2mDYu+cnGjgNc8TgvuUUAN6A6/1BvR1e1YJBrqM= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.115.0 h1:RXYLbv2uTJlJTJcEa5H8/fLdX419XUlbn6mjzEgTWxc= -github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.115.0/go.mod h1:ngeyITKu+koaagA/sFpnuT+x0nFVBNdWq60/h5buSr4= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 h1:51D/x3xIAnWgVrY0lgdU+b+yb2aWd72uDqu9GhjRcNI= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0/go.mod h1:nLau1YUdjhtLrk4jXLPb2l9riQ1Ap4xytTLl7MBedBg= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.115.0 h1:eJk/gbfWpGKTIGLUN+EWpqM52Zf4LFTfIeMnDji+dqM= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.115.0/go.mod h1:+GPzqBFeqV90U4/bntDRPMxo/i/12lxH7GyPJmqz4ls= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.115.0 h1:790+/iSYt6bMs/OA3AfLlZl9E/Zpb0pm5X628TCncE4= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.115.0/go.mod h1:LtsKKBDZyn02DiqvuOZapGg75P/FqGQNelTI6fO12o0= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0 h1:BtYrSkQSYGJufsmbqqrpzb+BJXH2S4CKL14i1bxOFCU= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0/go.mod h1:4LQ1S3eBu+MyCNaCkBk0hIoAhvJJS851i/tY45FtDf4= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.115.0 h1:zi0LLZp26hAycIKNbmOIMGc0ZnkikrciTHl1tiJuo4Y= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.115.0/go.mod h1:a/UMjV9mrFJ5WIlpaDQ/S5KgCrg0H3kD8nlhfQRxfBI= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.115.0 h1:5PiDmieivpExBd2LchzSIvEls+cjUeJtPLXvvHxLZoI= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.115.0/go.mod h1:FIFNtgEoqcI/evvgSL+5qO/cdRUK+6ixFKKUdKpmMeA= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.115.0 h1:sMHHN4HrakORqrpsTLQQVGiDjKg4QreBJ+UCx/1OI+I= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.115.0/go.mod h1:q1950sX5QqCGDurVOkwatDSc5de4gpGfuPGVtFgNo3I= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 h1:HVGG31WeB6Fn2+il2/ycWj9tDP0fxOeOqD1rKCjsBSc= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0/go.mod h1:2hYojHs5daPVWECuZsPViKwty0ojuHUEmk8GEuaFqO0= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.115.0 h1:4Ycg73pYVdiF+oq+BmUq7Dkg0WKeKvBSk9AOKvBe4LU= -github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.115.0/go.mod h1:l2Q+MmYk2ZRDSbhX9GlJYvBXC51AqhDJAj2ne290Xik= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0 h1:SF3gOOEkfntE3zEhY80yO7BVQ5CkaK8ecic2U2AZPHE= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.115.0/go.mod h1:jeBzX5m8O9X0LQxiryV9sJUIrn+QAwOnCBE2wZWIltQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0 h1:vRQQFD4YpasQFUAdF030UWtaflSYFXK542bfWMGhOK0= 
-github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.115.0/go.mod h1:BZ7DT+0VkKR7P3I9PGEDfVa0GdB0ty41eEcejIUXF9A= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 h1:a36EJz/mb83f6ieX0v4fNDJ1jXqpeaM6DVQXeFDvdhw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0/go.mod h1:r5/40YO1eSP5ZreOmRzVOUtDr7YG39ZIUcVjHd+9Izc= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.115.0 h1:xITYM8BkEgs2Wf+PczOrVv0b1Fk4N929/xR9YtxLpkw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.115.0/go.mod h1:m+5tYnZKfNDtnZKknOfssYSXBEL5Yqse4CJMpaY5kMk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0 h1:h6zEsBtuZalQu7lKYf6ZCcj8fTocT+zxdmuOou9515Q= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.115.0/go.mod h1:6QU/K0dGCGYorkOvJmhbDFCspy4RPxRkFjf9I64y6I0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.115.0 h1:f/HrZgTf6TF97v67uEZB3v2UtBT9aQojBvnloD3LOm4= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.115.0/go.mod h1:Hp9uSq3qNJqdxu24u7RWyuPT9x1GgEUSx9US1LLeLi0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.115.0 h1:vXDJE8YHfAoYIAlPRtODchlqb6lWnGhJxPaT2ljvN7I= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.115.0/go.mod h1:f3IgMFHIjEUEI/I+5e3KWMPq9h2PSMy9WovmvPdmlb0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.115.0 h1:4RoU3SlcNe6Dxyxfv8JVsrN8QgjBQ44Pkt9FLKK095I= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.115.0/go.mod h1:jfPlBpZT+hvp52Ldcx+srxaqyYuKxBkxOd3KtxbveCU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0 h1:8A+iBT5G23zvBPqYx32Qh4800jHFo4X9T1fpQKVQ+4E= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.115.0/go.mod h1:AhdPvwYKu7G8LKRWzHTNQYBq27RinsMm5qSanwSA/rU= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.115.0 h1:MuyDWyVoCty8HyP2CAYoRZXwINiThHovcC1Bj3+H8lk= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.115.0/go.mod h1:asekVnrdzYsMJBaJtIyXOt8p07l1x0xs8X3h00sZyf0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.115.0 h1:6GIJOSEIWBt9bprARMtTjRlENrwNsJl2UzbtjOBk7A0= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.115.0/go.mod h1:/Fg/itwlAzDjyM0Sjenup9TbdOT+aVNPSqXsF80M8hw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.115.0 h1:l4NBxl2AELPlyqupLu1IVAjtbGOEovaKEyt0UGMsuq8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.115.0/go.mod h1:j1qF1hE/Qcy2I655yXbf2ItezXok61OW+9AAxbH2ORw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0 h1:WOqt8NpU/JPGYDR4CiWx7g/sHV6Oe9FChzhushwmVdo= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.115.0/go.mod h1:wV/+iU7MyXcyTaY8K5Qx+1Z3yUzrxA40nydPQA476Iw= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.115.0 h1:l9AsnVHr3Sp4lAGFlBJ6Ochl7mlPE0d5MNd70o4qKEM= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.115.0/go.mod h1:kARk81QZpcX6L8x4fLo4Nr/z/+jpo5PxXtugBxF2DyE= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0 h1:Z9p78zj9Qblw472mGkPieuX7mqduAp47rzMbFfq5evI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.115.0/go.mod h1:mtxUxJEIQy27MaGR1yzcn/OK8NoddEgb7fumpEbKYss= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0 h1:qdZ9EqmdM19pWhPoFA7VivBTdzP2HvNwXa3CCMHYoDQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.115.0/go.mod h1:mrL1MNrcg0zYAJ+aK9WtOH062dl2wN9DDG7mZk9H8v4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0 h1:MerLKMrkM4YoGF6Di0D9yMXO02yCX8mrZAi/+jJVVeI= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.115.0/go.mod h1:R8AkVWe9G5Q0oMOapvm9HNS076E3Min8SVlmhBL3QD0= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0 h1:WEqcnWSy9dNSlGb8pYRBX7zhaz2ReyaeImlenbzNTB4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.115.0/go.mod h1:6Mk71CakHUA3I6oM9hARDiyQypYyOolvb+4PFYyVEFg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0 h1:eoapW0JBablApkdv4C1RUuOKfz0U6SwuKMYYSAJH6fE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.115.0/go.mod h1:hW2AaybTRcwxJySGLC3Fh1vd2VDaQhRBfa7O7w30NS8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 h1:R9MRrO+dSkAHBQLZjuwjv2RHXHQqF2Wtm1Ki0VKD5cs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0/go.mod h1:rKXLXmwdUVcUHwTilroKSejbg3KSwLeYzNPSpkIEnv4= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.115.0 h1:7tQ+WjojXhtWDFTJlwCvkjpvdTed5YkVKVQKVAu1alg= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.115.0/go.mod h1:iqgJP7+N03pOIOqYaKjVWYoIKweNdFivsvWJfFw6MTQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.115.0 h1:rrIm0dyEdaHmQo6udPK1V3opkzEKa0PrZzSdY5oGqmQ= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.115.0/go.mod h1:AMeisxL/9gs0bzozaymUqI1/EJ9GPvtnLh/BtqtjSF8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0 h1:KghgAubxdDqP4eUQ+d2GzHXUAwtFxpSDToqFVnax0XA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.115.0/go.mod h1:cW/BaYE6Uo7ZYHbmT0wVBktHP0SfeLqGHMf0qks7rOE= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.115.0 h1:ioGiKiO0WqT3PxkzanuJsPVA24FItH6nTJeDeSMFpYA= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.115.0/go.mod h1:x1W4J+pzK/Bi9jjYBYESTsPq0nRJJLZoN7cPNd0vYSU= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.115.0 h1:A9zqBtUJZ5J/0VI+B1dxuQhc2iVYpD9c54SgaKtFIN8= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.115.0/go.mod h1:hG7GOrBBux/cg1fAUzvSlzYY02ekxjF9IvH4ls/nGXA= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0 h1:hAsK9I081ShnSDSKPVEHB3TLawyOmbR6bPDiQEkgo2Y= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0/go.mod h1:z8XdvlhXSYVboxS3TPGembE9kfxLAYH2PxPLMvf8wTk= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0 h1:t3BGnPpmeuxW51vISSu51PrAs49ACBCa1Yl1NfZGE5Y= 
-github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0/go.mod h1:jQLYyroEYEV1kWJApmGBgVuGUd73v+Q6EUJ6Wy7N508= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0 h1:ficXJmB6l6kfiu+R6CmggtnlQWMHUNzu2csDYA4CFSs= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0/go.mod h1:ykraxSeEVCuA43oqlMWnex78+vNQ+1dBTJUeInkqIpA= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.115.0 h1:LVe/Oh2un9CFKFYtepB9oZ6j38whFPVYl01RAVsdxHg= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.115.0/go.mod h1:mGSGQCX5dT5KUxBkuCO15CNqB+8Cb+qj0edt/oKmA34= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0 h1:6RGhDlZkekmp12EvK6JV9fiIwrdZBOJID6/Ts9tXzL4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0/go.mod h1:qZRQtGr/DAjuBqAuKJMN2cWvc9RI94lB0Oq8UyGAduo= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 h1:vwZQ7k8oqlK0bdZYTsjP/59zjQQfjSD4fNsWIWsTu2w= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0/go.mod h1:5ObSa9amrbzbYTdAK1Qhv3D/YqCxxnQhP0sk2eWB7Oo= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.115.0 h1:jQ6mIXhWqXhl8MPun9soNynsQ0lpOpOYQyAnQ28F014= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.115.0/go.mod h1:oRxNwm6HN7ckp4aJOAFC8BVBPa0UDhB8vNGTFL3QBJg= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0 h1:KbfjEsr2d/5TGWHvcaBC3lOpYAnquEraLXcis4IamAs= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0/go.mod h1:fmtZPK5RIz+2Lcm9xQZuwiM+M8/juSSeJufSxUT+J9w= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.115.0 h1:Ea5v0Q6VNIMRbXVJjHUsSbdOSkB+80sCOH7Y9yhStnY= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.115.0/go.mod h1:IkiZL9vOU8qNCkrnJP0GOWPoFTED+yhB94wJbcLYcGA= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0 h1:olyiml73slGYORDjZNViW3nKiysC+K+h5yPsSBjUxQ4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0/go.mod h1:N00k1mTxzfS2clqxSP4Dxk7iX8GWbbuCq6LF8/ECk/M= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 h1:sLRTfXUFiqJ5Qe/NN5MUJxTaFt46E0Y/xjSY+KesCQc= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0/go.mod h1:361IqXD4jnfs6G+Yn7978uv1UNozhZo4yBYy4p6Nqzc= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0 h1:JSFnfWwlVGLul8p9DE6Sk6E0zaqCvbys7CqvJQD4MIs= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.115.0/go.mod h1:cw0qzwXzKKxM7QyDcNSp9OSDLySVXyaSrgdqWPqlDk8= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.115.0 h1:2xlgF/vCUsZx9HDqhDi0XyR1QXBM67YFRyWrEq5Ydos= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.115.0/go.mod h1:vWTdohkLm9S+3Ekz4aq1jW0xt8wD2jrdOOSOJNllppo= 
-github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.115.0 h1:XDlXWa6pdAp02kdfZdzZ0cjeZMNHjI7dj2dNgKdzOfo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.115.0/go.mod h1:Zo6YARAWAMCdlUmyKBq0EcuKmLjxfC2hUNd3jIAFsWE= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0 h1:hYNlyUj3F43cuv1ap19NlEEchQfs91vYeNoQ1+nswLo= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0/go.mod h1:1o6wF5HJdpb2hd2eGMoQhGuTKb4F2+j/IHBJJSPdM2w= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 h1:GIyMUiud3T8nyCJP9KVhxVKvfcNQRBCde5uTCl6K/i0= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0/go.mod h1:x4hCznyUolxGt5cE/uXWRCckdIDrUYqH5hJddvdKZd4= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.115.0 h1:Di0uc2QvwEVrq1PEReZ34FpPuo1z5QhHmT0bvdTe0DU= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.115.0/go.mod h1:ODvjmz18PDQnX/BruQ8IFOpiz/HdGOpUWMEKq7f3nhA= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0 h1:h/HAHLIZnIyu85l8wOeggOyiI8z8citNAqxQktVKUpk= -github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0/go.mod h1:iEU0NA/i2sUREqD19JYmjKwrjMUTcddad/h1LGdSMHw= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.118.0 h1:i6EXJvoGNOrYvmYbOgJu2FRurpMg1eS/lP6nkEOwKM8= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.118.0/go.mod h1:I96MagdAxY3SNR8PZWSiKMp4s8EfutqmhJVpQ3x4dLk= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0 h1:X0RNsPCvo+VCQNaxFL+3Zj+13/It8aY6yRmBSLcGy1c= +github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0/go.mod h1:ZZzyaYuuQVUA/STahm8GOJqXRPFrB9KxT7jY7EakDXA= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.118.0 h1:AsPP531/BHxHh0SD73ij1Lg+prrGn2RTVXWdtf0d0YI= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.118.0/go.mod h1:NKFEb3yh4hZBTi1BQM5Sn7n/UiIVBZForHHqjtJBH5U= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.118.0 h1:zHA9n518dSAz2VKqqn30upcZQL6ll9lrK1jCRnBHmhc= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/prometheusremotewriteexporter v0.118.0/go.mod h1:9KW4qWtwCvpWmZYczNkwCwT7nI2Nat6IemDX5w/fTdI= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.118.0 h1:3ppMguebAQUpaf7vy8fbgnPNBTXRMUPzMy1qvzkG8lw= +github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.118.0/go.mod h1:zhFt+3GJXpvmSlNp8XnnR4kIIgsKfTBIlLXoH1SPMHY= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 h1:HKPTwhA+GNlsBpIR77DH1gwbzL2thOv/+rZzEZ27Sdk= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0/go.mod h1:MchgZvCCgNc9sXj52bvrjQQxS71UaZ0HcikwUDuGwFg= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.118.0 h1:wvirZ1Q8AgtkuJcOJMsloo3F59hYYQstUAmjRkCVcLg= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer v0.118.0/go.mod h1:+GPzqBFeqV90U4/bntDRPMxo/i/12lxH7GyPJmqz4ls= 
+github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.118.0 h1:hYDOKToj0lY6FeE5lTZKznpSGVHFD/4Cfi2lPaRivjw= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.118.0/go.mod h1:PjNA+kVULMLNDKtEgRysEa49wIO6k4tn8iLZWp9gbqs= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.118.0 h1:jy3jqQbSZr6zlZefoGOgGWOsALaU4iMPK3vFF7IYM/A= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.118.0/go.mod h1:aNnCfejJO8lURzs3xgff8kCMf/X9OvIxzQotaqqMEy8= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.118.0 h1:4kumnH6249A8TIlz47mDvkJQGg5iHw/p53wTjf9R7G0= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.118.0/go.mod h1:z/AkF6hKF31PSGZ0al59SXAi0Bb+xY/l5arsL0CDHvM= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.118.0 h1:aUfAf5iF/oncctB1T54rCqN1Mq+4EXH/ODhnIZj9U70= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.118.0/go.mod h1:r7IO5NDCqknOmYV+UP6zw4Pmwgr27WwLqOktPXRHinY= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.118.0 h1:N4uUP2W9anaeClRpvyo3Voj5PdjU8juSIRKNaTW0BzA= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.118.0/go.mod h1:PBxPdHzVa6Vi5L1PONYaELTD0eJZmTN6C808r4P4O0Q= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 h1:KlIEiJprSJYUvc2XxXCu0uXM0/T/IbTKcyugEcjmnm4= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0/go.mod h1:oE1OPZITVJobOfQBHokvUlCm4BILngcmba1jkKhBcKs= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.118.0 h1:uWMYM1UrkVGBlWDZP5DxrjVvGfKM3RUaEwSeBNaW8aU= +github.com/open-telemetry/opentelemetry-collector-contrib/extension/storage v0.118.0/go.mod h1:cBXc0E/8KWMwd5CZfg2PrOeSRzeE9+uL/P02ZV86fV0= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.118.0 h1:xRe7n6OGxrjAkSycWEHSRXlSO9gN8dHoRHC8mrNEqsU= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/aws/ecsutil v0.118.0/go.mod h1:6Lrr+9tQ1/cBVpMhccQ43CgUmy9YYbsu/yssNIZJxjM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0 h1:W6maz9dZiAYO3WWFMy41GoX2tzx7EPiLGiykNkiAOMI= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/common v0.118.0/go.mod h1:WmS8hAnghKAR5UGYC/sww46mKkBO4RxAqkn8K0i+ZU4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 h1:Xnwd0QEyBg6iNPUbc3CnHIb0hVjfTc+jHdFbA9VSa7k= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0/go.mod h1:rmqCuNFMNBUxgyufeU8rpVYOWau8ubr0gmSO1u+R5Hk= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.118.0 h1:ID+tXs48HrBgG8FqRbBxTBTssybnBc7M7+dcY4dD5Bg= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/docker v0.118.0/go.mod h1:VHzkkLUJmRxbIYdbIv/8ZkaDmpMNbtJydMgbEp61GrE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0 h1:cRDOmJfEOm7G369Lw47k03NIg1qY6HtO9XTwfYRLBw4= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/filter v0.118.0/go.mod h1:KPphlnKqOx44kbEks3VjqQstD/892osXDVN1kn53wWE= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.118.0 
h1:94Xf/jV2ewqnVRA/CUKvNKZ5p3+mEtrMcPE1Xw9lk18= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8sconfig v0.118.0/go.mod h1:GhC+Pk3PbAIq52vmYr+d6PN4Hnxyp4lGQMbomI7Bom8= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.118.0 h1:zzv0uQqa3UZ7Axiad2yVDCdPCzUMKDWLbKjRzkq7KXY= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/k8stest v0.118.0/go.mod h1:TgVgtImN3q4BNxLMWz6xLwk//UKShVerrZ4R2rGxOPo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.118.0 h1:OnZwsQGs3DKeZbyLWNZY1J2xKthKkg4Myb0OP9YN7/U= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/metadataproviders v0.118.0/go.mod h1:6wbKIFyIVjtzzHEFUSvk6bKBLPEwwdVqY86N6MYVsPw= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.118.0 h1:nzm0/xJEzIWKydgsubNipphuYabJPF3dXc4I6g0dR2M= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/pdatautil v0.118.0/go.mod h1:jORSreOnfMNkLI3KgHVRCFaj/D8gMvgUAQXzXnPf858= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0 h1:dPYcq0NyUpXeJGejLvNAMZ+iaQGx0UCmNwnnn60D/Oc= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/sharedcomponent v0.118.0/go.mod h1:RYz6Pcxqia18V98XqWXWqXB/Qejn7vgK5PoWgMv7DwM= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.118.0 h1:p/DhBHfynUpu6jO4G2zsKlPaeXnWcqdMHMZTc0JY7PQ= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/splunk v0.118.0/go.mod h1:uAVNa10cWbfJsWpf73NyVi93AIR9Kk/+ygXHKKXoWt8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.118.0 h1:jShgD4zzFxDAWXuk+5kiDuNxLc9222s4qUSISIybJo4= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/batchperresourceattr v0.118.0/go.mod h1:QV0JiEz23p+0jDeAA4IfmX9/nAhGPrn9ZEnFKqS8r7w= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0 h1:4IvL4o5uOf1PspPgjgcrxfPkyZQbgJP6VsyUi5KuSdM= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/datadog v0.118.0/go.mod h1:9cP+bHuftqoYmNDd8LrJ3YTzQl8S1T+qQxSeOIdLM+g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.118.0 h1:vuPvyNTWyqJVp4hJ/Gr1i5Gqd89lFaaOjXtsVlLUlfs= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata v0.118.0/go.mod h1:rv8ynKZtox4Lahm+1eG8zyyAsARoKiM0TZNqlMwDfE8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.118.0 h1:Pho1MwH+cvosN6pOinGhunBwAJyyAwFnbIW5x7N/37A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/golden v0.118.0/go.mod h1:IMy3f4XjwIu+PZF9Qq5T6WZ/+mOL9l+SFjPYEQuWZh8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0 h1:8pBuMvrFhU7YLJn1uhuuv5uLz0cJUyzusFtNA//fvFI= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/ottl v0.118.0/go.mod h1:pPjJ7ITPSA68VT7cGK9UIJkGsvfjIJV8cjB8cnnsKr8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0 h1:DSoYrOjLv23HXpx72hl61br4ZZTj6dqtwZSGoypKWIA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatatest v0.118.0/go.mod h1:nR+r7aAbsktscJk4fGmzljblbZBMaiZcIWeKbXV+HmY= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0 h1:aUTSkzJExtrlHN32g8hX/cRNEo2ZmucPg+vwPqOYvhg= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/pdatautil v0.118.0/go.mod h1:a3sewj4nEozMwcNwZTHPzddS+1BnA6BaAkO/CRIGHVU= 
+github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0 h1:WnOBLIbdKDdtLCmpedY35QIkCOb2yW+BxydQMEIv2Xc= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/resourcetotelemetry v0.118.0/go.mod h1:QNv8LB5TzLUHB4p413mrtLryozBRNHKwIlY2R6UirrQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 h1:zEdd1JoVEBX7Lmf/wjs+45p4rR5+HvT2iF5VcoOgK1g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0/go.mod h1:WE5ientZ87x3cySOh4D/uVUwxK82DMyCkLBJ43+ehDU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.118.0 h1:iuQWJbTtl3A/wgG7Zl/mWpcBQASXeJiWWblSfu1qSQ8= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza v0.118.0/go.mod h1:JKBSWs4Wo3B2172g6/Hcar31GM8EvlJK2lbAqElpkT0= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0 h1:cNxDWIo5FNwVCEJ0OkYZG7L2FSiIoH7ASUnhjw5+yaA= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/jaeger v0.118.0/go.mod h1:wGuwhjwdA3Sqw0gLBebku6vJ8NHqWhv8mDEOaxFsKTQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.118.0 h1:ycH2OpswYo9KWsZv7i7zaI8QQUTVZZssAC48cwThZ88= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus v0.118.0/go.mod h1:VkFMDbe3yp1xEzLyyHhQ5SZzWFXxgzuw38SdLzEet+A= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.118.0 h1:ZellmKscolOE6l5R8Cf4ndjSvXzA6sx4ItmbviMBWSQ= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheusremotewrite v0.118.0/go.mod h1:jQKwQo7XgAUXnibEA4bq+RngO43owGFBXRqbbP50i+Y= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0 h1:Ef0H9eY8EtZ6yqZvbyEFiE5ElQNLiADYo2KVdR7a3jU= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/zipkin v0.118.0/go.mod h1:VqUc4LGE97Qh8RddrA7+fkd4OAzhhkQ59/oE0q3TfqI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.118.0 h1:Hj5+sK/NK5lKY6aq+d19GrFE0upk22NCWoJFPQSGA8M= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.118.0/go.mod h1:6TXkJ9mQArydxXiL6Da2VM4iEyhpcGAGI43BW/SCGgo= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.118.0 h1:rcF1K6gDvX8lSXYnglnSYIlyW9wL98A95XABxvHWoaY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.118.0/go.mod h1:JjCx8GAMR29DytUD0osPum9bXyf5iobMBTmjUlo/JCk= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0 h1:D/67TEByWyRExhiV0Ihr5DZCh6WsCpaFMUaaPeyP6c8= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0/go.mod h1:qYuRkOOo0OXWAFb2YGyL+UQkyrypds9cMW+q7+dTUJM= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.118.0 h1:hoX1aUlZdrC5Y4AVWONPAFhq/UOMLL4tGGOrMDANrbw= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.118.0/go.mod h1:D3hu4pM6NK9Ouot8cPtsDxh6EcA/g1qOFEIOy8iOnKI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.118.0 h1:5ElmjGrphFCpidyucBTINYX5lZXCpJiFo0csZBGJS/k= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.118.0/go.mod h1:uUIOPbmhZNUXPo8tWn++5f/LR70hx4deGtVUkWlkBMM= 
+github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 h1:pC1e5BvBf8rjwGb56MiTUFEDHU2LSclaqRNUs3z9Snw= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0/go.mod h1:wZTrQ0XWb1A9XBhl1WmUKLPfqNjERKFYWT5WER70gLg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.118.0 h1:oNVf6dfJAy46JPwogw98YSGrQm30qdtrdQVoJswgLAE= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.118.0/go.mod h1:TXA8gBGYuK9NJeAJVdFaxQ/3DElUExT7kMQHiNqKWfY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.118.0 h1:p0WId+SoJIm3RMidEqsXqZ86u6+815I8AnCXQHgV27Q= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.118.0/go.mod h1:J103TDmU+aY8mMbYMuvAZWRugt4crQlmvVOg97eZckY= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.118.0 h1:aTWOuC42eWr6Z/unoHiV1oQwZ27F5sszygoLE5p45CI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.118.0/go.mod h1:YerB7SYBjfS0j1zpfg7EHezUHKn9o5L5YlWG8U2uYdg= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0 h1:vOVsKrrRjfOYSvOu3Qv7MIHUZSVL93tnHETBU+GGxsI= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0/go.mod h1:NxqPda5zVnG8RiCgff0L2EfdIflsC/wkRTLNdlYgN/E= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0 h1:ZeOm/Hf/zCcpqIa6zbZ80uy1W0/HR/ib18rTj7cuQ4I= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0/go.mod h1:0WO9Sxt9rPjfe88wnP4SL/M09nohh3H9NX634fem0b0= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.118.0 h1:j/961n8IAbqdw6NoWrnJLTADnLzH3txAZhMocaUi+3o= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.118.0/go.mod h1:6LGm+uv2Hv0D9OCJ/7d5+2h+he/8YbQT7lV3rFrUskA= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.118.0 h1:E7R1x5fBhWKbG4F0c7vLfIYoL7a5XB9BZDagq2XDPp4= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.118.0/go.mod h1:D4vpT7Xo8mwmq7b0YFBwV5LO8dKQAtPkTVgfkRjBwaU= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.118.0 h1:zHRXkCwg0/Mz0tnb15T3sltANwMzQyJyDDnYor2is2c= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.118.0/go.mod h1:zu2HyeyHz5WG1ssJSRCsHggM06IAaEDsm0eGFFedKpQ= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0 h1:ABsdtuXGh1YjOkiVr19ZsaHAAfM+c7QiccF0yinhb4s= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0/go.mod h1:bpfe1oPTuiP6ot4tkPvSVYPMkYshLGjNPrJvoDk1ZCg= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 h1:ZKedpw3/P2iAW1mkPij/AP0q4bSY/3BjH5k6K50wgmk= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0/go.mod h1:Vx5ZkbyLKL01R44rHNn6FwdVrY7x4LxLMi8f1Zmxk1g= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.118.0 h1:Y5MeQVPRosTBzw5U6HSmhpB9NIYkuQxUUH2f7hScug8= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.118.0/go.mod 
h1:Q7BhKWXfa5IxpDwbD64mjrQVImo70WsUwcP8vIEgadw= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0 h1:hdq0EDq6gCjOWl0RfXhAcSepB52QHx7us+UcUYTbWpg= +github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0/go.mod h1:9kAczl5meDgn9zlLJJre8Q/4U43cqh9aAy3kCm/rJlk= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -1445,6 +1577,8 @@ github.com/openshift/client-go v0.0.0-20210521082421-73d9475a9142/go.mod h1:fjS8 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openvex/discovery v0.1.1-0.20240802171711-7c54efc57553 h1:c4u0GIH0w2Q57Pm2Oldrq6EiHFnLCCnRs98A+ggj/YQ= +github.com/openvex/discovery v0.1.1-0.20240802171711-7c54efc57553/go.mod h1:z4b//Qi7p7zcM/c41ogeTy+/nqfMbbeYnfZ+EMCTCD0= github.com/openvex/go-vex v0.2.5 h1:41utdp2rHgAGCsG+UbjmfMG5CWQxs15nGqir1eRgSrQ= github.com/openvex/go-vex v0.2.5/go.mod h1:j+oadBxSUELkrKh4NfNb+BPo77U3q7gdKME88IO/0Wo= github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg= @@ -1454,12 +1588,12 @@ github.com/outcaste-io/ristretto v0.2.3/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkL github.com/ovh/go-ovh v1.6.0 h1:ixLOwxQdzYDx296sXcgS35TOPEahJkpjMGtzPadCjQI= github.com/ovh/go-ovh v1.6.0/go.mod h1:cTVDnl94z4tl8pP1uZ/8jlVxntjSIf09bNcQ5TJSC7c= github.com/owenrumney/go-sarif v1.1.1/go.mod h1:dNDiPlF04ESR/6fHlPyq7gHKmrM0sHUvAGjsoh8ZH0U= -github.com/owenrumney/go-sarif/v2 v2.3.0 h1:wP5yEpI53zr0v5cBmagXzLbHZp9Oylyo3AJDpfLBITs= -github.com/owenrumney/go-sarif/v2 v2.3.0/go.mod h1:MSqMMx9WqlBSY7pXoOZWgEsVB4FDNfhcaXDA1j6Sr+w= +github.com/owenrumney/go-sarif/v2 v2.3.3 h1:ubWDJcF5i3L/EIOER+ZyQ03IfplbSU1BLOE26uKQIIU= +github.com/owenrumney/go-sarif/v2 v2.3.3/go.mod h1:MSqMMx9WqlBSY7pXoOZWgEsVB4FDNfhcaXDA1j6Sr+w= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= -github.com/package-url/packageurl-go v0.1.2 h1:0H2DQt6DHd/NeRlVwW4EZ4oEI6Bn40XlNPRqegcxuo4= -github.com/package-url/packageurl-go v0.1.2/go.mod h1:uQd4a7Rh3ZsVg5j0lNyAfyxIeGde9yrlhjF78GzeW0c= +github.com/package-url/packageurl-go v0.1.3 h1:4juMED3hHiz0set3Vq3KeQ75KD1avthoXLtmE3I0PLs= +github.com/package-url/packageurl-go v0.1.3/go.mod h1:nKAWB8E6uk1MHqiS/lQb9pYBGH2+mdJ2PJc2s50dQY0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -1478,8 +1612,8 @@ github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod 
h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= @@ -1497,8 +1631,8 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= github.com/prometheus-community/pro-bing v0.4.1 h1:aMaJwyifHZO0y+h8+icUz0xbToHbia0wdmzdVZ+Kl3w= @@ -1529,8 +1663,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1547,12 +1681,14 @@ github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9 h1:arwj11zP0yJIxIRiDn22E0H8PxfF7TsTrc2wIPFIsf4= github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9/go.mod h1:SKZx6stCn03JN3BOWTwvVIO2ajMkb/zQdTceXYhKw/4= +github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf h1:014O62zIzQwvoD7Ekj3ePDF5bv9Xxy0w6AZk0qYbjUk= +github.com/protocolbuffers/txtpbfmt v0.0.0-20231025115547-084445ff1adf/go.mod h1:jgxiZysxFPM+iWKwQwPR+y+Jvo54ARd4EisXxKYpB5c= 
github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4= github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0NiuqvtfMY= -github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c= +github.com/redis/go-redis/v9 v9.5.1 h1:H1X4D3yHPaYrkL5X06Wh6xNVM/pX0Ft4RV0vMGvLBh8= +github.com/redis/go-redis/v9 v9.5.1/go.mod h1:hdY0cQFCN4fnSYT6TkisLufl/4W5UIXyv0b/CLO2V2M= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY= @@ -1598,20 +1734,26 @@ github.com/santhosh-tekuri/jsonschema/v5 v5.3.1 h1:lZUw3E0/J3roVtGQ+SCrUrg3ON6Ng github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= github.com/sassoftware/go-rpmutils v0.4.0 h1:ojND82NYBxgwrV+mX1CWsd5QJvvEZTKddtCdFLPWhpg= github.com/sassoftware/go-rpmutils v0.4.0/go.mod h1:3goNWi7PGAT3/dlql2lv3+MSN5jNYPjT5mVcQcIsYzI= +github.com/sassoftware/relic v7.2.1+incompatible h1:Pwyh1F3I0r4clFJXkSI8bOyJINGqpgjJU3DYAZeI05A= +github.com/sassoftware/relic v7.2.1+incompatible/go.mod h1:CWfAxv73/iLZ17rbyhIEq3K9hs5w6FpNMdUT//qR+zk= +github.com/sassoftware/relic/v7 v7.6.2 h1:rS44Lbv9G9eXsukknS4mSjIAuuX+lMq/FnStgmZlUv4= +github.com/sassoftware/relic/v7 v7.6.2/go.mod h1:kjmP0IBVkJZ6gXeAu35/KCEfca//+PKM6vTAsyDPY+k= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29 h1:BkTk4gynLjguayxrYxZoMZjBnAOh7ntQvUkOFmkMqPU= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.29/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= +github.com/segmentio/ksuid v1.0.4 h1:sBo2BdShXjmcugAMwjugoGUdUV0pcxY5mW4xKRn3v4c= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI= github.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE= github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 
h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 h1:udFKJ0aHUL60LboW/A+DfgoHVedieIzIXE8uylPue0U= github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= @@ -1624,8 +1766,24 @@ github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+D github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/signalfx/sapm-proto v0.17.0 h1:KY+9zm/yDOq6uzaguI1RmrJcWxzbkGv0zE6GplA3ytc= github.com/signalfx/sapm-proto v0.17.0/go.mod h1:c8fGx9DjGP7Hqif7g6Zy6E+BCMXK/dERFU2b3faA0gk= -github.com/sigstore/rekor v1.2.2 h1:5JK/zKZvcQpL/jBmHvmFj3YbpDMBQnJQ6ygp8xdF3bY= -github.com/sigstore/rekor v1.2.2/go.mod h1:FGnWBGWzeNceJnp0x9eDFd41mI8aQqCjj+Zp0IEs0Qg= +github.com/sigstore/cosign/v2 v2.2.4 h1:iY4vtEacmu2hkNj1Fh+8EBqBwKs2DHM27/lbNWDFJro= +github.com/sigstore/cosign/v2 v2.2.4/go.mod h1:JZlRD2uaEjVAvZ1XJ3QkkZJhTqSDVtLaet+C/TMR81Y= +github.com/sigstore/fulcio v1.4.5 h1:WWNnrOknD0DbruuZWCbN+86WRROpEl3Xts+WT2Ek1yc= +github.com/sigstore/fulcio v1.4.5/go.mod h1:oz3Qwlma8dWcSS/IENR/6SjbW4ipN0cxpRVfgdsjMU8= +github.com/sigstore/rekor v1.3.6 h1:QvpMMJVWAp69a3CHzdrLelqEqpTM3ByQRt5B5Kspbi8= +github.com/sigstore/rekor v1.3.6/go.mod h1:JDTSNNMdQ/PxdsS49DJkJ+pRJCO/83nbR5p3aZQteXc= +github.com/sigstore/sigstore v1.8.3 h1:G7LVXqL+ekgYtYdksBks9B38dPoIsbscjQJX/MGWkA4= +github.com/sigstore/sigstore v1.8.3/go.mod h1:mqbTEariiGA94cn6G3xnDiV6BD8eSLdL/eA7bvJ0fVs= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3 h1:LTfPadUAo+PDRUbbdqbeSl2OuoFQwUFTnJ4stu+nwWw= +github.com/sigstore/sigstore/pkg/signature/kms/aws v1.8.3/go.mod h1:QV/Lxlxm0POyhfyBtIbTWxNeF18clMlkkyL9mu45y18= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3 h1:xgbPRCr2npmmsuVVteJqi/ERw9+I13Wou7kq0Yk4D8g= +github.com/sigstore/sigstore/pkg/signature/kms/azure v1.8.3/go.mod h1:G4+I83FILPX6MtnoaUdmv/bRGEVtR3JdLeJa/kXdk/0= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3 h1:vDl2fqPT0h3D/k6NZPlqnKFd1tz3335wm39qjvpZNJc= +github.com/sigstore/sigstore/pkg/signature/kms/gcp v1.8.3/go.mod h1:9uOJXbXEXj+M6QjMKH5PaL5WDMu43rHfbIMgXzA8eKI= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3 h1:h9G8j+Ds21zqqulDbA/R/ft64oQQIyp8S7wJYABYSlg= +github.com/sigstore/sigstore/pkg/signature/kms/hashivault v1.8.3/go.mod h1:zgCeHOuqF6k7A7TTEvftcA9V3FRzB7mrPtHOhXAQBnc= +github.com/sigstore/timestamp-authority v1.2.2 h1:X4qyutnCQqJ0apMewFyx+3t7Tws00JQ/JonBiu3QvLE= +github.com/sigstore/timestamp-authority v1.2.2/go.mod h1:nEah4Eq4wpliDjlY342rXclGSO7Kb9hoRrl9tqLW13A= github.com/sijms/go-ora/v2 v2.8.19 h1:7LoKZatDYGi18mkpQTR/gQvG9yOdtc7hPAex96Bqisc= github.com/sijms/go-ora/v2 v2.8.19/go.mod h1:EHxlY6x7y9HAsdfumurRfTd+v8NrEOTR3Xl4FWlH6xk= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= @@ -1634,10 +1792,12 @@ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrf github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= -github.com/skeema/knownhosts 
v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/skydive-project/go-debouncer v1.0.0 h1:cqU19PyN7WXsnSlMTANvnHws6lGcbVOH2aDQzwe6qbk= -github.com/skydive-project/go-debouncer v1.0.0/go.mod h1:7pK+5HBlYCD8W2cXhvMRsMsdWelDEPfpbE6PwSlDX68= +github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= +github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA= +github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog= +github.com/skydive-project/go-debouncer v1.0.1 h1:N75Mdusd65Jjbc7k5t2oo+7qLIdMtSNJKssmpEYuSgo= +github.com/skydive-project/go-debouncer v1.0.1/go.mod h1:7pK+5HBlYCD8W2cXhvMRsMsdWelDEPfpbE6PwSlDX68= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.1.0 h1:MkTeG1DMwsrdH7QtLXy5W+fUxWq+vmb6cLmyJ7aRtF0= github.com/smartystreets/assertions v1.1.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= @@ -1661,8 +1821,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spdx/gordf v0.0.0-20201111095634-7098f93598fb/go.mod h1:uKWaldnbMnjsSAXRurWqqrdyZen1R7kxl8TkmWk2OyM= -github.com/spdx/tools-golang v0.5.4-0.20231108154018-0c0f394b5e1a h1:uuREJ3I15VLjYZuhxjTQnA2bTqzRQX1HKEphYBzqT9o= -github.com/spdx/tools-golang v0.5.4-0.20231108154018-0c0f394b5e1a/go.mod h1:BHs8QEhK6MbFGdyjxvuBtzJtCLrN5bwUBC9fzQlYBXs= +github.com/spdx/tools-golang v0.5.5 h1:61c0KLfAcNqAjlg6UNMdkwpMernhw3zVRwDZ2x9XOmk= +github.com/spdx/tools-golang v0.5.5/go.mod h1:MVIsXx8ZZzaRWNQpUDhC4Dud34edUYJYecciXgrw5vE= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -1680,6 +1840,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/spiffe/go-spiffe/v2 v2.2.0 h1:9Vf06UsvsDbLYK/zJ4sYsIsHmMFknUD+feA7IYoWMQY= +github.com/spiffe/go-spiffe/v2 v2.2.0/go.mod h1:Urzb779b3+IwDJD2ZbN8fVl3Aa8G4N/PiUe6iXC0XxU= github.com/square/certstrap v1.2.0 h1:ecgyABrbFLr8jSbOC6oTBmBek0t/HqtgrMUZCPuyfdw= github.com/square/certstrap v1.2.0/go.mod h1:CUHqV+fxJW0Y5UQFnnbYwQ7bpKXO1AKbic9g73799yw= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= @@ -1732,10 +1894,16 @@ github.com/tedsuo/rata v1.0.0 h1:Sf9aZrYy6ElSTncjnGkyC2yuVvz5YJetBIUKJ4CmeKE= github.com/tedsuo/rata v1.0.0/go.mod h1:X47ELzhOoLbfFIY0Cql9P6yo3Cdwf2CMX3FVZxRzJPc= github.com/terminalstatic/go-xsd-validate v0.1.5 h1:RqpJnf6HGE2CB/lZB1A8BYguk8uRtcvYAPLCF15qguo= github.com/terminalstatic/go-xsd-validate v0.1.5/go.mod h1:18lsvYFofBflqCrvo1umpABZ99+GneNTw2kEEc8UPJw= -github.com/testcontainers/testcontainers-go v0.23.0 h1:ERYTSikX01QczBLPZpqsETTBO7lInqEP349phDOVJVs= 
-github.com/testcontainers/testcontainers-go v0.23.0/go.mod h1:3gzuZfb7T9qfcH2pHpV4RLlWrPjeWNQah6XlYQ32c4I= -github.com/tetratelabs/wazero v1.7.0 h1:jg5qPydno59wqjpGrHph81lbtHzTrWzwwtD4cD88+hQ= -github.com/tetratelabs/wazero v1.7.0/go.mod h1:ytl6Zuh20R/eROuyDaGPkp82O9C/DJfXAwJfQ3X6/7Y= +github.com/testcontainers/testcontainers-go v0.35.0 h1:uADsZpTKFAtp8SLK+hMwSaa+X+JiERHtd4sQAFmXeMo= +github.com/testcontainers/testcontainers-go v0.35.0/go.mod h1:oEVBj5zrfJTrgjwONs1SsRbnBtH9OKl+IGl3UMcr2B4= +github.com/testcontainers/testcontainers-go/modules/localstack v0.33.0 h1:AhbUGUjneEnMyTV5aTsPYzDiAWrba1duPtiV+Z9CKdY= +github.com/testcontainers/testcontainers-go/modules/localstack v0.33.0/go.mod h1:J5vMq1fXXiTfwcJplMClHhn+j8+MbIMv7Lic4d9E8qU= +github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g= +github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= +github.com/thales-e-security/pool v0.0.2 h1:RAPs4q2EbWsTit6tpzuvTFlgFRJ3S8Evf5gtvVDbmPg= +github.com/thales-e-security/pool v0.0.2/go.mod h1:qtpMm2+thHtqhLzTwgDBj/OuNnMpupY8mv0Phz0gjhU= +github.com/theupdateframework/go-tuf v0.7.0 h1:CqbQFrWo1ae3/I0UCblSbczevCCbS31Qvs5LdxRWqRI= +github.com/theupdateframework/go-tuf v0.7.0/go.mod h1:uEB7WSY+7ZIugK6R1hiBMBjQftaFzn7ZCDJcp1tCUug= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= @@ -1750,8 +1918,12 @@ github.com/tidwall/tinylru v1.1.0 h1:XY6IUfzVTU9rpwdhKUF6nQdChgCdGjkMfLzbWyiau6I github.com/tidwall/tinylru v1.1.0/go.mod h1:3+bX+TJ2baOLMWTnlyNWHh4QMnFyARg2TLTQ6OFbzw8= github.com/tidwall/wal v1.1.8 h1:2qDSGdAdjaY3PEvHRva+9UFqgk+ef7cOiW1Qn5JH1y0= github.com/tidwall/wal v1.1.8/go.mod h1:r6lR1j27W9EPalgHiB7zLJDYu3mzW5BQP5KrzBpYY/E= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399 h1:e/5i7d4oYZ+C1wj2THlRK+oAhjeS/TRQwMfkIuet3w0= +github.com/titanous/rocacheck v0.0.0-20171023193734-afe73141d399/go.mod h1:LdwHTNJT99C5fTAzDz0ud328OgXz+gierycbcIx2fRs= +github.com/tjfoc/gmsm v1.4.1 h1:aMe1GlZb+0bLjn+cKTPEvvn9oUEBlJitaZiiBwsbgho= +github.com/tjfoc/gmsm v1.4.1/go.mod h1:j4INPkHWMrhJb38G+J6W4Tw0AbuN8Thu3PbdVYhVcTE= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -1762,6 +1934,10 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7 github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc h1:9lRDQMhESg+zvGYmW5DyG0UqvY96Bu5QYsTLvCHdrgo= github.com/tmthrgd/go-hex v0.0.0-20190904060850-447a3041c3bc/go.mod h1:bciPuU6GHm1iF1pBvUfxfsH0Wmnc2VbpgvbI9ZWuIRs= +github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8= +github.com/tonistiigi/go-csvvalue 
v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE= +github.com/transparency-dev/merkle v0.0.2 h1:Q9nBoQcZcgPamMkGn7ghV8XiTZ/kRxn1yCG81+twTK4= +github.com/transparency-dev/merkle v0.0.2/go.mod h1:pqSy+OXefQ1EDUVmAJ8MUhHB9TXGuzVAT58PqBoHz1A= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/twitchtv/twirp v8.1.3+incompatible h1:+F4TdErPgSUbMZMwp13Q/KgDVuI7HJXP61mNV3/7iuU= github.com/twitchtv/twirp v8.1.3+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A= @@ -1775,7 +1951,9 @@ github.com/twmb/murmur3 v1.1.8 h1:8Yt9taO/WN3l08xErzjeschgZU2QSrwm1kclYq+0aRg= github.com/twmb/murmur3 v1.1.8/go.mod h1:Qq/R7NUyOfr65zD+6Q5IHKsJLwP7exErjN6lyyq3OSQ= github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6 h1:SIKIoA4e/5Y9ZOl0DCe3eVMLPOQzJxgZpfdHHeauNTM= github.com/ua-parser/uap-go v0.0.0-20240611065828-3a4781585db6/go.mod h1:BUbeWZiieNxAuuADTBNb3/aeje6on3DhU3rpWsQSB1E= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= @@ -1794,8 +1972,8 @@ github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= -github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= -github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= +github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs= +github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI= github.com/vibrantbyte/go-antpath v1.1.1 h1:SWDIMx4pSjyo7QoAsgTkpNU7QD0X9O0JAgr5O3TsYKk= github.com/vibrantbyte/go-antpath v1.1.1/go.mod h1:ZqMGIk+no3BL2o6OdEZ3ZDiWfIteuastNSaTFv7kgUY= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= @@ -1824,6 +2002,8 @@ github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlV github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xanzy/go-gitlab v0.102.0 h1:ExHuJ1OTQ2yt25zBMMj0G96ChBirGYv8U7HyUiYkZ+4= +github.com/xanzy/go-gitlab v0.102.0/go.mod h1:ETg8tcj4OhrB84UEgeE8dSuV/0h4BBL1uOV/qK0vlyI= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/xdg-go/pbkdf2 v1.0.0 h1:Su7DPu48wXMwC3bs7MCNG+z4FhcyEuz5dlvchbq0B0c= @@ -1842,8 +2022,9 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= 
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/xor-gate/ar v0.0.0-20170530204233-5c72ae81e2b7 h1:Vo3q7h44BfmnLQh5SdF+2xwIoVnHThmZLunx6odjrHI= @@ -1864,11 +2045,15 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/gopher-lua v1.1.0 h1:BojcDhfyDWgU2f2TOzYK/g5p2gxMrku8oupLDqlnSqE= -github.com/yuin/gopher-lua v1.1.0/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= +github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= +github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms= +github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk= github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +github.com/zeebo/errs v1.3.0 h1:hmiaKqgYZzcVgRL1Vkc1Mn2914BbzB0IBxs+ebeutGs= +github.com/zeebo/errs v1.3.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= github.com/zorkian/go-datadog-api v2.30.0+incompatible h1:R4ryGocppDqZZbnNc5EDR8xGWF/z/MxzWnqTUijDQes= github.com/zorkian/go-datadog-api v2.30.0+incompatible/go.mod h1:PkXwHX9CUQa/FpB9ZwAD45N1uhCW4MT/Wj7m36PbKss= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -1897,195 +2082,203 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/collector v0.115.0 h1:qUZ0bTeNBudMxNQ7FJKS//TxTjeJ7tfU/z22mcFavWU= -go.opentelemetry.io/collector v0.115.0/go.mod h1:66qx0xKnVvdwq60e1DEfb4e+zmM9szhPsv2hxZ/Mpj4= -go.opentelemetry.io/collector/client v1.21.0 h1:3Kes8lOFMYVxoxeAmX+DTEAkuS1iTA3NkSfqzGmygJA= -go.opentelemetry.io/collector/client v1.21.0/go.mod h1:jYJGiL0UA975OOyHmjbQSokNWt1OiviI5KjPOMUMGwc= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componentstatus v0.115.0 
h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= -go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configauth v0.115.0 h1:xa+ALdyPgva3rZnLBh1H2oS5MsHP6JxSqMtQmcELnys= -go.opentelemetry.io/collector/config/configauth v0.115.0/go.mod h1:C7anpb3Rf4KswMT+dgOzkW9UX0z/65PLORpUw3p0VYc= -go.opentelemetry.io/collector/config/configcompression v1.21.0 h1:0zbPdZAgPFMAarwJEC4gaR6f/JBP686A3TYSgb3oa+E= -go.opentelemetry.io/collector/config/configcompression v1.21.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= -go.opentelemetry.io/collector/config/configgrpc v0.115.0 h1:gZzXSFe6hB3RUcEeAYqk1yT+TBa+X9tp6/1x29Yg2yk= -go.opentelemetry.io/collector/config/configgrpc v0.115.0/go.mod h1:107lRZ5LdQPMdGJGd4m1GhyKxyH0az2cUOqrJgTEN8E= -go.opentelemetry.io/collector/config/confighttp v0.115.0 h1:BIy394oNXnqySJwrCqgAJu4gWgAV5aQUDD6k1hy6C8o= -go.opentelemetry.io/collector/config/confighttp v0.115.0/go.mod h1:Wr50ut12NmCEAl4bWLJryw2EjUmJTtYRg89560Q51wc= -go.opentelemetry.io/collector/config/confignet v1.21.0 h1:PeQ5YrMnfftysFL/WVaSrjPOWjD6DfeABY50pf9CZxU= -go.opentelemetry.io/collector/config/confignet v1.21.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= -go.opentelemetry.io/collector/config/configopaque v1.21.0 h1:PcvRGkBk4Px8BQM7tX+kw4i3jBsfAHGoGQbtZg6Ox7U= -go.opentelemetry.io/collector/config/configopaque v1.21.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= -go.opentelemetry.io/collector/config/configretry v1.21.0 h1:ZHoOvAkEcv5BBeaJn8IQ6rQ4GMPZWW4S+W7R4QTEbZU= -go.opentelemetry.io/collector/config/configretry v1.21.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/config/configtls v1.21.0 h1:ZfrlAYgBD8lzp04W0GxwiDmUbrvKsvDYJi+wkyiXlpA= -go.opentelemetry.io/collector/config/configtls v1.21.0/go.mod h1:5EsNefPfVCMOTlOrr3wyj7LrsOgY7V8iqRl8oFZEqtw= -go.opentelemetry.io/collector/config/internal v0.115.0 h1:eVk57iufZpUXyPJFKTb1Ebx5tmcCyroIlt427r5pxS8= -go.opentelemetry.io/collector/config/internal v0.115.0/go.mod h1:OVkadRWlKAoWjHslqjWtBLAne8ceQm8WYT71ZcBWLFc= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 h1:YLf++Z8CMp86AanfOCWUiE7vKbb1kSjgC3a9VJoxbD4= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0/go.mod h1:aSWLYcmgZZJDNtWN1M8JKQuehoGgOxibl1KuvKTar4M= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 h1:+zukkM+3l426iGoJkXTpLB2Z8QnZFu26TkGPjh5Rn/4= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0/go.mod h1:BXBpQhF3n4CNLYO2n/mWZPd2U9ekpbLXLRGZrun1VfI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 h1:NYYGM+SgIlTuNGjd8eGzDr8DkvOe4q7cXon8djF9yyI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0/go.mod h1:XRYbuwqq1awFuNhLDUv4aSvn6MzqX+abcevx1O+APJI= 
-go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0 h1:2EEUI2DzA2DvrvCImMWRSNqIHdRJ6+qbgvZL44Zb2ac= -go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.21.0/go.mod h1:axezjjQWY4kZc5pr/+wOKAuqSYMhea/tWzP5S30h+dc= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 h1:P3Q9RytCMY76ORPCnkkjOa4fkuFqmZiQRor+F/nPlYE= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0/go.mod h1:xhYhHK3yLQ78tsoaKPIGUfFulgy961ImOe2gATH3RQc= -go.opentelemetry.io/collector/connector v0.115.0 h1:4Kkm3HQFzNT1eliMOB8FbIn+PLMRJ2qQku5Vmy3V8Ko= -go.opentelemetry.io/collector/connector v0.115.0/go.mod h1:+ByuAmYLrYHoKh9B+LGqUc0N2kXcN2l8Dea8Mp6brZ8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 h1:aW1f4Az0I+QJyImFccNWAXqik80bnNu27aQqi2hFfD8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0/go.mod h1:lmynB1CucydOsHa8RSSBh5roUZPfuiv65imXhtNzClM= -go.opentelemetry.io/collector/connector/connectortest v0.115.0 h1:GjtourFr0MJmlbtEPAZ/1BZCxkNAeJ0aMTlrxwftJ0k= -go.opentelemetry.io/collector/connector/connectortest v0.115.0/go.mod h1:f3KQXXNlh/XuV8elmnuVVyfY92dJCAovz10gD72OH0k= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= -go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0 h1:gaIhzpaGFWauiyznrQ3f++TbcdXxA5rpsX3L9uGjMM8= -go.opentelemetry.io/collector/consumer/consumererror/consumererrorprofiles v0.115.0/go.mod h1:7oXvuGBSawS5bc413lh1KEMcXkqBcrCqZQahOdnE24U= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= -go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= -go.opentelemetry.io/collector/exporter/debugexporter v0.115.0 h1:gb9VMQhcbvYqp0SJ4Hp8R9XqOLNLsoTgNJCPKpNEaVc= -go.opentelemetry.io/collector/exporter/debugexporter v0.115.0/go.mod h1:H/HS1UJlcZPNBbOcrsGZc2sPdQDHtbOjHOxMtJkmlcU= -go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0 h1:fetbc740pODH6JW+H49SW0hiAJwQE+/B0SbuIlaY2rg= -go.opentelemetry.io/collector/exporter/exporterhelper/exporterhelperprofiles v0.115.0/go.mod h1:oEKZ/d5BeaCK6Made9iwaeqmlT4lRbJSlW9nhIn/TwM= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= -go.opentelemetry.io/collector/exporter/nopexporter v0.115.0 
h1:ufwLbNp7mfoSxWJcoded3D9f/nIVvCwNa/0+ZqxzkzU= -go.opentelemetry.io/collector/exporter/nopexporter v0.115.0/go.mod h1:iIJgru1t+VJVVCE5KMAKjXbq9RkK4/5FCClnWnAlGtc= -go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0 h1:Kqr31VFrQvgEMzeg8T1JSXWacjUQoZph39efKN8jBpY= -go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0/go.mod h1:5uy/gduFx2mH0GxJ84sY75NfzQJb9xYmgiL9Pf0dKF8= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0 h1:I0qzSWGbgph+iva5/jU8tkeUTkkqqcj8+UzMxg5ubF8= -go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0/go.mod h1:cUrv5EG12iOs5MXaecfi9K+ZATEELefpyZY6Hj4NlUo= -go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= -go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= -go.opentelemetry.io/collector/extension/auth v0.115.0 h1:TTMokbBsSHZRFH48PvGSJmgSS8F3Rkr9MWGHZn8eJDk= -go.opentelemetry.io/collector/extension/auth v0.115.0/go.mod h1:3w+2mzeb2OYNOO4Bi41TUo4jr32ap2y7AOq64IDpxQo= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0 h1:OZe7dKbZ01qodSpZU0ZYzI6zpmmzJ3UvfdBSFAbSgDw= -go.opentelemetry.io/collector/extension/auth/authtest v0.115.0/go.mod h1:fk9WCXP0x91Q64Z8HZKWTHh9PWtgoWE1KXe3n2Bff3U= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0 h1:sZXw0+77092pq24CkUoTRoHQPLQUsDq6HFRNB0g5yR4= -go.opentelemetry.io/collector/extension/experimental/storage v0.115.0/go.mod h1:qjFH7Y3QYYs88By2ZB5GMSUN5k3ul4Brrq2J6lKACA0= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 h1:/g25Hp5aoCNKdDjIb3Fc7XRglO8yaBRFLO/IUNPnqNI= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0/go.mod h1:EQx7ETiy330O6q05S2KRZsRNDg0aQEeJmVl7Ipx+Fcw= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= -go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 h1:zYrZZocc7n0ZuDyXNkIaX0P0qk2fjMQj7NegwBJZA4k= -go.opentelemetry.io/collector/extension/zpagesextension v0.115.0/go.mod h1:OaXwNHF3MAcInBzCXrhXbTNHfIi9b7YGhXjtCFZqxNY= -go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= -go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= -go.opentelemetry.io/collector/filter v0.115.0 h1:pYnHUFDSHSjEIFZit+CU09itVkDXgV+WcV2HOkjvQcE= -go.opentelemetry.io/collector/filter v0.115.0/go.mod h1:aewQ+jmvpH88gPVWpNXiWSm+wwJVxTK4f23ex2NMd2c= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 h1:6DRiSECeApFq6Jj5ug77rG53R6FzJEZBfygkyMEXdpg= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0/go.mod h1:vgQf5HQdmLQqpDHpDq2S3nTRoUuKtRcZpRTsy+UiwYw= -go.opentelemetry.io/collector/internal/memorylimiter v0.115.0 h1:U07IJxyHZXM6eLn8cOq/Lycx6DhQZhpDOuYtIRw/d6I= -go.opentelemetry.io/collector/internal/memorylimiter v0.115.0/go.mod h1:KNcU8WVpW5y7Ij6CGnsefb7q1UZT7VvrTDhe5FKNOA4= -go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0 h1:9TL6T6ALqDpumUJ0tYIuPIg5LGo4r6eoqlNArYX116o= -go.opentelemetry.io/collector/internal/sharedcomponent v0.115.0/go.mod h1:SgBLKMh11bOTPR1bdDZbi5MlqsoDBBFI3uBIwnei+0k= -go.opentelemetry.io/collector/otelcol v0.115.0 h1:wZhFGrSCZcTQ4qw4ePjI2PaSrOCejoQKAjprKD/xavs= -go.opentelemetry.io/collector/otelcol v0.115.0/go.mod h1:iK8DPvaizirIYKDl1zZG7DDYUj6GkkH4KHifVVM88vk= 
-go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0 h1:HNlFpQujlnvawBk8nvMGxzjDHWDCfSprxem/EpQn4u8= -go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0/go.mod h1:WsMbqYl2rm3nPFbdxQqyLXf4iu97nYLeuQ1seZIpV3Y= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 h1:3l9ruCAOrssTUDnyChKNzHWOdTtfThnYaoPZ1/+5sD0= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0/go.mod h1:2Myg+law/5lcezo9PhhZ0wjCaLYdGK24s1jDWbSW9VY= -go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= -go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= -go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 h1:dgw1jcE/YVFTs41b3Y7SerU3BBSyMEE93AYV+BAxR8E= -go.opentelemetry.io/collector/processor/batchprocessor v0.115.0/go.mod h1:imG1kDEq14UGlxyCjSCf1TUEFdSWRvF7tLoYX9nixEQ= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 h1:LCA2jwxy1PRc7X/AtRJfMdOANh5rVLdwo5PAM+gAuyo= -go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0/go.mod h1:gPcHyza7Rek3jfrQFxw99fcWBDkkRqBaMHcUz9yYv5I= -go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0 h1:r1UF8LPICTRXBL0685zV/CC8J4sWg/qm1g+sHOYMq2Y= -go.opentelemetry.io/collector/processor/processorhelper/processorhelperprofiles v0.115.0/go.mod h1:3erq5umu5a7DKXo4PBm4I5yJjc6r0aJNvBV2nVSPDuE= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= -go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= -go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= -go.opentelemetry.io/collector/receiver v0.115.0 h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= -go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= -go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 h1:87dxAcHekbXqLtjcQjnK1An2PWkWAhTly+EXzPEgYOE= -go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0/go.mod h1:Llu88KNSNwvmYPRr2PMDDbVY9zHfHEbPPB4yTjjQQe0= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 h1:NqMWsGuVy6y6VKTaPeJS7NZ9KAxhE/xyGUC7GaLYm/o= -go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0/go.mod h1:9ituzngnjsh/YvO+Phayq9BTk/nw0rgK5ZVvX1oxULk= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= -go.opentelemetry.io/collector/receiver/receiverprofiles 
v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= -go.opentelemetry.io/collector/scraper v0.115.0 h1:hbfebO7x1Xm96OwqeuLz5w7QAaB3ZMlwOkUo0XzPadc= -go.opentelemetry.io/collector/scraper v0.115.0/go.mod h1:7YoCO6/4PeExLiX1FokcydJGCQUa7lUqZsqXokJ5VZ4= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/collector/service v0.115.0 h1:k4GAOiI5tZgB2QKgwA6c3TeAVr7QL/ft5cOQbzUr8Iw= -go.opentelemetry.io/collector/service v0.115.0/go.mod h1:DKde9LMhNebdREecDSsqiTFLI2wRc+IoV4/wGxU6goY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector v0.118.0 h1:OBqxppK9Ul6bzEabcHsx11pXwgp05sBpqYxIxiOkyFo= +go.opentelemetry.io/collector v0.118.0/go.mod h1:yxfijW5k9dwd9sifTBAEoItE+ahFEtOlyvex1B99uno= +go.opentelemetry.io/collector/client v1.24.0 h1:eH7ctqDnRWNH5QVVbAvdYYdkvr8QWLkEm8FUPaaYbWE= +go.opentelemetry.io/collector/client v1.24.0/go.mod h1:C/38SYPa0tTL6ikPz/glYz6f3GVzEuT4nlEml6IBDMw= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configauth v0.118.0 h1:uBH/s9kRw/m7VWuibrkCzbXSCVLf9ElKq9NuKb0wAwk= +go.opentelemetry.io/collector/config/configauth v0.118.0/go.mod h1:uAmSGkihIENoIah6mEQ8S/HX4oiFOHZu3EoZLZwi9OI= +go.opentelemetry.io/collector/config/configcompression v1.24.0 h1:jyM6BX7wYcrh+eVSC0FMbWgy/zb9iP58SerOrvisccE= +go.opentelemetry.io/collector/config/configcompression v1.24.0/go.mod h1:LvYG00tbPTv0NOLoZN0wXq1F5thcxvukO8INq7xyfWU= +go.opentelemetry.io/collector/config/configgrpc v0.118.0 h1:if8VfsnnHwVX/E+GgehVXKh85YtAtVci+c4A/M5gPh0= +go.opentelemetry.io/collector/config/configgrpc v0.118.0/go.mod h1:TZqpu5s/iEW5XmhSnzrhXCUQ3W5qaICNvlllBf3GGcw= +go.opentelemetry.io/collector/config/confighttp v0.118.0 h1:ey50dfySOCPgUPJ1x8Kq6CmNcv/TpZHt6cYmPhZItj0= +go.opentelemetry.io/collector/config/confighttp v0.118.0/go.mod h1:4frheVFiIfKUHuD/KAPn+u+d+EUx5GlQTNmoI1ftReA= +go.opentelemetry.io/collector/config/confignet v1.24.0 h1:Je1oO3qCUI4etX9ZVyav/NkeD+sfzZQRmwMGy51Oei4= +go.opentelemetry.io/collector/config/confignet v1.24.0/go.mod h1:ZppUH1hgUJOubawEsxsQ9MzEYFytqo2GnVSS7d4CVxc= +go.opentelemetry.io/collector/config/configopaque v1.24.0 h1:EPOprMDreZPKyIgT0/eVBvEGQVvq7ncvBCBVnWerj54= +go.opentelemetry.io/collector/config/configopaque v1.24.0/go.mod h1:sW0t0iI/VfRL9VYX7Ik6XzVgPcR+Y5kejTLsYcMyDWs= +go.opentelemetry.io/collector/config/configretry v1.24.0 h1:sIPHhNNY2YlHMIJ//63iMxIqlgDeGczId0uUb1njsPM= 
+go.opentelemetry.io/collector/config/configretry v1.24.0/go.mod h1:cleBc9I0DIWpTiiHfu9v83FUaCTqcPXmebpLxjEIqro= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/config/configtls v1.24.0 h1:rOhl8qjIlUVVRHnwQj6/vZe6cuCYImyx7aVDBR35bqI= +go.opentelemetry.io/collector/config/configtls v1.24.0/go.mod h1:d0OdfkbuYEMYDBJLSbpH0wPI29lmSiFT3geqh/ygF2k= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0 h1:jAtaNR4b5gnddNzyfcpIhURSDq4rai667yV1Ngmku2Y= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0/go.mod h1:X0BuIYyscilkwApnmxlrdz0kTVWgKXq2ih8sTWm8Zio= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0 h1:QoQulv9L20MhD1TFWH1scbRoo0bxbZqF2quh1VRNMh4= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0/go.mod h1:ljIH/rWIUHJeWIDEKMRU/ufol/bcgC7ufamchtuTAwM= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0 h1:1mbj6HlVZ4LNVBYrxM5jQEJKxinpe0LtNZwI7i8pQNY= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0/go.mod h1:xM2qJmW6mB1lzFpLWIoxX/h4tUnoYTICZoqPND9YWi0= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0 h1:/Z3LvIRPJTJEu6mOqELxPiiKMfyl9sUxoZOR/qc7D1I= +go.opentelemetry.io/collector/confmap/provider/httpsprovider v1.24.0/go.mod h1:C61Rq3ppnFUoieBGiZxqDnOUKK8ZmmH2RzDXG1P+OUo= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0 h1:Ncr7a3HbVpmjAvPHd0yQM/MV2p7HqJe+zvDPmHdjSCI= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0/go.mod h1:i7omVh3uK8efpr7/fSAcOh8Xiv3FLYL26wUuON9i1WI= +go.opentelemetry.io/collector/connector v0.118.0 h1:amay4UriWrtydaAxjQ8/MTTaVYERlZcYLCAGcjoBejw= +go.opentelemetry.io/collector/connector v0.118.0/go.mod h1:R6jbMrHZYg21pZ0nsoo4cSHIn7Lrdpi5R3OWcDEQwhE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0 h1:hLMSTqtFWveXa3b1qJMEaWuaX3PHx7dfl8G/bsac2fE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0/go.mod h1:hm6TNLiQLe65NpENCFsFoiO8fOf3BbN4UF1heUsT73Q= +go.opentelemetry.io/collector/connector/xconnector v0.118.0 h1:0s6rwZmt8va6xd3BEZs7s2QBNFNjLv0kzYi6l44dKqc= +go.opentelemetry.io/collector/connector/xconnector v0.118.0/go.mod h1:12mJPGWo90iZrrpgOkmSd5TkejweL34V/R6AqwqJnMA= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0 h1:Cx//ZFDa6wUEoRDRYRZ/Rkb52dWNoHj2e9FdlcM9jCA= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0/go.mod h1:2mhnzzLYR5zS2Zz4h9ZnRM8Uogu9qatcfQwGNenhing= +go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0 h1:/kkWdw1PQtPb1noZMTt6tbgP1ntWdJ835u1o45nYhTg= +go.opentelemetry.io/collector/consumer/consumererror/xconsumererror v0.118.0/go.mod h1:2mdXnTT0nPd/KTG9w29cc1OGKBLzL2HW+x/o7QVpCpI= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= 
+go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/exporter v0.118.0 h1:PE0vF2U+znOB8OVLPWNw40bGCoT/5QquQ8Xbz4i9Rb0= +go.opentelemetry.io/collector/exporter v0.118.0/go.mod h1:5ST3gxT/RzE/vg2bcGDtWJxlQF1ypwk50UpmdK1kUqY= +go.opentelemetry.io/collector/exporter/debugexporter v0.118.0 h1:MUZl270SJSU/fDpIr5cJ+JEPrK6OEsHllmKauWYhxxQ= +go.opentelemetry.io/collector/exporter/debugexporter v0.118.0/go.mod h1:SW3j4Bl3uB/nbTC1D0hog9TcelVot9RXQnScCwx8azw= +go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0 h1:wC4IyE98DR4eXVyT7EnA4iJ6s+sbUTZVq/5KoVWSKDw= +go.opentelemetry.io/collector/exporter/exporterhelper/xexporterhelper v0.118.0/go.mod h1:spjZv9QX+pCcx/ECSqlo/UKCYJzp2rR5NsvIgpfdUxQ= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0 h1:8gWky42BcJsxoaqWbnqCDUjP3Y84hjC6RD/UWHwR7sI= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0/go.mod h1:UbpQBZvznA8YPqqcKlafVIhB6Qa4fPf2+I67MUGyNqo= +go.opentelemetry.io/collector/exporter/nopexporter v0.118.0 h1:YX+pX1bVv3IJtBmrAN0waJnFWe9ynCfIRhvmVMTg4Cs= +go.opentelemetry.io/collector/exporter/nopexporter v0.118.0/go.mod h1:vWBRmNyRqN7nzu7sXjrSuVZPnpKZnKAG4ct01jL8xrg= +go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 h1:kfVfskZEroh3zs8HmdCLeo9weAJT5oedd+04McXEBSU= +go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0/go.mod h1:iyvbf05lZdh+KObvNF0uEpaaV9YoQNofm1RRamWbq78= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 h1:8ShK60uf6nY6TlSYBZ2Y7eh3sv0WwNkUKgmh3P1U/2U= +go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0/go.mod h1:UJXry//sSRs04eg35nZkT1wxP43tPxz/3wbf26eLRkc= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0 h1:PZAo1CFhZHfQwtzUNj+Fwcv/21pWHJHTsrIddD096fw= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0/go.mod h1:x4J+qyrRcp4DfWKqK3DLZomFTIUhedsqCQWqq6Gqps4= +go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA= +go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg= +go.opentelemetry.io/collector/extension/auth v0.118.0 h1:+eMNUBUK1JK9A3mr95BasbWE90Lxu+WlR9sqS36sJms= +go.opentelemetry.io/collector/extension/auth v0.118.0/go.mod h1:MJpYcRGSERkgOhczqTKoAhkHmcugr+YTlRhc/SpYYYI= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0 h1:KIORXNc71vfpQrrZOntiZesRCZtQ8alrASWVT/zZkyo= +go.opentelemetry.io/collector/extension/auth/authtest v0.118.0/go.mod h1:0ZlSP9NPAfTRQd6Tx4mOH0IWrp6ufHaVN//L9Mb87gM= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 h1:I/SjuacUXdBOxa6ZnVMuMKkZX+m40tUm+5YKqWnNv/c= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0/go.mod h1:IxDALY0rMvsENrVui7Y5tvvL/xHNgMKuhfiQiSHMiTQ= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4= +go.opentelemetry.io/collector/extension/xextension v0.118.0 h1:P6gvJzqnH9ma2QfnWde/E6Xu9bAzuefzIwm5iupiVPE= +go.opentelemetry.io/collector/extension/xextension v0.118.0/go.mod h1:ne4Q8ZtRlbC0Etr2hTcVkjOpVM2bE2xy1u+R80LUkDw= +go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 h1:XkaLvST4p1/i/dsk5yCwFG4HJUUr6joCbegJc2MEOrE= 
+go.opentelemetry.io/collector/extension/zpagesextension v0.118.0/go.mod h1:alaAK7I7UeM1Hcs/eNqIjTLIZpqrk3mD1Ua42mJ7JnU= +go.opentelemetry.io/collector/featuregate v1.24.0 h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY= +go.opentelemetry.io/collector/featuregate v1.24.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/filter v0.118.0 h1:mvf08g5VHUcyhqobqId2bVGhgcs1RNR69INGlT0LEsA= +go.opentelemetry.io/collector/filter v0.118.0/go.mod h1:Pgii0Ad2PXdxYSYYqki6Mr4gZdueJG9rDOiaB3fXf3Q= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 h1:affTj1Qxjbg9dZ1x2tbV9Rs9/otZQ1lHA++L8qB5KiQ= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0/go.mod h1:9mbE68mYdtTyozr3jTtNMB1RA5F8/dt2aWVYSu6bsQ4= +go.opentelemetry.io/collector/internal/memorylimiter v0.118.0 h1:F2FgIe7N4UBQKybKEmpcpFjFBfVLR7ogQHTGAPQ04rc= +go.opentelemetry.io/collector/internal/memorylimiter v0.118.0/go.mod h1:1UXOl4BMaJl9hOlORAJvXNt1jc0GJazCRy9ieDdMkxw= +go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0 h1:aCiwkzBL4VyPEUBmEcTnoPyld5EClJGbwyUNJhHNgEo= +go.opentelemetry.io/collector/internal/sharedcomponent v0.118.0/go.mod h1:drV6vD4acelEUOjM9cgxV5ILs8q2AYUh3EV+Pljdorg= +go.opentelemetry.io/collector/otelcol v0.118.0 h1:uSD3wU0sO4vsw5VvWI2yUFLggLdq1BWN/nC1LJXIhMg= +go.opentelemetry.io/collector/otelcol v0.118.0/go.mod h1:OdKz/AXj+ewCwXp/acZCBIoMIYiIxeNRNkbqUXvWi+o= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.118.0 h1:s4yLzDUPzzPElvcOqth7iOuKe+eBo8iXy6bzAy57sXA= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.118.0/go.mod h1:nNDwBOLXNHVnALpcBzkWQ/770WB3IFvEVgLjgujt3Eo= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.116.0 h1:vRdnwIU40bYtxntVOmxg4Bhrh9QVKtx5wwlxK21rc1s= +go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.116.0/go.mod h1:KxDMUOfbVy8lzZ85CZEG3gCJEYMyWiBKdN+HWUwQWTM= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 h1:ZUVF1MYNQYZvmuL30KfP+QbVGSbFZvldBM9hgCe4J4k= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0/go.mod h1:XgG1ktGO9J1f6fasMYPWSXL9Raan/VYB9vddKKWp5hQ= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/batchprocessor v0.118.0 h1:odyJ9l5eakr+TS8sr6U9rz53QD5ZwewL/6pLUtFTJBs= +go.opentelemetry.io/collector/processor/batchprocessor v0.118.0/go.mod h1:fcHRefknjoLMpCRQ9LKEEzrrmSFUejEaTSxCqj5lHhI= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.118.0 h1:1v9VB9lJdo5kNT448Ba1jk9psS4+iv8clooiDU0/5WM= +go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.118.0/go.mod 
h1:UjlRdaLezSHt+5vX9erJu24HmTMw9mefQSQLatcSwG4= +go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.118.0 h1:FAuXTUGtk82XDeNC2EIsK8Ad2I0GrbK9zLT6piwjNeA= +go.opentelemetry.io/collector/processor/processorhelper/xprocessorhelper v0.118.0/go.mod h1:F/sHViojq+fH2rEAJcPC/o71EE5aFAideVkVBu59e9k= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/receiver v0.118.0 h1:X4mspHmbbtwdCQZ7o370kNmdWfxRnK1FrsvEShCCKEc= +go.opentelemetry.io/collector/receiver v0.118.0/go.mod h1:wFyfu6sgrkDPLQoGOGMuChGZzkZnYcI/tPJWV4CRTzs= +go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0 h1:JeOZxB26tIIBshKgzhWoLsC90TLF1ftyL0JSVyFtOBk= +go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0/go.mod h1:cxUUVD5rXqBIK1ynSuR0cyJ1B8s1VWx4xZunZ31+EAM= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 h1:Nud8aaRDb86K2kBeqMTjqAKDUV00JDn+G4wUZ3hDlAk= +go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0/go.mod h1:MJvDEzWJnm1FMoIoTKmhlT3pPmwJP+65GKWy0lAzd30= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0 h1:XlMr2mPsyXJsMUOqCpEoY3uCPsLZQbNA5fmVNDGB7Bw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0/go.mod h1:dtu/H1RNjhy11hTVf/XUfc02uGufMhYYdhhYBbglcUg= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 h1:dzECve9e0H3ot0JWnWPuQr9Y84RhOYSd0+CjvJskx7Y= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0/go.mod h1:Lv1nD/mSYSP64iV8k+C+mWWZZOMLRubv9d1SUory3/E= +go.opentelemetry.io/collector/scraper v0.118.0 h1:944QgQVZ7PM0L9WIwgRPY0LbbHX5qsk2x4uxDO1IOAQ= +go.opentelemetry.io/collector/scraper v0.118.0/go.mod h1:wIa4bIqiU9bkeg3v5QQybwz1+K5DjrP1Afc13Kt22Cw= +go.opentelemetry.io/collector/scraper/scraperhelper v0.118.0 h1:kZu4TgGGSWlNP9ogVr3pVQGX6J/P8ooPj8wMH5+aWyQ= +go.opentelemetry.io/collector/scraper/scraperhelper v0.118.0/go.mod h1:NKOcwL580ycua1HQ9K3OUucBsMsVL5DbvOJxGtg4chs= +go.opentelemetry.io/collector/scraper/scrapertest v0.118.0 h1:1zqF7Rs/RuvUITsxGJSDsvVZEqyDFW5xe1nvlNs2+HE= +go.opentelemetry.io/collector/scraper/scrapertest v0.118.0/go.mod h1:lUUX279TfqMQ63VdAdf/cpX4AUuMLPHS0hJcjfyzKkg= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/service v0.118.0 h1:acZ9LzUbEF5M3G7o5FgenPJVuuM2y8c4HW5JVm648L4= +go.opentelemetry.io/collector/service v0.118.0/go.mod h1:uw3cl3UtkAOrEr8UQV2lXKjyTIbhWxURaQec8kE+Pic= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw= go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0 h1:yMkBS9yViCc7U7yeLzJPM2XizlfdVvBRSmsQDWu6qc0= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.56.0/go.mod 
h1:n8MR6/liuGB5EmTETUBeU5ZgqMOlqKRxUaqPQBOANZ8= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0 h1:UP6IpuHFkUgOQL9FFQFrZ+5LiwhhYRbi7VZSIx6Nj5s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.56.0/go.mod h1:qxuZLtbq5QDtdeSHsS7bcf6EH6uO6jUAgk764zd3rhM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j8Bx/OTV86laEl8uqo= go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ= go.opentelemetry.io/contrib/zpages v0.56.0 h1:W7vP6s3juzL5KiHpr41zLNmsJ0QAZudYu8ay0zGAoko= go.opentelemetry.io/contrib/zpages v0.56.0/go.mod h1:IxPRP4TYHw9jLeaEOSDIiA9zmyJNZNO6sbW55iMvSXs= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0/go.mod h1:WXbYJTUaZXAbYd8lbgGuvih0yuCfOFC5RJoYnoLcGz8= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0 h1:t/Qur3vKSkUCcDVaSumWF2PKHt85pc7fRvFuoVT8qFU= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.32.0/go.mod h1:Rl61tySSdcOJWoEgYZVtmnKdA0GeKrSqkHC1t+91CH8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0 h1:K0XaT3DwHAcV4nKLzcQvwAgSyisUghWoY20I7huthMk= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.31.0/go.mod h1:B5Ki776z/MBnVha1Nzwp5arlzBbE3+1jk+pGmaP5HME= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy03iVTXP6ffeN2iXrxfGsZGCjVx0/4KlizjyBwU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0 h1:rFwzp68QMgtzu9PgP3jm9XaMICI6TsofWWPcBDKwlsU= -go.opentelemetry.io/otel/exporters/prometheus v0.54.0/go.mod h1:QyjcV9qDP6VeK5qPyKETvNjmaaEc7+gqjh4SS0ZYzDU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 
h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0 h1:wpMfgF8E1rkrT1Z6meFh1NDtownE9Ii3n3X2GJYjsaU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.33.0/go.mod h1:wAy0T/dUbs468uOlkT31xjvqQgEVXv58BRFWEgn5v/0= +go.opentelemetry.io/otel/exporters/prometheus v0.55.0 h1:sSPw658Lk2NWAv74lkD3B/RSDb+xRFx46GjkrL3VUZo= +go.opentelemetry.io/otel/exporters/prometheus v0.55.0/go.mod h1:nC00vyCmQixoeaxF6KNyP42II/RHa9UdruK02qBmHvI= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0 h1:TwmL3O3fRR80m8EshBrd8YydEZMcUCsZXzOUlnFohwM= go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.7.0/go.mod h1:tH98dDv5KPmPThswbXA0fr0Lwfs+OhK8HgaCo7PjRrk= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0 h1:SZmDnHcgp3zwlPBS2JX2urGYe/jBKEIT6ZedHRUyCz8= go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.32.0/go.mod h1:fdWW0HtZJ7+jNpTKUR0GpMEDP69nR8YBJQxNiVCE3jk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0 h1:W5AWUn/IVe8RFb5pZx1Uh9Laf/4+Qmm4kJL5zPuvR+0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.33.0/go.mod h1:mzKxJywMNBdEX8TSJais3NnsVZUaJ+bAy6UxPTng2vk= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ= go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v1.3.1 
h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.step.sm/crypto v0.44.2 h1:t3p3uQ7raP2jp2ha9P6xkQF85TJZh+87xmjSLaib+jk= +go.step.sm/crypto v0.44.2/go.mod h1:x1439EnFhadzhkuaGX7sz03LEMQ+jV4gRamf5LCZJQQ= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -2122,8 +2315,8 @@ go4.org/netipx v0.0.0-20220812043211-3cc044ffd68d/go.mod h1:tgPU4N2u9RByaTN3NC2p go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 h1:lGdhQUN/cnWdSH3291CUuxSEqc+AsGTiDxPP3r2J0l4= go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E= -golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg= -golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/arch v0.13.0 h1:KCkqVVV1kGg0X87TFysjCJ8MxtZEIU4Ja/yXGeoECdA= +golang.org/x/arch v0.13.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181127143415-eb0de9b17e85/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -2147,8 +2340,9 @@ golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -2159,8 +2353,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 
h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= @@ -2194,6 +2388,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2250,15 +2446,15 @@ golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2266,8 +2462,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 
v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2283,6 +2479,7 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2378,8 +2575,10 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2392,9 +2591,9 @@ golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -2418,8 +2617,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.5.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2429,7 +2628,6 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3 golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -2480,8 +2678,9 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2552,10 +2751,10 @@ google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -2574,8 +2773,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a h1:p51n6zkL483uumoZhCSGtHCem9kDeU05G5jX/wYI9gw= google.golang.org/grpc/examples v0.0.0-20221020162917-9127159caf5a/go.mod h1:gxndsbNG1n4TZcHGgsYEfVGnTxqfEdfiDv6/DADXX9o= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2594,8 +2793,8 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/DataDog/dd-trace-go.v1 v1.69.1 h1:grTElrPaCfxUsrJjyPLHlVPbmlKVzWMxVdcBrGZSzEk= gopkg.in/DataDog/dd-trace-go.v1 v1.69.1/go.mod h1:U9AOeBHNAL95JXcd/SPf4a7O5GNeF/yD13sJtli/yaU= gopkg.in/Knetic/govaluate.v3 v3.0.0 h1:18mUyIt4ZlRlFZAAfVetz4/rzlJs9yhN+U02F4u1AOc= @@ -2614,6 +2813,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/go-jose/go-jose.v2 v2.6.3 h1:nt80fvSDlhKWQgSWyHyy5CfmlQr+asih51R8PTWNKKs= +gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= gopkg.in/h2non/filetype.v1 v1.0.1/go.mod h1:M0yem4rwSX5lLVrkEuRRp2/NinFMD5vgJ4DlAhZcfNo= gopkg.in/inf.v0 
v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -2666,8 +2867,8 @@ k8s.io/apimachinery v0.31.4 h1:8xjE2C4CzhYVm9DGf60yohpNUh5AEBnPxCryPBECmlM= k8s.io/apimachinery v0.31.4/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= k8s.io/apiserver v0.31.2 h1:VUzOEUGRCDi6kX1OyQ801m4A7AUPglpsmGvdsekmcI4= k8s.io/apiserver v0.31.2/go.mod h1:o3nKZR7lPlJqkU5I3Ove+Zx3JuoFjQobGX1Gctw6XuE= -k8s.io/autoscaler/vertical-pod-autoscaler v0.13.0 h1:pH6AsxeBZcyX6KBqcnl7SPIJqbN1d59RrEBuIE6Rq6c= -k8s.io/autoscaler/vertical-pod-autoscaler v0.13.0/go.mod h1:LraL5kR2xX7jb4VMCG6/tUH4I75uRHlnzC0VWQHcyWk= +k8s.io/autoscaler/vertical-pod-autoscaler v1.2.2 h1:d6nrlgROIvGJrBZnmyTibA2CvXIylet/vBE1EicilRo= +k8s.io/autoscaler/vertical-pod-autoscaler v1.2.2/go.mod h1:9ywHbt0kTrLyeNGgTNm7WEns34PmBMEr+9bDKTxW6wQ= k8s.io/cli-runtime v0.31.2 h1:7FQt4C4Xnqx8V1GJqymInK0FFsoC+fAZtbLqgXYVOLQ= k8s.io/cli-runtime v0.31.2/go.mod h1:XROyicf+G7rQ6FQJMbeDV9jqxzkWXTYD6Uxd15noe0Q= k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= @@ -2733,9 +2934,8 @@ modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3 h1:2770sDpzrjjsAtVhSeUFseziht227YAWYHLGNM8QPwY= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.3/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= @@ -2744,11 +2944,14 @@ sigs.k8s.io/custom-metrics-apiserver v1.30.1-0.20241105195130-84dc8cfe2555 h1:GY sigs.k8s.io/custom-metrics-apiserver v1.30.1-0.20241105195130-84dc8cfe2555/go.mod h1:JL2q3g2QCWnIDvo73jpkksZOVd3ee3FWzZs4EHvx5NE= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/release-utils v0.7.7 h1:JKDOvhCk6zW8ipEOkpTGDH/mW3TI+XqtPp16aaQ79FU= +sigs.k8s.io/release-utils v0.7.7/go.mod h1:iU7DGVNi3umZJ8q6aHyUFzsDUIaYwNnNKGHo3YE5E3s= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +software.sslmate.com/src/go-pkcs12 v0.4.0 h1:H2g08FrTvSFKUj+D309j1DPfk5APnIdAQAB8aEykJ5k= +software.sslmate.com/src/go-pkcs12 v0.4.0/go.mod 
h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI= diff --git a/go.work b/go.work index c7c6ebf27d3ff..fa3e96dcf00ce 100644 --- a/go.work +++ b/go.work @@ -1,6 +1,6 @@ -go 1.23.0 +go 1.23.1 -toolchain go1.23.3 +toolchain go1.23.5 use ( . @@ -42,7 +42,8 @@ use ( comp/otelcol/otlp/components/processor/infraattributesprocessor comp/otelcol/otlp/components/statsprocessor comp/otelcol/otlp/testutil - comp/serializer/compression + comp/serializer/logscompression + comp/serializer/metricscompression comp/trace/agent/def comp/trace/compression/def comp/trace/compression/impl-gzip @@ -66,6 +67,7 @@ use ( pkg/config/teeconfig pkg/config/utils pkg/errors + pkg/fips pkg/gohai pkg/linters/components/pkgconfigusage pkg/logs/auditor @@ -102,6 +104,7 @@ use ( pkg/util/cache pkg/util/cgroups pkg/util/common + pkg/util/compression pkg/util/containers/image pkg/util/defaultpaths pkg/util/executable @@ -114,7 +117,7 @@ use ( pkg/util/json pkg/util/log pkg/util/log/setup - pkg/util/optional + pkg/util/option pkg/util/pointer pkg/util/scrubber pkg/util/sort diff --git a/google-marketplace/chart/datadog-mp/charts/datadog-operator-2.1.0.tgz b/google-marketplace/chart/datadog-mp/charts/datadog-operator-2.1.0.tgz deleted file mode 100644 index bc7a69c1c39ee..0000000000000 Binary files a/google-marketplace/chart/datadog-mp/charts/datadog-operator-2.1.0.tgz and /dev/null differ diff --git a/google-marketplace/chart/datadog-mp/charts/datadog-operator-2.5.0.tgz b/google-marketplace/chart/datadog-mp/charts/datadog-operator-2.5.0.tgz new file mode 100644 index 0000000000000..13a7de87bc77e Binary files /dev/null and b/google-marketplace/chart/datadog-mp/charts/datadog-operator-2.5.0.tgz differ diff --git a/google-marketplace/chart/datadog-mp/requirements.lock b/google-marketplace/chart/datadog-mp/requirements.lock index 619199475af62..0e0d35ebf1f89 100644 --- a/google-marketplace/chart/datadog-mp/requirements.lock +++ b/google-marketplace/chart/datadog-mp/requirements.lock @@ -1,6 +1,6 @@ dependencies: - name: datadog-operator repository: https://helm.datadoghq.com - version: 2.1.0 -digest: sha256:53eb89cb1a976d1db77059bf69495cfa9e0248c2d7c773aa4dde7a2ea8850a14 -generated: "2024-10-18T12:01:26.314015-04:00" + version: 2.5.0 +digest: sha256:05c57dea79700f5f2df08e5bbd8178c2464e2842de18bba9085f51fa4598167d +generated: "2024-12-30T10:01:23.595271+01:00" diff --git a/google-marketplace/chart/datadog-mp/requirements.yaml b/google-marketplace/chart/datadog-mp/requirements.yaml index b90dbe158e4d7..e3690b5b5d19f 100644 --- a/google-marketplace/chart/datadog-mp/requirements.yaml +++ b/google-marketplace/chart/datadog-mp/requirements.yaml @@ -1,5 +1,5 @@ dependencies: - name: datadog-operator # Helm chart version - version: 2.1.x + version: 2.5.x repository: https://helm.datadoghq.com diff --git a/google-marketplace/chart/datadog-mp/templates/application.yaml b/google-marketplace/chart/datadog-mp/templates/application.yaml index 939ce80a91318..1f8a541779021 100644 --- a/google-marketplace/chart/datadog-mp/templates/application.yaml +++ b/google-marketplace/chart/datadog-mp/templates/application.yaml @@ -12,7 +12,7 @@ metadata: spec: descriptor: type: 'Datadog Agent' - version: '1.9.0' + version: '1.11.1' description: |- Datadog provides infrastructure monitoring, application performance monitoring, and log management in a single-pane-of-glass view so teams can @@ -43,7 +43,7 @@ spec: It will allow the Datadog Operator to deploy the Datadog Agent following the default configuration found in `DatadogAgent` object. 
# Send custom metrics with DogStatsD - + To enable the collection of custom metrics, the Datadog Agent ships with a lightweight DogStatsD server for metric collection and aggregation. To send metrics to a containerized DogStatsD, you can bind the container’s diff --git a/google-marketplace/chart/datadog-mp/templates/manifests.yaml b/google-marketplace/chart/datadog-mp/templates/manifests.yaml index feb291786871e..cd38db806cdd3 100644 --- a/google-marketplace/chart/datadog-mp/templates/manifests.yaml +++ b/google-marketplace/chart/datadog-mp/templates/manifests.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: datadogagents.datadoghq.com spec: group: datadoghq.com @@ -32,200 +32,106 @@ spec: name: v2alpha1 schema: openAPIV3Schema: - description: DatadogAgent Deployment with the Datadog Operator. properties: apiVersion: - description: |- - APIVersion defines the versioned schema of this representation of an object. - Servers should convert recognized schemas to the latest internal value, and - may reject unrecognized values. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: |- - Kind is a string value representing the REST resource this object represents. - Servers may infer this from the endpoint the client submits requests to. - Cannot be updated. - In CamelCase. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object spec: - description: DatadogAgentSpec defines the desired state of DatadogAgent properties: features: - description: Features running on the Agent and Cluster Agent properties: admissionController: - description: AdmissionController configuration. properties: agentCommunicationMode: - description: |- - AgentCommunicationMode corresponds to the mode used by the Datadog application libraries to communicate with the Agent. - It can be "hostip", "service", or "socket". type: string agentSidecarInjection: - description: AgentSidecarInjection contains Agent sidecar injection configurations. properties: clusterAgentCommunicationEnabled: - description: |- - ClusterAgentCommunicationEnabled enables communication between Agent sidecars and the Cluster Agent. - Default : true type: boolean enabled: - description: |- - Enabled enables Sidecar injections. - Default: false type: boolean image: - description: Image overrides the default Agent image name and tag for the Agent sidecar. properties: jmxEnabled: - description: |- - Define whether the Agent image should support JMX. - To be used if the Name field does not correspond to a full image string. type: boolean name: - description: |- - Define the image to use: - Use "gcr.io/datadoghq/agent:latest" for Datadog Agent 7. - Use "datadog/dogstatsd:latest" for standalone Datadog Agent DogStatsD 7. - Use "gcr.io/datadoghq/cluster-agent:latest" for Datadog Cluster Agent. - Use "agent" with the registry and tag configurations for /agent:. - Use "cluster-agent" with the registry and tag configurations for /cluster-agent:. - If the name is the full image string—`:` or `/:`, then `tag`, `jmxEnabled`, - and `global.registry` values are ignored. 
- Otherwise, image string is created by overriding default settings with supplied `name`, `tag`, and `jmxEnabled` values; - image string is created using default registry unless `global.registry` is configured. type: string pullPolicy: - description: |- - The Kubernetes pull policy: - Use Always, Never, or IfNotPresent. type: string pullSecrets: - description: |- - It is possible to specify Docker registry credentials. - See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic type: array tag: - description: |- - Define the image tag to use. - To be used if the Name field does not correspond to a full image string. type: string type: object profiles: - description: Profiles define the sidecar configuration override. Only one profile is supported. items: - description: Profile defines a sidecar configuration override. properties: env: - description: EnvVars specifies the environment variables for the profile. items: - description: EnvVar represents an environment variable present in a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. type: string value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". type: string valueFrom: - description: Source for the environment variable's value. Cannot be used if value is not empty. properties: configMapKeyRef: - description: Selects a key of a ConfigMap. properties: key: - description: The key to select. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the ConfigMap or its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
properties: containerName: - description: 'Container name: required for volumes, optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource type: object x-kubernetes-map-type: atomic secretKeyRef: - description: Selects a key of a secret in the pod's namespace properties: key: - description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the Secret or its key must be defined type: boolean required: - key @@ -240,27 +146,13 @@ spec: - name x-kubernetes-list-type: map resources: - description: ResourceRequirements specifies the resource requirements for the profile. properties: claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + type: string + request: type: string required: - name @@ -276,9 +168,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -287,112 +176,67 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object type: array x-kubernetes-list-type: atomic provider: - description: |- - Provider is used to add infrastructure provider-specific configurations to the Agent sidecar. - Currently only "fargate" is supported. - To use the feature in other environments (including local testing) omit the config. - See also: https://docs.datadoghq.com/integrations/eks_fargate type: string registry: - description: Registry overrides the default registry for the sidecar Agent. type: string selectors: - description: Selectors define the pod selector for sidecar injection. Only one rule is supported. items: - description: Selectors define a pod selector for sidecar injection. 
properties: namespaceSelector: - description: NamespaceSelector specifies the label selector for namespaces. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic objectSelector: - description: ObjectSelector specifies the label selector for objects. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -401,238 +245,131 @@ spec: x-kubernetes-list-type: atomic type: object cwsInstrumentation: - description: CWSInstrumentation holds the CWS Instrumentation endpoint configuration properties: enabled: - description: |- - Enable the CWS Instrumentation admission controller endpoint. - Default: false type: boolean mode: - description: |- - Mode defines the behavior of the CWS Instrumentation endpoint, and can be either "init_container" or "remote_copy". - Default: "remote_copy" type: string type: object enabled: - description: |- - Enabled enables the Admission Controller. 
- Default: true type: boolean failurePolicy: - description: FailurePolicy determines how unrecognized and timeout errors are handled. type: string mutateUnlabelled: - description: |- - MutateUnlabelled enables config injection without the need of pod label 'admission.datadoghq.com/enabled="true"'. - Default: false type: boolean + mutation: + properties: + enabled: + type: boolean + type: object registry: - description: Registry defines an image registry for the admission controller. type: string serviceName: - description: ServiceName corresponds to the webhook service name. type: string + validation: + properties: + enabled: + type: boolean + type: object webhookName: - description: |- - WebhookName is a custom name for the MutatingWebhookConfiguration. - Default: "datadog-webhook" type: string type: object apm: - description: APM (Application Performance Monitoring) configuration. properties: enabled: - description: |- - Enabled enables Application Performance Monitoring. - Default: true type: boolean hostPortConfig: - description: |- - HostPortConfig contains host port configuration. - Enabled Default: false - Port Default: 8126 properties: enabled: - description: |- - Enabled enables host port configuration - Default: false type: boolean hostPort: - description: |- - Port takes a port number (0 < x < 65536) to expose on the host. (Most containers do not need this.) - If HostNetwork is enabled, this value must match the ContainerPort. format: int32 type: integer type: object instrumentation: - description: |- - SingleStepInstrumentation allows the agent to inject the Datadog APM libraries into all pods in the cluster. - Feature is in beta. - See also: https://docs.datadoghq.com/tracing/trace_collection/single-step-apm - Enabled Default: false properties: disabledNamespaces: - description: DisabledNamespaces disables injecting the Datadog APM libraries into pods in specific namespaces. items: type: string type: array x-kubernetes-list-type: set enabled: - description: |- - Enabled enables injecting the Datadog APM libraries into all pods in the cluster. - Default: false type: boolean enabledNamespaces: - description: EnabledNamespaces enables injecting the Datadog APM libraries into pods in specific namespaces. items: type: string type: array x-kubernetes-list-type: set languageDetection: - description: |- - LanguageDetection detects languages and adds them as annotations on Deployments, but does not use these languages for injecting libraries to workload pods. - (Requires Agent 7.52.0+ and Cluster Agent 7.52.0+) properties: enabled: - description: |- - Enabled enables Language Detection to automatically detect languages of user workloads (beta). - Requires SingleStepInstrumentation.Enabled to be true. - Default: true type: boolean type: object libVersions: additionalProperties: type: string - description: |- - LibVersions configures injection of specific tracing library versions with Single Step Instrumentation. - : - ex: "java": "v1.18.0" type: object type: object unixDomainSocketConfig: - description: |- - UnixDomainSocketConfig contains socket configuration. - See also: https://docs.datadoghq.com/agent/kubernetes/apm/?tab=helm#agent-environment-variables - Enabled Default: true - Path Default: `/var/run/datadog/apm.socket` properties: enabled: - description: |- - Enabled enables Unix Domain Socket. - Default: true type: boolean path: - description: Path defines the socket path used when enabled. 
type: string type: object type: object asm: - description: ASM (Application Security Management) configuration. properties: iast: - description: |- - IAST configures Interactive Application Security Testing. - Enabled Default: false properties: enabled: - description: |- - Enabled enables Interactive Application Security Testing (IAST). - Default: false type: boolean type: object sca: - description: |- - SCA configures Software Composition Analysis. - Enabled Default: false properties: enabled: - description: |- - Enabled enables Software Composition Analysis (SCA). - Default: false type: boolean type: object threats: - description: |- - Threats configures ASM App & API Protection. - Enabled Default: false properties: enabled: - description: |- - Enabled enables ASM App & API Protection. - Default: false type: boolean type: object type: object autoscaling: - description: Autoscaling configuration. properties: workload: - description: Workload contains the configuration for the workload autoscaling product. properties: enabled: - description: |- - Enabled enables the workload autoscaling product. - Default: false type: boolean type: object type: object clusterChecks: - description: ClusterChecks configuration. properties: enabled: - description: |- - Enables Cluster Checks scheduling in the Cluster Agent. - Default: true type: boolean useClusterChecksRunners: - description: |- - Enabled enables Cluster Checks Runners to run all Cluster Checks. - Default: false type: boolean type: object cspm: - description: CSPM (Cloud Security Posture Management) configuration. properties: checkInterval: - description: CheckInterval defines the check interval. type: string customBenchmarks: - description: |- - CustomBenchmarks contains CSPM benchmarks. - The content of the ConfigMap will be merged with the benchmarks bundled with the agent. - Any benchmarks with the same name as those existing in the agent will take precedence. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -643,64 +380,34 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object enabled: - description: |- - Enabled enables Cloud Security Posture Management. - Default: false type: boolean hostBenchmarks: - description: HostBenchmarks contains configuration for host benchmarks. properties: enabled: - description: |- - Enabled enables host benchmarks. 
- Default: true type: boolean type: object type: object cws: - description: CWS (Cloud Workload Security) configuration. properties: customPolicies: - description: |- - CustomPolicies contains security policies. - The content of the ConfigMap will be merged with the policies bundled with the agent. - Any policies with the same name as those existing in the agent will take precedence. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -711,102 +418,54 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object enabled: - description: |- - Enabled enables Cloud Workload Security. - Default: false type: boolean network: properties: enabled: - description: |- - Enabled enables Cloud Workload Security Network detections. - Default: true type: boolean type: object remoteConfiguration: properties: enabled: - description: |- - Enabled enables Remote Configuration for Cloud Workload Security. - Default: true type: boolean type: object securityProfiles: properties: enabled: - description: |- - Enabled enables Security Profiles collection for Cloud Workload Security. - Default: true type: boolean type: object syscallMonitorEnabled: - description: |- - SyscallMonitorEnabled enables Syscall Monitoring (recommended for troubleshooting only). - Default: false type: boolean type: object dogstatsd: - description: Dogstatsd configuration. properties: hostPortConfig: - description: |- - HostPortConfig contains host port configuration. - Enabled Default: false - Port Default: 8125 properties: enabled: - description: |- - Enabled enables host port configuration - Default: false type: boolean hostPort: - description: |- - Port takes a port number (0 < x < 65536) to expose on the host. (Most containers do not need this.) - If HostNetwork is enabled, this value must match the ContainerPort. format: int32 type: integer type: object mapperProfiles: - description: |- - Configure the Dogstasd Mapper Profiles. - Can be passed as raw data or via a json encoded string in a config map. - See also: https://docs.datadoghq.com/developers/dogstatsd/dogstatsd_mapper/ properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. 
properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -817,72 +476,36 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object originDetectionEnabled: - description: |- - OriginDetectionEnabled enables origin detection for container tagging. - See also: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging type: boolean tagCardinality: - description: |- - TagCardinality configures tag cardinality for the metrics collected using origin detection (`low`, `orchestrator` or `high`). - See also: https://docs.datadoghq.com/getting_started/tagging/assigning_tags/?tab=containerizedenvironments#environment-variables - Cardinality default: low type: string unixDomainSocketConfig: - description: |- - UnixDomainSocketConfig contains socket configuration. - See also: https://docs.datadoghq.com/agent/kubernetes/apm/?tab=helm#agent-environment-variables - Enabled Default: true - Path Default: `/var/run/datadog/dsd.socket` properties: enabled: - description: |- - Enabled enables Unix Domain Socket. - Default: true type: boolean path: - description: Path defines the socket path used when enabled. type: string type: object type: object ebpfCheck: - description: EBPFCheck configuration. properties: enabled: - description: |- - Enables the eBPF check. - Default: false type: boolean type: object eventCollection: - description: EventCollection configuration. properties: collectKubernetesEvents: - description: |- - CollectKubernetesEvents enables Kubernetes event collection. - Default: true type: boolean collectedEventTypes: - description: |- - CollectedEventTypes defines the list of events to collect when UnbundleEvents is enabled. - Default: - [ - {"kind":"Pod","reasons":["Failed","BackOff","Unhealthy","FailedScheduling","FailedMount","FailedAttachVolume"]}, - {"kind":"Node","reasons":["TerminatingEvictedPod","NodeNotReady","Rebooted","HostPortConflict"]}, - {"kind":"CronJob","reasons":["SawCompletedJob"]} - ] items: - description: EventTypes defines the kind and reasons of events to collect. properties: kind: - description: 'Kind is the kind of event to collect. (ex: Pod, Node, CronJob)' type: string reasons: - description: 'Reasons is a list of event reasons to collect. (ex: Failed, BackOff, Unhealthy)' items: type: string type: array @@ -894,156 +517,80 @@ spec: type: array x-kubernetes-list-type: atomic unbundleEvents: - description: |- - UnbundleEvents enables collection of Kubernetes events as individual events. 
- Default: false type: boolean type: object externalMetricsServer: - description: ExternalMetricsServer configuration. properties: enabled: - description: |- - Enabled enables the External Metrics Server. - Default: false type: boolean endpoint: - description: |- - Override the API endpoint for the External Metrics Server. - URL Default: "https://app.datadoghq.com". properties: credentials: - description: Credentials defines the Datadog credentials used to submit data to/query data from Datadog. properties: apiKey: - description: |- - APIKey configures your Datadog API key. - See also: https://app.datadoghq.com/account/settings#agent/kubernetes type: string apiSecret: - description: |- - APISecret references an existing Secret which stores the API key instead of creating a new one. - If set, this parameter takes precedence over "APIKey". properties: keyName: - description: KeyName is the key of the secret to use. type: string secretName: - description: SecretName is the name of the secret. type: string required: - secretName type: object appKey: - description: |- - AppKey configures your Datadog application key. - If you are using features.externalMetricsServer.enabled = true, you must set - a Datadog application key for read access to your metrics. type: string appSecret: - description: |- - AppSecret references an existing Secret which stores the application key instead of creating a new one. - If set, this parameter takes precedence over "AppKey". properties: keyName: - description: KeyName is the key of the secret to use. type: string secretName: - description: SecretName is the name of the secret. type: string required: - secretName type: object type: object url: - description: URL defines the endpoint URL. type: string type: object port: - description: |- - Port specifies the metricsProvider External Metrics Server service port. - Default: 8443 format: int32 type: integer registerAPIService: - description: |- - RegisterAPIService registers the External Metrics endpoint as an APIService - Default: true type: boolean useDatadogMetrics: - description: |- - UseDatadogMetrics enables usage of the DatadogMetrics CRD (allowing one to scale on arbitrary Datadog metric queries). - Default: true type: boolean wpaController: - description: |- - WPAController enables the informer and controller of the Watermark Pod Autoscaler. - NOTE: The Watermark Pod Autoscaler controller needs to be installed. - See also: https://github.com/DataDog/watermarkpodautoscaler. - Default: false type: boolean type: object helmCheck: - description: HelmCheck configuration. properties: collectEvents: - description: |- - CollectEvents set to `true` enables event collection in the Helm check - (Requires Agent 7.36.0+ and Cluster Agent 1.20.0+) - Default: false type: boolean enabled: - description: |- - Enabled enables the Helm check. - Default: false type: boolean valuesAsTags: additionalProperties: type: string - description: |- - ValuesAsTags collects Helm values from a release and uses them as tags - (Requires Agent and Cluster Agent 7.40.0+). - Default: {} type: object type: object kubeStateMetricsCore: - description: KubeStateMetricsCore check configuration. properties: conf: - description: |- - Conf overrides the configuration for the default Kubernetes State Metrics Core check. - This must point to a ConfigMap containing a valid cluster check configuration. properties: configData: - description: ConfigData corresponds to the configuration file content. 
type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -1054,163 +601,77 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object enabled: - description: |- - Enabled enables Kube State Metrics Core. - Default: true type: boolean type: object liveContainerCollection: - description: LiveContainerCollection configuration. properties: enabled: - description: |- - Enables container collection for the Live Container View. - Default: true type: boolean type: object liveProcessCollection: - description: LiveProcessCollection configuration. properties: enabled: - description: |- - Enabled enables Process monitoring. - Default: false type: boolean scrubProcessArguments: - description: |- - ScrubProcessArguments enables scrubbing of sensitive data in process command-lines (passwords, tokens, etc. ). - Default: true type: boolean stripProcessArguments: - description: |- - StripProcessArguments enables stripping of all process arguments. - Default: false type: boolean type: object logCollection: - description: LogCollection configuration. properties: containerCollectAll: - description: |- - ContainerCollectAll enables Log collection from all containers. - Default: false type: boolean containerCollectUsingFiles: - description: |- - ContainerCollectUsingFiles enables log collection from files in `/var/log/pods instead` of using the container runtime API. - Collecting logs from files is usually the most efficient way of collecting logs. - See also: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - Default: true type: boolean containerLogsPath: - description: |- - ContainerLogsPath allows log collection from the container log path. - Set to a different path if you are not using the Docker runtime. - See also: https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/?tab=k8sfile#create-manifest - Default: `/var/lib/docker/containers` type: string containerSymlinksPath: - description: |- - ContainerSymlinksPath allows log collection to use symbolic links in this directory to validate container ID -> pod. - Default: `/var/log/containers` type: string enabled: - description: |- - Enabled enables Log collection. - Default: false type: boolean openFilesLimit: - description: |- - OpenFilesLimit sets the maximum number of log files that the Datadog Agent tails. - Increasing this limit can increase resource consumption of the Agent. 
- See also: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - Default: 100 format: int32 type: integer podLogsPath: - description: |- - PodLogsPath allows log collection from a pod log path. - Default: `/var/log/pods` type: string tempStoragePath: - description: |- - TempStoragePath (always mounted from the host) is used by the Agent to store information about processed log files. - If the Agent is restarted, it starts tailing the log files immediately. - Default: `/var/lib/datadog-agent/logs` type: string type: object npm: - description: NPM (Network Performance Monitoring) configuration. properties: collectDNSStats: - description: |- - CollectDNSStats enables DNS stat collection. - Default: false type: boolean enableConntrack: - description: |- - EnableConntrack enables the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data. - See also: http://conntrack-tools.netfilter.org/ - Default: false type: boolean enabled: - description: |- - Enabled enables Network Performance Monitoring. - Default: false type: boolean type: object oomKill: - description: OOMKill configuration. properties: enabled: - description: |- - Enables the OOMKill eBPF-based check. - Default: false type: boolean type: object orchestratorExplorer: - description: OrchestratorExplorer check configuration. properties: conf: - description: |- - Conf overrides the configuration for the default Orchestrator Explorer check. - This must point to a ConfigMap containing a valid cluster check configuration. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -1221,361 +682,297 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object customResources: - description: |- - `CustomResources` defines custom resources for the orchestrator explorer to collect. - Each item should follow the convention `group/version/kind`. For example, `datadoghq.com/v1alpha1/datadogmetrics`. items: type: string type: array x-kubernetes-list-type: set ddUrl: - description: |- - Override the API endpoint for the Orchestrator Explorer. - URL Default: "https://orchestrator.datadoghq.com". type: string enabled: - description: |- - Enabled enables the Orchestrator Explorer. - Default: true type: boolean extraTags: - description: |- - Additional tags to associate with the collected data in the form of `a b c`. 
- This is a Cluster Agent option distinct from DD_TAGS that is used in the Orchestrator Explorer. items: type: string type: array x-kubernetes-list-type: set scrubContainers: - description: |- - ScrubContainers enables scrubbing of sensitive container data (passwords, tokens, etc. ). - Default: true type: boolean type: object otlp: - description: OTLP ingest configuration properties: receiver: - description: Receiver contains configuration for the OTLP ingest receiver. properties: protocols: - description: Protocols contains configuration for the OTLP ingest receiver protocols. properties: grpc: - description: GRPC contains configuration for the OTLP ingest OTLP/gRPC receiver. properties: enabled: - description: Enable the OTLP/gRPC endpoint. type: boolean endpoint: - description: |- - Endpoint for OTLP/gRPC. - gRPC supports several naming schemes: https://github.com/grpc/grpc/blob/master/doc/naming.md - The Datadog Operator supports only 'host:port' (usually `0.0.0.0:port`). - Default: `0.0.0.0:4317`. type: string + hostPortConfig: + properties: + enabled: + type: boolean + hostPort: + format: int32 + type: integer + type: object type: object http: - description: HTTP contains configuration for the OTLP ingest OTLP/HTTP receiver. properties: enabled: - description: Enable the OTLP/HTTP endpoint. type: boolean endpoint: - description: |- - Endpoint for OTLP/HTTP. - Default: '0.0.0.0:4318'. type: string + hostPortConfig: + properties: + enabled: + type: boolean + hostPort: + format: int32 + type: integer + type: object type: object type: object type: object type: object processDiscovery: - description: ProcessDiscovery configuration. properties: enabled: - description: |- - Enabled enables the Process Discovery check in the Agent. - Default: true type: boolean type: object prometheusScrape: - description: PrometheusScrape configuration. properties: additionalConfigs: - description: AdditionalConfigs allows adding advanced Prometheus check configurations with custom discovery rules. type: string enableServiceEndpoints: - description: |- - EnableServiceEndpoints enables generating dedicated checks for service endpoints. - Default: false type: boolean enabled: - description: |- - Enable autodiscovery of pods and services exposing Prometheus metrics. - Default: false type: boolean version: - description: |- - Version specifies the version of the OpenMetrics check. - Default: 2 type: integer type: object remoteConfiguration: - description: Remote Configuration configuration. properties: enabled: - description: |- - Enable this option to activate Remote Configuration. - Default: true type: boolean type: object sbom: - description: SBOM collection configuration. properties: containerImage: - description: SBOMTypeConfig contains configuration for a SBOM collection type. properties: analyzers: - description: Analyzers to use for SBOM collection. items: type: string type: array x-kubernetes-list-type: set enabled: - description: |- - Enable this option to activate SBOM collection. - Default: false type: boolean overlayFSDirectScan: - description: |- - Enable this option to enable experimental overlayFS direct scan. - Default: false type: boolean uncompressedLayersSupport: - description: |- - Enable this option to enable support for uncompressed layers. - Default: false type: boolean type: object enabled: - description: |- - Enable this option to activate SBOM collection. - Default: false type: boolean host: - description: SBOMTypeConfig contains configuration for a SBOM collection type. 
properties: analyzers: - description: Analyzers to use for SBOM collection. items: type: string type: array x-kubernetes-list-type: set enabled: - description: |- - Enable this option to activate SBOM collection. - Default: false type: boolean type: object type: object + serviceDiscovery: + properties: + enabled: + type: boolean + type: object tcpQueueLength: - description: TCPQueueLength configuration. properties: enabled: - description: |- - Enables the TCP queue length eBPF-based check. - Default: false type: boolean type: object usm: - description: USM (Universal Service Monitoring) configuration. properties: enabled: - description: |- - Enabled enables Universal Service Monitoring. - Default: false type: boolean type: object type: object global: - description: Global settings to configure the agents properties: + checksTagCardinality: + type: string clusterAgentToken: - description: ClusterAgentToken is the token for communication between the NodeAgent and ClusterAgent. type: string clusterAgentTokenSecret: - description: ClusterAgentTokenSecret is the secret containing the Cluster Agent token. properties: keyName: - description: KeyName is the key of the secret to use. type: string secretName: - description: SecretName is the name of the secret. type: string required: - secretName type: object clusterName: - description: ClusterName sets a unique cluster name for the deployment to easily scope monitoring data in the Datadog app. type: string containerStrategy: - description: |- - ContainerStrategy determines whether agents run in a single or multiple containers. - Default: 'optimized' type: string credentials: - description: Credentials defines the Datadog credentials used to submit data to/query data from Datadog. properties: apiKey: - description: |- - APIKey configures your Datadog API key. - See also: https://app.datadoghq.com/account/settings#agent/kubernetes type: string apiSecret: - description: |- - APISecret references an existing Secret which stores the API key instead of creating a new one. - If set, this parameter takes precedence over "APIKey". properties: keyName: - description: KeyName is the key of the secret to use. type: string secretName: - description: SecretName is the name of the secret. type: string required: - secretName type: object appKey: - description: |- - AppKey configures your Datadog application key. - If you are using features.externalMetricsServer.enabled = true, you must set - a Datadog application key for read access to your metrics. type: string appSecret: - description: |- - AppSecret references an existing Secret which stores the application key instead of creating a new one. - If set, this parameter takes precedence over "AppKey". properties: keyName: - description: KeyName is the key of the secret to use. type: string secretName: - description: SecretName is the name of the secret. type: string required: - secretName type: object type: object criSocketPath: - description: Path to the container runtime socket (if different from Docker). type: string disableNonResourceRules: - description: |- - Set DisableNonResourceRules to exclude NonResourceURLs from default ClusterRoles. - Required 'true' for Google Cloud Marketplace. type: boolean dockerSocketPath: - description: Path to the docker runtime socket. type: string endpoint: - description: |- - Endpoint is the Datadog intake URL the Agent data are sent to. - Only set this option if you need the Agent to send data to a custom URL. - Overrides the site setting defined in `Site`. 
properties: credentials: - description: Credentials defines the Datadog credentials used to submit data to/query data from Datadog. properties: apiKey: - description: |- - APIKey configures your Datadog API key. - See also: https://app.datadoghq.com/account/settings#agent/kubernetes type: string apiSecret: - description: |- - APISecret references an existing Secret which stores the API key instead of creating a new one. - If set, this parameter takes precedence over "APIKey". properties: keyName: - description: KeyName is the key of the secret to use. type: string secretName: - description: SecretName is the name of the secret. type: string required: - secretName type: object appKey: - description: |- - AppKey configures your Datadog application key. - If you are using features.externalMetricsServer.enabled = true, you must set - a Datadog application key for read access to your metrics. type: string appSecret: - description: |- - AppSecret references an existing Secret which stores the application key instead of creating a new one. - If set, this parameter takes precedence over "AppKey". properties: keyName: - description: KeyName is the key of the secret to use. type: string secretName: - description: SecretName is the name of the secret. type: string required: - secretName type: object type: object url: - description: URL defines the endpoint URL. type: string type: object + env: + items: + properties: + name: + type: string + value: + type: string + valueFrom: + properties: + configMapKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + properties: + apiVersion: + type: string + fieldPath: + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + properties: + containerName: + type: string + divisor: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + properties: + key: + type: string + name: + default: "" + type: string + optional: + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map fips: - description: FIPS contains configuration used to customize the FIPS proxy sidecar. properties: customFIPSConfig: - description: |- - CustomFIPSConfig configures a custom configMap to provide the FIPS configuration. - Specify custom contents for the FIPS proxy sidecar container config - (/etc/datadog-fips-proxy/datadog-fips-proxy.cfg). If empty, the default FIPS - proxy sidecar container config is used. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. 
- Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -1586,102 +983,47 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object enabled: - description: Enable FIPS sidecar. type: boolean image: - description: The container image of the FIPS sidecar. properties: jmxEnabled: - description: |- - Define whether the Agent image should support JMX. - To be used if the Name field does not correspond to a full image string. type: boolean name: - description: |- - Define the image to use: - Use "gcr.io/datadoghq/agent:latest" for Datadog Agent 7. - Use "datadog/dogstatsd:latest" for standalone Datadog Agent DogStatsD 7. - Use "gcr.io/datadoghq/cluster-agent:latest" for Datadog Cluster Agent. - Use "agent" with the registry and tag configurations for /agent:. - Use "cluster-agent" with the registry and tag configurations for /cluster-agent:. - If the name is the full image string—`:` or `/:`, then `tag`, `jmxEnabled`, - and `global.registry` values are ignored. - Otherwise, image string is created by overriding default settings with supplied `name`, `tag`, and `jmxEnabled` values; - image string is created using default registry unless `global.registry` is configured. type: string pullPolicy: - description: |- - The Kubernetes pull policy: - Use Always, Never, or IfNotPresent. type: string pullSecrets: - description: |- - It is possible to specify Docker registry credentials. - See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic type: array tag: - description: |- - Define the image tag to use. - To be used if the Name field does not correspond to a full image string. type: string type: object localAddress: - description: |- - Set the local IP address. - Default: `127.0.0.1` type: string port: - description: |- - Port specifies which port is used by the containers to communicate to the FIPS sidecar. - Default: 9803 format: int32 type: integer portRange: - description: |- - PortRange specifies the number of ports used. - Default: 15 format: int32 type: integer resources: - description: Resources is the requests and limits for the FIPS sidecar container. properties: claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. 
items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + type: string + request: type: string required: - name @@ -1697,9 +1039,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -1708,100 +1047,64 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object useHTTPS: - description: |- - UseHTTPS enables HTTPS. - Default: false type: boolean type: object kubelet: - description: Kubelet contains the kubelet configuration parameters. properties: agentCAPath: - description: |- - AgentCAPath is the container path where the kubelet CA certificate is stored. - Default: '/var/run/host-kubelet-ca.crt' if hostCAPath is set, else '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt' type: string host: - description: Host overrides the host used to contact kubelet API (default to status.hostIP). properties: configMapKeyRef: - description: Selects a key of a ConfigMap. properties: key: - description: The key to select. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the ConfigMap or its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
properties: containerName: - description: 'Container name: required for volumes, optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource type: object x-kubernetes-map-type: atomic secretKeyRef: - description: Selects a key of a secret in the pod's namespace properties: key: - description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the Secret or its key must be defined type: boolean required: - key @@ -1809,159 +1112,127 @@ spec: x-kubernetes-map-type: atomic type: object hostCAPath: - description: HostCAPath is the host path where the kubelet CA certificate is stored. type: string tlsVerify: - description: |- - TLSVerify toggles kubelet TLS verification. - Default: true type: boolean type: object + kubernetesResourcesAnnotationsAsTags: + additionalProperties: + additionalProperties: + type: string + type: object + type: object + kubernetesResourcesLabelsAsTags: + additionalProperties: + additionalProperties: + type: string + type: object + type: object localService: - description: LocalService contains configuration to customize the internal traffic policy service. properties: forceEnableLocalService: - description: |- - ForceEnableLocalService forces the creation of the internal traffic policy service to target the agent running on the local node. - This parameter only applies to Kubernetes 1.21, where the feature is in alpha and is disabled by default. - (On Kubernetes 1.22+, the feature entered beta and the internal traffic service is created by default, so this parameter is ignored.) - Default: false type: boolean nameOverride: - description: NameOverride defines the name of the internal traffic service to target the agent running on the local node. type: string type: object logLevel: - description: |- - LogLevel sets logging verbosity. This can be overridden by container. - Valid log levels are: trace, debug, info, warn, error, critical, and off. - Default: 'info' type: string namespaceAnnotationsAsTags: additionalProperties: type: string - description: |- - Provide a mapping of Kubernetes Namespace Annotations to Datadog Tags. - : type: object namespaceLabelsAsTags: additionalProperties: type: string - description: |- - Provide a mapping of Kubernetes Namespace Labels to Datadog Tags. - : type: object networkPolicy: - description: NetworkPolicy contains the network configuration. properties: create: - description: Create defines whether to create a NetworkPolicy for the current deployment. type: boolean dnsSelectorEndpoints: - description: DNSSelectorEndpoints defines the cilium selector of the DNS server entity. items: - description: |- - A label selector is a label query over a set of resources. The result of matchLabels and - matchExpressions are ANDed. An empty label selector matches all objects. A null - label selector matches no objects. 
properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic type: array x-kubernetes-list-type: atomic flavor: - description: Flavor defines Which network policy to use. type: string type: object nodeLabelsAsTags: additionalProperties: type: string - description: |- - Provide a mapping of Kubernetes Node Labels to Datadog Tags. - : type: object originDetectionUnified: - description: OriginDetectionUnified defines the origin detection unified mechanism behavior. properties: enabled: - description: |- - Enabled enables unified mechanism for origin detection. - Default: false type: boolean type: object podAnnotationsAsTags: additionalProperties: type: string - description: |- - Provide a mapping of Kubernetes Annotations to Datadog Tags. - : type: object podLabelsAsTags: additionalProperties: type: string - description: |- - Provide a mapping of Kubernetes Labels to Datadog Tags. - : type: object registry: - description: |- - Registry is the image registry to use for all Agent images. - Use 'public.ecr.aws/datadog' for AWS ECR. - Use 'docker.io/datadog' for DockerHub. - Default: 'gcr.io/datadoghq' type: string + runProcessChecksInCoreAgent: + type: boolean + secretBackend: + properties: + args: + type: string + command: + type: string + enableGlobalPermissions: + type: boolean + roles: + items: + properties: + namespace: + type: string + secrets: + items: + type: string + type: array + x-kubernetes-list-type: set + required: + - namespace + - secrets + type: object + type: array + x-kubernetes-list-type: atomic + timeout: + format: int32 + type: integer + type: object site: - description: |- - Site is the Datadog intake site Agent data are sent to. - Set to 'datadoghq.com' to send data to the US1 site (default). - Set to 'datadoghq.eu' to send data to the EU site. - Set to 'us3.datadoghq.com' to send data to the US3 site. - Set to 'us5.datadoghq.com' to send data to the US5 site. - Set to 'ddog-gov.com' to send data to the US1-FED site. - Set to 'ap1.datadoghq.com' to send data to the AP1 site. - Default: 'datadoghq.com' type: string tags: - description: |- - Tags contains a list of tags to attach to every metric, event and service check collected. 
- Learn more about tagging: https://docs.datadoghq.com/tagging/ items: type: string type: array @@ -1969,97 +1240,55 @@ spec: type: object override: additionalProperties: - description: DatadogAgentComponentOverride is the generic description equivalent to a subset of the PodTemplate for a component. properties: affinity: - description: If specified, the pod's scheduling constraints. properties: nodeAffinity: - description: Describes node affinity scheduling rules for the pod. properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node matches the corresponding matchExpressions; the - node(s) with the highest sum are the most preferred. items: - description: |- - An empty preferred scheduling term matches all objects with implicit weight 0 - (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). properties: preference: - description: A node selector term, associated with the corresponding weight. properties: matchExpressions: - description: A list of node selector requirements by node's labels. items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. properties: key: - description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: - description: A list of node selector requirements by node's fields. items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. properties: key: - description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. 
items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic weight: - description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. format: int32 type: integer required: @@ -2067,223 +1296,137 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to an update), the system - may or may not try to eventually evict the pod from its node. properties: nodeSelectorTerms: - description: Required. A list of node selector terms. The terms are ORed. items: - description: |- - A null or empty node selector term matches no objects. The requirements of - them are ANDed. - The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. properties: matchExpressions: - description: A list of node selector requirements by node's labels. items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. properties: key: - description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchFields: - description: A list of node selector requirements by node's fields. items: - description: |- - A node selector requirement is a selector that contains values, a key, and an operator - that relates the key and values. properties: key: - description: The label key that the selector applies to. type: string operator: - description: |- - Represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. type: string values: - description: |- - An array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. If the operator is Gt or Lt, the values - array must have a single element, which will be interpreted as an integer. - This array is replaced during a strategic merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic type: object x-kubernetes-map-type: atomic type: array + x-kubernetes-list-type: atomic required: - nodeSelectorTerms type: object x-kubernetes-map-type: atomic type: object podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). 
properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) properties: podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, in this case pods. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. 
type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. format: int32 type: integer required: @@ -2291,268 +1434,165 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, in this case pods. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. 
If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. 
type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: - description: |- - The scheduler will prefer to schedule pods to nodes that satisfy - the anti-affinity expressions specified by this field, but it may choose - a node that violates one or more of the expressions. The node that is - most preferred is the one with the greatest sum of weights, i.e. - for each node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, etc.), - compute a sum by iterating through the elements of this field and adding - "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. items: - description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) properties: podAffinityTerm: - description: Required. A pod affinity term, associated with the corresponding weight. properties: labelSelector: - description: A label query over a set of resources, in this case pods. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. 
properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. type: string required: - topologyKey type: object weight: - description: |- - weight associated with matching the corresponding podAffinityTerm, - in the range 1-100. format: int32 type: integer required: @@ -2560,255 +1600,161 @@ spec: - weight type: object type: array + x-kubernetes-list-type: atomic requiredDuringSchedulingIgnoredDuringExecution: - description: |- - If the anti-affinity requirements specified by this field are not met at - scheduling time, the pod will not be scheduled onto the node. - If the anti-affinity requirements specified by this field cease to be met - at some point during pod execution (e.g. due to a pod label update), the - system may or may not try to eventually evict the pod from its node. - When there are multiple elements, the lists of nodes corresponding to each - podAffinityTerm are intersected, i.e. all terms must be satisfied. items: - description: |- - Defines a set of pods (namely those matching the labelSelector - relative to the given namespace(s)) that this pod should be - co-located (affinity) or not co-located (anti-affinity) with, - where co-located is defined as running on a node whose value of - the label with key matches that of any node on which - a pod of the set of pods is running properties: labelSelector: - description: A label query over a set of resources, in this case pods. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. 
type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic + matchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic + mismatchLabelKeys: + items: + type: string + type: array + x-kubernetes-list-type: atomic namespaceSelector: - description: |- - A label query over the set of namespaces that the term applies to. - The term is applied to the union of the namespaces selected by this field - and the ones listed in the namespaces field. - null selector and null or empty namespaces list means "this pod's namespace". - An empty selector ({}) matches all namespaces. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic namespaces: - description: |- - namespaces specifies a static list of namespace names that the term applies to. - The term is applied to the union of the namespaces listed in this field - and the ones selected by namespaceSelector. - null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
items: type: string type: array + x-kubernetes-list-type: atomic topologyKey: - description: |- - This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where co-located is defined as running on a node - whose value of the label with key topologyKey matches that of any node on which any of the - selected pods is running. - Empty topologyKey is not allowed. type: string required: - topologyKey type: object type: array + x-kubernetes-list-type: atomic type: object type: object annotations: additionalProperties: type: string - description: Annotations provide annotations that are added to the different component (Datadog Agent, Cluster Agent, Cluster Check Runner) pods. type: object containers: additionalProperties: - description: DatadogAgentGenericContainer is the generic structure describing any container's common configuration. properties: appArmorProfileName: - description: AppArmorProfileName specifies an apparmor profile. type: string args: - description: Args allows the specification of extra args to the `Command` parameter items: type: string type: array x-kubernetes-list-type: atomic command: - description: Command allows the specification of a custom entrypoint for container items: type: string type: array x-kubernetes-list-type: atomic env: - description: |- - Specify additional environment variables in the container. - See also: https://docs.datadoghq.com/agent/kubernetes/?tab=helm#environment-variables items: - description: EnvVar represents an environment variable present in a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. type: string value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". type: string valueFrom: - description: Source for the environment variable's value. Cannot be used if value is not empty. properties: configMapKeyRef: - description: Selects a key of a ConfigMap. properties: key: - description: The key to select. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the ConfigMap or its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the specified API version. 
type: string required: - fieldPath type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: - description: 'Container name: required for volumes, optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource type: object x-kubernetes-map-type: atomic secretKeyRef: - description: Selects a key of a secret in the pod's namespace properties: key: - description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the Secret or its key must be defined type: boolean required: - key @@ -2823,335 +1769,182 @@ spec: - name x-kubernetes-list-type: map healthPort: - description: |- - HealthPort of the container for the internal liveness probe. - Must be the same as the Liveness/Readiness probes. format: int32 type: integer livenessProbe: - description: Configure the Liveness Probe of the container properties: exec: - description: Exec specifies the action to take. properties: command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: - description: GRPC specifies an action involving a GRPC port. properties: port: - description: Port number of the gRPC service. Number must be in the range 1 to 65535. format: int32 type: integer service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. + default: "" type: string required: - port type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. type: string httpHeaders: - description: Custom headers to set in the request. HTTP allows repeated headers. items: - description: HTTPHeader describes a custom header to be used in HTTP probes properties: name: - description: |- - The header field name. 
- This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: - description: The header field value type: string required: - name - value type: object type: array + x-kubernetes-list-type: atomic path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving a TCP port. properties: host: - description: 'Optional: Host name to connect to, defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. - The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object logLevel: - description: |- - LogLevel sets logging verbosity (overrides global setting). - Valid log levels are: trace, debug, info, warn, error, critical, and off. - Default: 'info' type: string name: - description: Name of the container that is overridden type: string readinessProbe: - description: Configure the Readiness Probe of the container properties: exec: - description: Exec specifies the action to take. properties: command: - description: |- - Command is the command line to execute inside the container, the working directory for the - command is root ('/') in the container's filesystem. 
The command is simply exec'd, it is - not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - a shell, you need to explicitly call out to that shell. - Exit status of 0 is treated as live/healthy and non-zero is unhealthy. items: type: string type: array + x-kubernetes-list-type: atomic type: object failureThreshold: - description: |- - Minimum consecutive failures for the probe to be considered failed after having succeeded. - Defaults to 3. Minimum value is 1. format: int32 type: integer grpc: - description: GRPC specifies an action involving a GRPC port. properties: port: - description: Port number of the gRPC service. Number must be in the range 1 to 65535. format: int32 type: integer service: - description: |- - Service is the name of the service to place in the gRPC HealthCheckRequest - (see https://github.com/grpc/grpc/blob/master/doc/health-checking.md). - - - If this is not specified, the default behavior is defined by gRPC. + default: "" type: string required: - port type: object httpGet: - description: HTTPGet specifies the http request to perform. properties: host: - description: |- - Host name to connect to, defaults to the pod IP. You probably want to set - "Host" in httpHeaders instead. type: string httpHeaders: - description: Custom headers to set in the request. HTTP allows repeated headers. items: - description: HTTPHeader describes a custom header to be used in HTTP probes properties: name: - description: |- - The header field name. - This will be canonicalized upon output, so case-variant names will be understood as the same header. type: string value: - description: The header field value type: string required: - name - value type: object type: array + x-kubernetes-list-type: atomic path: - description: Path to access on the HTTP server. type: string port: anyOf: - type: integer - type: string - description: |- - Name or number of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true scheme: - description: |- - Scheme to use for connecting to the host. - Defaults to HTTP. type: string required: - port type: object initialDelaySeconds: - description: |- - Number of seconds after the container has started before liveness probes are initiated. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer periodSeconds: - description: |- - How often (in seconds) to perform the probe. - Default to 10 seconds. Minimum value is 1. format: int32 type: integer successThreshold: - description: |- - Minimum consecutive successes for the probe to be considered successful after having failed. - Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. format: int32 type: integer tcpSocket: - description: TCPSocket specifies an action involving a TCP port. properties: host: - description: 'Optional: Host name to connect to, defaults to the pod IP.' type: string port: anyOf: - type: integer - type: string - description: |- - Number or name of the port to access on the container. - Number must be in the range 1 to 65535. - Name must be an IANA_SVC_NAME. x-kubernetes-int-or-string: true required: - port type: object terminationGracePeriodSeconds: - description: |- - Optional duration in seconds the pod needs to terminate gracefully upon probe failure. 
- The grace period is the duration in seconds after the processes running in the pod are sent - a termination signal and the time when the processes are forcibly halted with a kill signal. - Set this value longer than the expected cleanup time for your process. - If this value is nil, the pod's terminationGracePeriodSeconds will be used. Otherwise, this - value overrides the value provided by the pod spec. - Value must be non-negative integer. The value zero indicates stop immediately via - the kill signal (no opportunity to shut down). - This is a beta field and requires enabling ProbeTerminationGracePeriod feature gate. - Minimum value is 1. spec.terminationGracePeriodSeconds is used if unset. format: int64 type: integer timeoutSeconds: - description: |- - Number of seconds after which the probe times out. - Defaults to 1 second. Minimum value is 1. - More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes format: int32 type: integer type: object resources: - description: |- - Specify the Request and Limits of the pods - To get guaranteed QoS class, specify requests and limits equal. - See also: http://kubernetes.io/docs/user-guide/compute-resources/ properties: claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + type: string + request: type: string required: - name @@ -3167,9 +1960,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -3178,54 +1968,25 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object seccompConfig: - description: |- - Seccomp configurations to override Operator actions. For all other Seccomp Profile manipulation, - use SecurityContext. properties: customProfile: - description: |- - CustomProfile specifies a ConfigMap containing a custom Seccomp Profile. - ConfigMap data must either have the key `system-probe-seccomp.json` or CustomProfile.Items - must include a corev1.KeytoPath that maps the key to the path `system-probe-seccomp.json`. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. 
properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -3236,210 +1997,181 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object customRootPath: - description: CustomRootPath specifies a custom Seccomp Profile root location. type: string type: object securityContext: - description: Container-level SecurityContext. properties: allowPrivilegeEscalation: - description: |- - AllowPrivilegeEscalation controls whether a process can gain more - privileges than its parent process. This bool directly controls if - the no_new_privs flag will be set on the container process. - AllowPrivilegeEscalation is true always when the container is: - 1) run as Privileged - 2) has CAP_SYS_ADMIN - Note that this field cannot be set when spec.os.name is windows. type: boolean + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object capabilities: - description: |- - The capabilities to add/drop when running containers. - Defaults to the default set of capabilities granted by the container runtime. - Note that this field cannot be set when spec.os.name is windows. properties: add: - description: Added capabilities items: - description: Capability represent POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic drop: - description: Removed capabilities items: - description: Capability represent POSIX capabilities type type: string type: array + x-kubernetes-list-type: atomic type: object privileged: - description: |- - Run container in privileged mode. - Processes in privileged containers are essentially equivalent to root on the host. - Defaults to false. - Note that this field cannot be set when spec.os.name is windows. type: boolean procMount: - description: |- - procMount denotes the type of proc mount to use for the containers. - The default is DefaultProcMount which uses the container runtime defaults for - readonly paths and masked paths. - This requires the ProcMountType feature flag to be enabled. - Note that this field cannot be set when spec.os.name is windows. type: string readOnlyRootFilesystem: - description: |- - Whether this container has a read-only root filesystem. - Default is false. - Note that this field cannot be set when spec.os.name is windows. type: boolean runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. 
- Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: |- - The UID to run the entrypoint of the container process. - Defaults to user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: |- - The SELinux context to be applied to the container. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is windows. properties: level: - description: Level is SELinux level label that applies to the container. type: string role: - description: Role is a SELinux role label that applies to the container. type: string type: - description: Type is a SELinux type label that applies to the container. type: string user: - description: User is a SELinux user label that applies to the container. type: string type: object seccompProfile: - description: |- - The seccomp options to use by this container. If seccomp options are - provided at both the pod & container level, the container options - override the pod options. - Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. type: string type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. type: string required: - type type: object windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options from the PodSecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - Note that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the GMSA credential spec to use. 
type: string hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. type: string type: object type: object + startupProbe: + properties: + exec: + properties: + command: + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + failureThreshold: + format: int32 + type: integer + grpc: + properties: + port: + format: int32 + type: integer + service: + default: "" + type: string + required: + - port + type: object + httpGet: + properties: + host: + type: string + httpHeaders: + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + path: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scheme: + type: string + required: + - port + type: object + initialDelaySeconds: + format: int32 + type: integer + periodSeconds: + format: int32 + type: integer + successThreshold: + format: int32 + type: integer + tcpSocket: + properties: + host: + type: string + port: + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + required: + - port + type: object + terminationGracePeriodSeconds: + format: int64 + type: integer + timeoutSeconds: + format: int32 + type: integer + type: object volumeMounts: - description: Specify additional volume mounts in the container. items: - description: VolumeMount describes a mounting of a Volume within a container. properties: mountPath: - description: |- - Path within the container at which the volume should be mounted. Must - not contain ':'. type: string mountPropagation: - description: |- - mountPropagation determines how mounts are propagated from the host - to container and the other way around. - When not set, MountPropagationNone is used. - This field is beta in 1.10. type: string name: - description: This must match the Name of a Volume. type: string readOnly: - description: |- - Mounted read-only if true, read-write otherwise (false or unspecified). - Defaults to false. type: boolean + recursiveReadOnly: + type: string subPath: - description: |- - Path within the volume from which the container's volume should be mounted. - Defaults to "" (volume's root). type: string subPathExpr: - description: |- - Expanded path within the volume from which the container's volume should be mounted. - Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - Defaults to "" (volume's root). - SubPathExpr and SubPath are mutually exclusive. type: string required: - mountPath @@ -3451,53 +2183,27 @@ spec: - mountPath x-kubernetes-list-type: map type: object - description: |- - Configure the basic configurations for each Agent container. 
Valid Agent container names are: - `agent`, `cluster-agent`, `init-config`, `init-volume`, `process-agent`, `seccomp-setup`, - `security-agent`, `system-probe`, `trace-agent`, and `all`. - Configuration under `all` applies to all configured containers. type: object + createPodDisruptionBudget: + type: boolean createRbac: - description: Set CreateRbac to false to prevent automatic creation of Role/ClusterRole for this component type: boolean customConfigurations: additionalProperties: - description: |- - CustomConfig provides a place for custom configuration of the Agent or Cluster Agent, corresponding to datadog.yaml, - system-probe.yaml, security-agent.yaml or datadog-cluster.yaml. - The configuration can be provided in the ConfigData field as raw data, or referenced in a ConfigMap. - Note: `ConfigData` and `ConfigMap` cannot be set together. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -3508,162 +2214,93 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object - description: |- - CustomConfiguration allows to specify custom configuration files for `datadog.yaml`, `datadog-cluster.yaml`, `security-agent.yaml`, and `system-probe.yaml`. - The content is merged with configuration generated by the Datadog Operator, with priority given to custom configuration. - WARNING: It is possible to override values set in the `DatadogAgent`. type: object disabled: - description: Disabled force disables a component. type: boolean dnsConfig: - description: |- - Specifies the DNS parameters of a pod. - Parameters specified here will be merged to the generated DNS - configuration based on DNSPolicy. properties: nameservers: - description: |- - A list of DNS name server IP addresses. - This will be appended to the base nameservers generated from DNSPolicy. - Duplicated nameservers will be removed. items: type: string type: array + x-kubernetes-list-type: atomic options: - description: |- - A list of DNS resolver options. - This will be merged with the base options generated from DNSPolicy. - Duplicated entries will be removed. Resolution options given in Options - will override those that appear in the base DNSPolicy. items: - description: PodDNSConfigOption defines DNS resolver options of a pod. properties: name: - description: Required. 
type: string value: type: string type: object type: array + x-kubernetes-list-type: atomic searches: - description: |- - A list of DNS search domains for host-name lookup. - This will be appended to the base search paths generated from DNSPolicy. - Duplicated search paths will be removed. items: type: string type: array + x-kubernetes-list-type: atomic type: object dnsPolicy: - description: |- - Set DNS policy for the pod. - Defaults to "ClusterFirst". - Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - To have DNS options set along with hostNetwork, you have to specify DNS policy - explicitly to 'ClusterFirstWithHostNet'. type: string env: - description: |- - Specify additional environment variables for all containers in this component - Priority is Container > Component. - See also: https://docs.datadoghq.com/agent/kubernetes/?tab=helm#environment-variables items: - description: EnvVar represents an environment variable present in a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. type: string value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". type: string valueFrom: - description: Source for the environment variable's value. Cannot be used if value is not empty. properties: configMapKeyRef: - description: Selects a key of a ConfigMap. properties: key: - description: The key to select. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the ConfigMap or its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. 
properties: containerName: - description: 'Container name: required for volumes, optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource type: object x-kubernetes-map-type: atomic secretKeyRef: - description: Selects a key of a secret in the pod's namespace properties: key: - description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the Secret or its key must be defined type: boolean required: - key @@ -3677,45 +2314,48 @@ spec: x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + envFrom: + items: + properties: + configMapRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + type: string + secretRef: + properties: + name: + default: "" + type: string + optional: + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array extraChecksd: - description: |- - Checksd configuration allowing to specify custom checks placed under /etc/datadog-agent/checks.d/ - See https://docs.datadoghq.com/agent/guide/agent-configuration-files/?tab=agentv6 for more details. properties: configDataMap: additionalProperties: type: string - description: |- - ConfigDataMap corresponds to the content of the configuration files. - The key should be the filename the contents get mounted to; for instance check.py or check.yaml. type: object configMap: - description: ConfigMap references an existing ConfigMap with the content of the configuration files. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -3726,49 +2366,26 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object extraConfd: - description: |- - Confd configuration allowing to specify config files for custom checks placed under /etc/datadog-agent/conf.d/. - See https://docs.datadoghq.com/agent/guide/agent-configuration-files/?tab=agentv6 for more details. 
properties: configDataMap: additionalProperties: type: string - description: |- - ConfigDataMap corresponds to the content of the configuration files. - The key should be the filename the contents get mounted to; for instance check.py or check.yaml. type: object configMap: - description: ConfigMap references an existing ConfigMap with the content of the configuration files. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -3779,681 +2396,342 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object hostNetwork: - description: Host networking requested for this pod. Use the host's network namespace. type: boolean hostPID: - description: Use the host's PID namespace. type: boolean image: - description: The container image of the different components (Datadog Agent, Cluster Agent, Cluster Check Runner). properties: jmxEnabled: - description: |- - Define whether the Agent image should support JMX. - To be used if the Name field does not correspond to a full image string. type: boolean name: - description: |- - Define the image to use: - Use "gcr.io/datadoghq/agent:latest" for Datadog Agent 7. - Use "datadog/dogstatsd:latest" for standalone Datadog Agent DogStatsD 7. - Use "gcr.io/datadoghq/cluster-agent:latest" for Datadog Cluster Agent. - Use "agent" with the registry and tag configurations for /agent:. - Use "cluster-agent" with the registry and tag configurations for /cluster-agent:. - If the name is the full image string—`:` or `/:`, then `tag`, `jmxEnabled`, - and `global.registry` values are ignored. - Otherwise, image string is created by overriding default settings with supplied `name`, `tag`, and `jmxEnabled` values; - image string is created using default registry unless `global.registry` is configured. type: string pullPolicy: - description: |- - The Kubernetes pull policy: - Use Always, Never, or IfNotPresent. type: string pullSecrets: - description: |- - It is possible to specify Docker registry credentials. - See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic type: array tag: - description: |- - Define the image tag to use. 
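# Illustrative sketch of the extraChecksd/extraConfd overrides above: ship a custom
# check and its configuration inline via configDataMap (an existing ConfigMap can be
# referenced through the configMap field instead). Check name and content are
# hypothetical placeholders.
spec:
  override:
    nodeAgent:
      extraChecksd:
        configDataMap:
          hello.py: |
            from datadog_checks.base import AgentCheck
            class HelloCheck(AgentCheck):
                def check(self, instance):
                    self.gauge("hello.world", 1)
      extraConfd:
        configDataMap:
          hello.yaml: |
            init_config:
            instances: [{}]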
- To be used if the Name field does not correspond to a full image string. type: string type: object labels: additionalProperties: type: string - description: AdditionalLabels provide labels that are added to the different component (Datadog Agent, Cluster Agent, Cluster Check Runner) pods. type: object name: - description: Name overrides the default name for the resource type: string nodeSelector: additionalProperties: type: string - description: |- - NodeSelector is a selector which must be true for the pod to fit on a node. - Selector which must match a node's labels for the pod to be scheduled on that node. - More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ type: object priorityClassName: - description: |- - If specified, indicates the pod's priority. "system-node-critical" and "system-cluster-critical" - are two special keywords which indicate the highest priorities with the former being the highest priority. - Any other name must be defined by creating a PriorityClass object with that name. If not specified, - the pod priority is default, or zero if there is no default. type: string replicas: - description: |- - Number of the replicas. - Not applicable for a DaemonSet/ExtendedDaemonSet deployment format: int32 type: integer securityContext: - description: Pod-level SecurityContext. properties: + appArmorProfile: + properties: + localhostProfile: + type: string + type: + type: string + required: + - type + type: object fsGroup: - description: |- - A special supplemental group that applies to all containers in a pod. - Some volume types allow the Kubelet to change the ownership of that volume - to be owned by the pod: - - - 1. The owning GID will be the FSGroup - 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - 3. The permission bits are OR'd with rw-rw---- - - - If unset, the Kubelet will not modify the ownership and permissions of any volume. - Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer fsGroupChangePolicy: - description: |- - fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - before being exposed inside Pod. This field will only apply to - volume types which support fsGroup based ownership(and permissions). - It will have no effect on ephemeral volume types such as: secret, configmaps - and emptydir. - Valid values are "OnRootMismatch" and "Always". If not specified, "Always" is used. - Note that this field cannot be set when spec.os.name is windows. type: string runAsGroup: - description: |- - The GID to run the entrypoint of the container process. - Uses runtime default if unset. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer runAsNonRoot: - description: |- - Indicates that the container must run as a non-root user. - If true, the Kubelet will validate the image at runtime to ensure that it - does not run as UID 0 (root) and fail to start the container if it does. - If unset or false, no such validation will be performed. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. type: boolean runAsUser: - description: |- - The UID to run the entrypoint of the container process. 
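# Illustrative sketch of the image, replicas, nodeSelector and priorityClassName
# overrides above, here applied to the Cluster Agent component (the clusterAgent key
# and the tag value are assumptions).
spec:
  override:
    clusterAgent:
      replicas: 2                         # not applicable to DaemonSet components
      image:
        name: cluster-agent               # combined with the configured registry and tag
        tag: 7.66.0                       # hypothetical tag
        pullPolicy: IfNotPresent
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical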
- Defaults to user specified in image metadata if unspecified. - May also be set in SecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence - for that container. - Note that this field cannot be set when spec.os.name is windows. format: int64 type: integer seLinuxOptions: - description: |- - The SELinux context to be applied to all containers. - If unspecified, the container runtime will allocate a random SELinux context for each - container. May also be set in SecurityContext. If set in - both SecurityContext and PodSecurityContext, the value specified in SecurityContext - takes precedence for that container. - Note that this field cannot be set when spec.os.name is windows. properties: level: - description: Level is SELinux level label that applies to the container. type: string role: - description: Role is a SELinux role label that applies to the container. type: string type: - description: Type is a SELinux type label that applies to the container. type: string user: - description: User is a SELinux user label that applies to the container. type: string type: object seccompProfile: - description: |- - The seccomp options to use by the containers in this pod. - Note that this field cannot be set when spec.os.name is windows. properties: localhostProfile: - description: |- - localhostProfile indicates a profile defined in a file on the node should be used. - The profile must be preconfigured on the node to work. - Must be a descending path, relative to the kubelet's configured seccomp profile location. - Must be set if type is "Localhost". Must NOT be set for any other type. type: string type: - description: |- - type indicates which kind of seccomp profile will be applied. - Valid options are: - - - Localhost - a profile defined in a file on the node should be used. - RuntimeDefault - the container runtime default profile should be used. - Unconfined - no profile should be applied. type: string required: - type type: object supplementalGroups: - description: |- - A list of groups applied to the first process run in each container, in addition - to the container's primary GID, the fsGroup (if specified), and group memberships - defined in the container image for the uid of the container process. If unspecified, - no additional groups are added to any container. Note that group memberships - defined in the container image for the uid of the container process are still effective, - even if they are not included in this list. - Note that this field cannot be set when spec.os.name is windows. items: format: int64 type: integer type: array + x-kubernetes-list-type: atomic + supplementalGroupsPolicy: + type: string sysctls: - description: |- - Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - sysctls (by the container runtime) might fail to launch. - Note that this field cannot be set when spec.os.name is windows. items: - description: Sysctl defines a kernel parameter to be set properties: name: - description: Name of a property to set type: string value: - description: Value of a property to set type: string required: - name - value type: object type: array + x-kubernetes-list-type: atomic windowsOptions: - description: |- - The Windows specific settings applied to all containers. - If unspecified, the options within a container's SecurityContext will be used. - If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. 
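# Illustrative sketch of the pod-level securityContext override above, including the
# newly added appArmorProfile and supplementalGroupsPolicy fields. Values are examples;
# the new fields only take effect on Kubernetes versions that support them.
spec:
  override:
    nodeAgent:
      securityContext:
        runAsNonRoot: true
        runAsUser: 100                    # hypothetical UID
        seccompProfile:
          type: RuntimeDefault
        appArmorProfile:
          type: RuntimeDefault
        supplementalGroupsPolicy: Merge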
- Note that this field cannot be set when spec.os.name is linux. properties: gmsaCredentialSpec: - description: |- - GMSACredentialSpec is where the GMSA admission webhook - (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - GMSA credential spec named by the GMSACredentialSpecName field. type: string gmsaCredentialSpecName: - description: GMSACredentialSpecName is the name of the GMSA credential spec to use. type: string hostProcess: - description: |- - HostProcess determines if a container should be run as a 'Host Process' container. - All of a Pod's containers must have the same effective HostProcess value - (it is not allowed to have a mix of HostProcess containers and non-HostProcess containers). - In addition, if HostProcess is true then HostNetwork must also be set to true. type: boolean runAsUserName: - description: |- - The UserName in Windows to run the entrypoint of the container process. - Defaults to the user specified in image metadata if unspecified. - May also be set in PodSecurityContext. If set in both SecurityContext and - PodSecurityContext, the value specified in SecurityContext takes precedence. type: string type: object type: object + serviceAccountAnnotations: + additionalProperties: + type: string + type: object serviceAccountName: - description: |- - Sets the ServiceAccount used by this component. - Ignored if the field CreateRbac is true. type: string tolerations: - description: Configure the component tolerations. items: - description: |- - The pod this Toleration is attached to tolerates any taint that matches - the triple using the matching operator . properties: effect: - description: |- - Effect indicates the taint effect to match. Empty means match all taint effects. - When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. type: string key: - description: |- - Key is the taint key that the toleration applies to. Empty means match all taint keys. - If the key is empty, operator must be Exists; this combination means to match all values and all keys. type: string operator: - description: |- - Operator represents a key's relationship to the value. - Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod can - tolerate all taints of a particular category. type: string tolerationSeconds: - description: |- - TolerationSeconds represents the period of time the toleration (which must be - of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - it is not set, which means tolerate the taint forever (do not evict). Zero and - negative values will be treated as 0 (evict immediately) by the system. format: int64 type: integer value: - description: |- - Value is the taint value the toleration matches to. - If the operator is Exists, the value should be empty, otherwise just a regular string. type: string type: object type: array x-kubernetes-list-type: atomic updateStrategy: - description: The deployment strategy to use to replace existing pods with new ones. properties: rollingUpdate: - description: Configure the rolling update strategy of the Deployment or DaemonSet. properties: maxSurge: anyOf: - type: integer - type: string - description: |- - MaxSurge behaves differently based on the Kubernetes resource. Refer to the - Kubernetes API documentation for additional details. 
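# Illustrative sketch of the new serviceAccountAnnotations field and of tolerations;
# the IAM role ARN and the toleration shown are placeholders.
spec:
  override:
    nodeAgent:
      serviceAccountAnnotations:
        eks.amazonaws.com/role-arn: arn:aws:iam::123456789012:role/datadog-agent
      tolerations:
        - key: node-role.kubernetes.io/control-plane
          operator: Exists
          effect: NoSchedule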
x-kubernetes-int-or-string: true maxUnavailable: anyOf: - type: integer - type: string - description: |- - The maximum number of pods that can be unavailable during the update. - Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - Refer to the Kubernetes API documentation for additional details.. x-kubernetes-int-or-string: true type: object type: - description: |- - Type can be "RollingUpdate" or "OnDelete" for DaemonSets and "RollingUpdate" - or "Recreate" for Deployments type: string type: object volumes: - description: Specify additional volumes in the different components (Datadog Agent, Cluster Agent, Cluster Check Runner). items: - description: Volume represents a named volume in a pod that may be accessed by any container in the pod. properties: awsElasticBlockStore: - description: |- - awsElasticBlockStore represents an AWS Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore properties: fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - TODO: how do we prevent errors in the filesystem from compromising the machine type: string partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). format: int32 type: integer readOnly: - description: |- - readOnly value true will force the readOnly setting in VolumeMounts. - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore type: boolean volumeID: - description: |- - volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). - More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore type: string required: - volumeID type: object azureDisk: - description: azureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. properties: cachingMode: - description: 'cachingMode is the Host Caching mode: None, Read Only, Read Write.' type: string diskName: - description: diskName is the Name of the data disk in the blob storage type: string diskURI: - description: diskURI is the URI of data disk in the blob storage type: string fsType: - description: |- - fsType is Filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + default: ext4 type: string kind: - description: 'kind expected values are Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared' type: string readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. + default: false type: boolean required: - diskName - diskURI type: object azureFile: - description: azureFile represents an Azure File Service mount on the host and bind mount to the pod. 
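# Illustrative sketch of the updateStrategy override above; for a DaemonSet component
# the type can be RollingUpdate or OnDelete, and maxUnavailable accepts an absolute
# number or a percentage (values here are examples).
spec:
  override:
    nodeAgent:
      updateStrategy:
        type: RollingUpdate
        rollingUpdate:
          maxUnavailable: "10%"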
properties: readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. type: boolean secretName: - description: secretName is the name of secret that contains Azure Storage Account Name and Key type: string shareName: - description: shareName is the azure share Name type: string required: - secretName - shareName type: object cephfs: - description: cephFS represents a Ceph FS mount on the host that shares a pod's lifetime properties: monitors: - description: |- - monitors is Required: Monitors is a collection of Ceph monitors - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it items: type: string type: array + x-kubernetes-list-type: atomic path: - description: 'path is Optional: Used as the mounted root, rather than the full Ceph tree, default is /' type: string readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it type: boolean secretFile: - description: |- - secretFile is Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it type: string secretRef: - description: |- - secretRef is Optional: SecretRef is reference to the authentication secret for User, default is empty. - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic user: - description: |- - user is optional: User is the rados user name, default is admin - More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it type: string required: - monitors type: object cinder: - description: |- - cinder represents a cinder volume attached and mounted on kubelets host machine. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md properties: fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md type: string readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. - More info: https://examples.k8s.io/mysql-cinder-pd/README.md type: boolean secretRef: - description: |- - secretRef is optional: points to a secret object containing parameters used to connect - to OpenStack. properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic volumeID: - description: |- - volumeID used to identify the volume in cinder. 
- More info: https://examples.k8s.io/mysql-cinder-pd/README.md type: string required: - volumeID type: object configMap: - description: configMap represents a configMap that should populate this volume properties: defaultMode: - description: |- - defaultMode is optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object x-kubernetes-map-type: atomic csi: - description: csi (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature). properties: driver: - description: |- - driver is the name of the CSI driver that handles this volume. - Consult with your admin for the correct name as registered in the cluster. type: string fsType: - description: |- - fsType to mount. Ex. "ext4", "xfs", "ntfs". - If not provided, the empty value is passed to the associated CSI driver - which will determine the default filesystem to apply. type: string nodePublishSecretRef: - description: |- - nodePublishSecretRef is a reference to the secret object containing - sensitive information to pass to the CSI driver to complete the CSI - NodePublishVolume and NodeUnpublishVolume calls. - This field is optional, and may be empty if no secret is required. If the - secret object contains more than one secret, all secret references are passed. 
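# Illustrative sketch of a configMap-backed entry in the volumes override above.
# The ConfigMap name and keys are hypothetical; mounting the volume into a container
# is done through the corresponding container-level volumeMounts override (not shown).
spec:
  override:
    nodeAgent:
      volumes:
        - name: custom-conf
          configMap:
            name: my-configmap
            items:
              - key: conf.yaml
                path: conf.yaml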
properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic readOnly: - description: |- - readOnly specifies a read-only configuration for the volume. - Defaults to false (read/write). type: boolean volumeAttributes: additionalProperties: type: string - description: |- - volumeAttributes stores driver-specific properties that are passed to the CSI - driver. Consult your driver's documentation for supported values. type: object required: - driver type: object downwardAPI: - description: downwardAPI represents downward API about the pod that should populate this volume properties: defaultMode: - description: |- - Optional: mode bits to use on created files by default. Must be a - Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer items: - description: Items is a list of downward API volume file items: - description: DownwardAPIVolumeFile represents information to create the file containing the pod field properties: fieldRef: - description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' properties: apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' type: string resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. 
properties: containerName: - description: 'Container name: required for volumes, optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource @@ -4463,133 +2741,39 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object emptyDir: - description: |- - emptyDir represents a temporary directory that shares a pod's lifetime. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir properties: medium: - description: |- - medium represents what type of storage medium should back this directory. - The default is "" which means to use the node's default medium. - Must be an empty string (default) or Memory. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir type: string sizeLimit: anyOf: - type: integer - type: string - description: |- - sizeLimit is the total amount of local storage required for this EmptyDir volume. - The size limit is also applicable for memory medium. - The maximum usage on memory medium EmptyDir would be the minimum value between - the SizeLimit specified here and the sum of memory limits of all containers in a pod. - The default is nil which means that the limit is undefined. - More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true type: object ephemeral: - description: |- - ephemeral represents a volume that is handled by a cluster storage driver. - The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, - and deleted when the pod is removed. - - - Use this if: - a) the volume is only needed while the pod runs, - b) features of normal volumes like restoring from snapshot or capacity - tracking are needed, - c) the storage driver is specified through a storage class, and - d) the storage driver supports dynamic volume provisioning through - a PersistentVolumeClaim (see EphemeralVolumeSource for more - information on the connection between this volume type - and PersistentVolumeClaim). - - - Use PersistentVolumeClaim or one of the vendor-specific - APIs for volumes that persist for longer than the lifecycle - of an individual pod. - - - Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to - be used that way - see the documentation of the driver for - more information. - - - A pod can use both types of ephemeral volumes and - persistent volumes at the same time. properties: volumeClaimTemplate: - description: |- - Will be used to create a stand-alone PVC to provision the volume. - The pod in which this EphemeralVolumeSource is embedded will be the - owner of the PVC, i.e. the PVC will be deleted together with the - pod. The name of the PVC will be `-` where - `` is the name from the `PodSpec.Volumes` array - entry. Pod validation will reject the pod if the concatenated name - is not valid for a PVC (for example, too long). - - - An existing PVC with that name that is not owned by the pod - will *not* be used for the pod to avoid using an unrelated - volume by mistake. 
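# Illustrative sketch of an emptyDir entry in the volumes override above
# (medium and sizeLimit are optional; values here are examples).
spec:
  override:
    nodeAgent:
      volumes:
        - name: scratch
          emptyDir:
            medium: Memory
            sizeLimit: 256Mi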
Starting the pod is then blocked until - the unrelated PVC is removed. If such a pre-created PVC is - meant to be used by the pod, the PVC has to updated with an - owner reference to the pod once the pod exists. Normally - this should not be necessary, but it may be useful when - manually reconstructing a broken cluster. - - - This field is read-only and no changes will be made by Kubernetes - to the PVC after it has been created. - - - Required, must not be nil. properties: metadata: - description: |- - May contain labels and annotations that will be copied into the PVC - when creating it. No other fields are allowed and will be rejected during - validation. type: object spec: - description: |- - The specification for the PersistentVolumeClaim. The entire content is - copied unchanged into the PVC that gets created from this - template. The same fields as in a PersistentVolumeClaim - are also valid here. properties: accessModes: - description: |- - accessModes contains the desired access modes the volume should have. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 items: type: string type: array + x-kubernetes-list-type: atomic dataSource: - description: |- - dataSource field can be used to specify either: - * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot) - * An existing PVC (PersistentVolumeClaim) - If the provisioner or an external controller can support the specified data source, - it will create a new volume based on the contents of the specified data source. - When the AnyVolumeDataSource feature gate is enabled, dataSource contents will be copied to dataSourceRef, - and dataSourceRef contents will be copied to dataSource when dataSourceRef.namespace is not specified. - If the namespace is specified, then dataSourceRef will not be copied to dataSource. properties: apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. type: string kind: - description: Kind is the type of resource being referenced type: string name: - description: Name is the name of resource being referenced type: string required: - kind @@ -4597,88 +2781,21 @@ spec: type: object x-kubernetes-map-type: atomic dataSourceRef: - description: |- - dataSourceRef specifies the object from which to populate the volume with data, if a non-empty - volume is desired. This may be any object from a non-empty API group (non - core object) or a PersistentVolumeClaim object. - When this field is specified, volume binding will only succeed if the type of - the specified object matches some installed volume populator or dynamic - provisioner. - This field will replace the functionality of the dataSource field and as such - if both fields are non-empty, they must have the same value. For backwards - compatibility, when namespace isn't specified in dataSourceRef, - both fields (dataSource and dataSourceRef) will be set to the same - value automatically if one of them is empty and the other is non-empty. - When namespace is specified in dataSourceRef, - dataSource isn't set to the same value and must be empty. - There are three important differences between dataSource and dataSourceRef: - * While dataSource only allows two specific types of objects, dataSourceRef - allows any non-core object, as well as PersistentVolumeClaim objects. 
- * While dataSource ignores disallowed values (dropping them), dataSourceRef - preserves all values, and generates an error if a disallowed value is - specified. - * While dataSource only allows local objects, dataSourceRef allows objects - in any namespaces. - (Beta) Using this field requires the AnyVolumeDataSource feature gate to be enabled. - (Alpha) Using the namespace field of dataSourceRef requires the CrossNamespaceVolumeDataSource feature gate to be enabled. properties: apiGroup: - description: |- - APIGroup is the group for the resource being referenced. - If APIGroup is not specified, the specified Kind must be in the core API group. - For any other third-party types, APIGroup is required. type: string kind: - description: Kind is the type of resource being referenced type: string name: - description: Name is the name of resource being referenced type: string namespace: - description: |- - Namespace is the namespace of resource being referenced - Note that when a namespace is specified, a gateway.networking.k8s.io/ReferenceGrant object is required in the referent namespace to allow that namespace's owner to accept the reference. See the ReferenceGrant documentation for details. - (Alpha) This field requires the CrossNamespaceVolumeDataSource feature gate to be enabled. type: string required: - kind - name type: object resources: - description: |- - resources represents the minimum resources the volume should have. - If RecoverVolumeExpansionFailure feature is enabled users are allowed to specify resource requirements - that are lower than previous value but must still be higher than capacity recorded in the - status field of the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources properties: - claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. - items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. - properties: - name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. - type: string - required: - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map limits: additionalProperties: anyOf: @@ -4686,9 +2803,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -4697,67 +2811,41 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. 
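# Illustrative sketch of an ephemeral volume using the volumeClaimTemplate schema
# above; access mode and requested size are placeholders.
spec:
  override:
    nodeAgent:
      volumes:
        - name: ephemeral-cache
          ephemeral:
            volumeClaimTemplate:
              spec:
                accessModes: ["ReadWriteOnce"]
                resources:
                  requests:
                    storage: 1Gi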
- More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object selector: - description: selector is a label query over volumes to consider for binding. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic storageClassName: - description: |- - storageClassName is the name of the StorageClass required by the claim. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + type: string + volumeAttributesClassName: type: string volumeMode: - description: |- - volumeMode defines what type of volume is required by the claim. - Value of Filesystem is implied when not included in claim spec. type: string volumeName: - description: volumeName is the binding reference to the PersistentVolume backing this claim. type: string type: object required: @@ -4765,74 +2853,41 @@ spec: type: object type: object fc: - description: fc represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. properties: fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - TODO: how do we prevent errors in the filesystem from compromising the machine type: string lun: - description: 'lun is Optional: FC target lun number' format: int32 type: integer readOnly: - description: |- - readOnly is Optional: Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. type: boolean targetWWNs: - description: 'targetWWNs is Optional: FC target worldwide names (WWNs)' items: type: string type: array + x-kubernetes-list-type: atomic wwids: - description: |- - wwids Optional: FC volume world wide identifiers (wwids) - Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. items: type: string type: array + x-kubernetes-list-type: atomic type: object flexVolume: - description: |- - flexVolume represents a generic volume resource that is - provisioned/attached using an exec based plugin. properties: driver: - description: driver is the name of the driver to use for this volume. 
type: string fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. type: string options: additionalProperties: type: string - description: 'options is Optional: this field holds extra command options if any.' type: object readOnly: - description: |- - readOnly is Optional: defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: |- - secretRef is Optional: secretRef is reference to the secret object containing - sensitive information to pass to the plugin scripts. This may be - empty if no secret object is specified. If the secret object - contains more than one secret, all secrets are passed to the plugin - scripts. properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic @@ -4840,193 +2895,98 @@ spec: - driver type: object flocker: - description: flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running properties: datasetName: - description: |- - datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker - should be considered as deprecated type: string datasetUUID: - description: datasetUUID is the UUID of the dataset. This is unique identifier of a Flocker dataset type: string type: object gcePersistentDisk: - description: |- - gcePersistentDisk represents a GCE Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk properties: fsType: - description: |- - fsType is filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - TODO: how do we prevent errors in the filesystem from compromising the machine type: string partition: - description: |- - partition is the partition in the volume that you want to mount. - If omitted, the default is to mount by volume name. - Examples: For volume /dev/sda1, you specify the partition as "1". - Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk format: int32 type: integer pdName: - description: |- - pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk type: string readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk type: boolean required: - pdName type: object gitRepo: - description: |- - gitRepo represents a git repository at a particular revision. - DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - into the Pod's container. 
properties: directory: - description: |- - directory is the target directory name. - Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - git repository. Otherwise, if specified, the volume will contain the git repository in - the subdirectory with the given name. type: string repository: - description: repository is the URL type: string revision: - description: revision is the commit hash for the specified revision. type: string required: - repository type: object glusterfs: - description: |- - glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - More info: https://examples.k8s.io/volumes/glusterfs/README.md properties: endpoints: - description: |- - endpoints is the endpoint name that details Glusterfs topology. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod type: string path: - description: |- - path is the Glusterfs volume path. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod type: string readOnly: - description: |- - readOnly here will force the Glusterfs volume to be mounted with read-only permissions. - Defaults to false. - More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod type: boolean required: - endpoints - path type: object hostPath: - description: |- - hostPath represents a pre-existing file or directory on the host - machine that is directly exposed to the container. This is generally - used for system agents or other privileged things that are allowed - to see the host machine. Most containers will NOT need this. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - --- - TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - mount host directories as read/write. properties: path: - description: |- - path of the directory on the host. - If the path is a symlink, it will follow the link to the real path. - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath type: string type: - description: |- - type for HostPath Volume - Defaults to "" - More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath type: string required: - path type: object + image: + properties: + pullPolicy: + type: string + reference: + type: string + type: object iscsi: - description: |- - iscsi represents an ISCSI Disk resource that is attached to a - kubelet's host machine and then exposed to the pod. - More info: https://examples.k8s.io/volumes/iscsi/README.md properties: chapAuthDiscovery: - description: chapAuthDiscovery defines whether support iSCSI Discovery CHAP authentication type: boolean chapAuthSession: - description: chapAuthSession defines whether support iSCSI Session CHAP authentication type: boolean fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - TODO: how do we prevent errors in the filesystem from compromising the machine type: string initiatorName: - description: |- - initiatorName is the custom iSCSI Initiator Name. - If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - : will be created for the connection. type: string iqn: - description: iqn is the target iSCSI Qualified Name. 
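# Illustrative sketch of a hostPath entry and of the newly added image (OCI artifact)
# volume source above. The socket path and image reference are placeholders, and the
# image volume source only works on clusters where that feature is enabled.
spec:
  override:
    nodeAgent:
      volumes:
        - name: runtime-socket
          hostPath:
            path: /var/run/containerd/containerd.sock
            type: Socket
        - name: extra-content
          image:
            reference: registry.example.com/tools:1.0
            pullPolicy: IfNotPresent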
type: string iscsiInterface: - description: |- - iscsiInterface is the interface Name that uses an iSCSI transport. - Defaults to 'default' (tcp). + default: default type: string lun: - description: lun represents iSCSI Target Lun number. format: int32 type: integer portals: - description: |- - portals is the iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). items: type: string type: array + x-kubernetes-list-type: atomic readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. type: boolean secretRef: - description: secretRef is the CHAP Secret for iSCSI target and initiator authentication properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic targetPortal: - description: |- - targetPortal is iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - is other than default (typically TCP ports 860 and 3260). type: string required: - iqn @@ -5034,210 +2994,151 @@ spec: - targetPortal type: object name: - description: |- - name of the volume. - Must be a DNS_LABEL and unique within the pod. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names type: string nfs: - description: |- - nfs represents an NFS mount on the host that shares a pod's lifetime - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs properties: path: - description: |- - path that is exported by the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs type: string readOnly: - description: |- - readOnly here will force the NFS export to be mounted with read-only permissions. - Defaults to false. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs type: boolean server: - description: |- - server is the hostname or IP address of the NFS server. - More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs type: string required: - path - server type: object persistentVolumeClaim: - description: |- - persistentVolumeClaimVolumeSource represents a reference to a - PersistentVolumeClaim in the same namespace. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims properties: claimName: - description: |- - claimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims type: string readOnly: - description: |- - readOnly Will force the ReadOnly setting in VolumeMounts. - Default false. type: boolean required: - claimName type: object photonPersistentDisk: - description: photonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine properties: fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
type: string pdID: - description: pdID is the ID that identifies Photon Controller persistent disk type: string required: - pdID type: object portworxVolume: - description: portworxVolume represents a portworx volume attached and mounted on kubelets host machine properties: fsType: - description: |- - fSType represents the filesystem type to mount - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. type: string readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. type: boolean volumeID: - description: volumeID uniquely identifies a Portworx volume type: string required: - volumeID type: object projected: - description: projected items for all in one resources secrets, configmaps, and downward API properties: defaultMode: - description: |- - defaultMode are the mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer sources: - description: sources is the list of volume projections items: - description: Projection that may be projected along with other supported volume types properties: + clusterTrustBundle: + properties: + labelSelector: + properties: + matchExpressions: + items: + properties: + key: + type: string + operator: + type: string + values: + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-map-type: atomic + name: + type: string + optional: + type: boolean + path: + type: string + signerName: + type: string + required: + - path + type: object configMap: - description: configMap information about the configMap data to project properties: items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - ConfigMap will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the ConfigMap, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. 
- May not contain the path element '..'. - May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: optional specify whether the ConfigMap or its keys must be defined type: boolean type: object x-kubernetes-map-type: atomic downwardAPI: - description: downwardAPI information about the downwardAPI data to project properties: items: - description: Items is a list of DownwardAPIVolume file items: - description: DownwardAPIVolumeFile represents information to create the file containing the pod field properties: fieldRef: - description: 'Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.' properties: apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic mode: - description: |- - Optional: mode bits used to set permissions on this file, must be an octal value - between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: 'Required: Path is the relative path name of the file to be created. Must not be absolute or contain the ''..'' path. Must be utf-8 encoded. The first item of the relative path must not start with ''..''' type: string resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. properties: containerName: - description: 'Container name: required for volumes, optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource @@ -5247,245 +3148,128 @@ spec: - path type: object type: array + x-kubernetes-list-type: atomic type: object secret: - description: secret information about the secret data to project properties: items: - description: |- - items if unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. 
type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: optional field specify whether the Secret or its key must be defined type: boolean type: object x-kubernetes-map-type: atomic serviceAccountToken: - description: serviceAccountToken is information about the serviceAccountToken data to project properties: audience: - description: |- - audience is the intended audience of the token. A recipient of a token - must identify itself with an identifier specified in the audience of the - token, and otherwise should reject the token. The audience defaults to the - identifier of the apiserver. type: string expirationSeconds: - description: |- - expirationSeconds is the requested duration of validity of the service - account token. As the token approaches expiration, the kubelet volume - plugin will proactively rotate the service account token. The kubelet will - start trying to rotate the token if the token is older than 80 percent of - its time to live or if the token is older than 24 hours.Defaults to 1 hour - and must be at least 10 minutes. format: int64 type: integer path: - description: |- - path is the path relative to the mount point of the file to project the - token into. type: string required: - path type: object type: object type: array + x-kubernetes-list-type: atomic type: object quobyte: - description: quobyte represents a Quobyte mount on the host that shares a pod's lifetime properties: group: - description: |- - group to map volume access to - Default is no group type: string readOnly: - description: |- - readOnly here will force the Quobyte volume to be mounted with read-only permissions. - Defaults to false. type: boolean registry: - description: |- - registry represents a single or multiple Quobyte Registry services - specified as a string as host:port pair (multiple entries are separated with commas) - which acts as the central registry for volumes type: string tenant: - description: |- - tenant owning the given Quobyte volume in the Backend - Used with dynamically provisioned Quobyte volumes, value is set by the plugin type: string user: - description: |- - user to map volume access to - Defaults to serivceaccount user type: string volume: - description: volume is a string that references an already created Quobyte volume by name. type: string required: - registry - volume type: object rbd: - description: |- - rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. 
- More info: https://examples.k8s.io/volumes/rbd/README.md properties: fsType: - description: |- - fsType is the filesystem type of the volume that you want to mount. - Tip: Ensure that the filesystem type is supported by the host operating system. - Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - TODO: how do we prevent errors in the filesystem from compromising the machine type: string image: - description: |- - image is the rados image name. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: string keyring: - description: |- - keyring is the path to key ring for RBDUser. - Default is /etc/ceph/keyring. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + default: /etc/ceph/keyring type: string monitors: - description: |- - monitors is a collection of Ceph monitors. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it items: type: string type: array + x-kubernetes-list-type: atomic pool: - description: |- - pool is the rados pool name. - Default is rbd. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + default: rbd type: string readOnly: - description: |- - readOnly here will force the ReadOnly setting in VolumeMounts. - Defaults to false. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it type: boolean secretRef: - description: |- - secretRef is name of the authentication secret for RBDUser. If provided - overrides keyring. - Default is nil. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic user: - description: |- - user is the rados user name. - Default is admin. - More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + default: admin type: string required: - image - monitors type: object scaleIO: - description: scaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. properties: fsType: - description: |- - fsType is the filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". - Default is "xfs". + default: xfs type: string gateway: - description: gateway is the host address of the ScaleIO API Gateway. type: string protectionDomain: - description: protectionDomain is the name of the ScaleIO Protection Domain for the configured storage. type: string readOnly: - description: |- - readOnly Defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: |- - secretRef references to the secret for ScaleIO user and other - sensitive information. If this is not provided, Login operation will fail. properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? 
+ default: "" type: string type: object x-kubernetes-map-type: atomic sslEnabled: - description: sslEnabled Flag enable/disable SSL communication with Gateway, default false type: boolean storageMode: - description: |- - storageMode indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - Default is ThinProvisioned. + default: ThinProvisioned type: string storagePool: - description: storagePool is the ScaleIO Storage Pool associated with the protection domain. type: string system: - description: system is the name of the storage system as configured in ScaleIO. type: string volumeName: - description: |- - volumeName is the name of a volume already created in the ScaleIO system - that is associated with this volume source. type: string required: - gateway @@ -5493,126 +3277,58 @@ spec: - system type: object secret: - description: |- - secret represents a secret that should populate this volume. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret properties: defaultMode: - description: |- - defaultMode is Optional: mode bits used to set permissions on created files by default. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values - for mode bits. Defaults to 0644. - Directories within the path are not affected by this setting. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer items: - description: |- - items If unspecified, each key-value pair in the Data field of the referenced - Secret will be projected into the volume as a file whose name is the - key and content is the value. If specified, the listed keys will be - projected into the specified paths, and unlisted keys will not be - present. If a key is specified which is not present in the Secret, - the volume setup will error unless it is marked optional. Paths must be - relative and may not contain the '..' path or start with '..'. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key - path type: object type: array + x-kubernetes-list-type: atomic optional: - description: optional field specify whether the Secret or its keys must be defined type: boolean secretName: - description: |- - secretName is the name of the secret in the pod's namespace to use. - More info: https://kubernetes.io/docs/concepts/storage/volumes#secret type: string type: object storageos: - description: storageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. properties: fsType: - description: |- - fsType is the filesystem type to mount. 
- Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. type: string readOnly: - description: |- - readOnly defaults to false (read/write). ReadOnly here will force - the ReadOnly setting in VolumeMounts. type: boolean secretRef: - description: |- - secretRef specifies the secret to use for obtaining the StorageOS API - credentials. If not specified, default values will be attempted. properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic volumeName: - description: |- - volumeName is the human-readable name of the StorageOS volume. Volume - names are only unique within a namespace. type: string volumeNamespace: - description: |- - volumeNamespace specifies the scope of the volume within StorageOS. If no - namespace is specified then the Pod's namespace will be used. This allows the - Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - Set VolumeName to any name to override the default behaviour. - Set to "default" if you are not using namespaces within StorageOS. - Namespaces that do not pre-exist within StorageOS will be created. type: string type: object vsphereVolume: - description: vsphereVolume represents a vSphere volume attached and mounted on kubelets host machine properties: fsType: - description: |- - fsType is filesystem type to mount. - Must be a filesystem type supported by the host operating system. - Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. type: string storagePolicyID: - description: storagePolicyID is the storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. type: string storagePolicyName: - description: storagePolicyName is the storage Policy Based Management (SPBM) profile name. type: string volumePath: - description: volumePath is the path that identifies vSphere volume vmdk type: string required: - volumePath @@ -5625,49 +3341,36 @@ spec: - name x-kubernetes-list-type: map type: object - description: Override the default configurations of the agents type: object type: object status: - description: DatadogAgentStatus defines the observed state of DatadogAgent. properties: agent: - description: The combined actual state of all Agents as daemonsets or extended daemonsets. properties: available: - description: Number of available pods in the DaemonSet. format: int32 type: integer current: - description: Number of current pods in the DaemonSet. format: int32 type: integer currentHash: - description: CurrentHash is the stored hash of the DaemonSet. type: string daemonsetName: - description: DaemonsetName corresponds to the name of the created DaemonSet. type: string desired: - description: Number of desired pods in the DaemonSet. format: int32 type: integer lastUpdate: - description: LastUpdate is the last time the status was updated. format: date-time type: string ready: - description: Number of ready pods in the DaemonSet. format: int32 type: integer state: - description: State corresponds to the DaemonSet state. type: string status: - description: Status corresponds to the DaemonSet computed status. type: string upToDate: - description: Number of up to date pods in the DaemonSet. 
format: int32 type: integer required: @@ -5678,44 +3381,32 @@ spec: - upToDate type: object agentList: - description: The actual state of the Agent as a daemonset or an extended daemonset. items: - description: DaemonSetStatus defines the observed state of Agent running as DaemonSet. properties: available: - description: Number of available pods in the DaemonSet. format: int32 type: integer current: - description: Number of current pods in the DaemonSet. format: int32 type: integer currentHash: - description: CurrentHash is the stored hash of the DaemonSet. type: string daemonsetName: - description: DaemonsetName corresponds to the name of the created DaemonSet. type: string desired: - description: Number of desired pods in the DaemonSet. format: int32 type: integer lastUpdate: - description: LastUpdate is the last time the status was updated. format: date-time type: string ready: - description: Number of ready pods in the DaemonSet. format: int32 type: integer state: - description: State corresponds to the DaemonSet state. type: string status: - description: Status corresponds to the DaemonSet computed status. type: string upToDate: - description: Number of up to date pods in the DaemonSet. format: int32 type: integer required: @@ -5728,168 +3419,92 @@ spec: type: array x-kubernetes-list-type: atomic clusterAgent: - description: The actual state of the Cluster Agent as a deployment. properties: availableReplicas: - description: Total number of available pods (ready for at least minReadySeconds) targeted by this Deployment. format: int32 type: integer currentHash: - description: CurrentHash is the stored hash of the Deployment. type: string deploymentName: - description: DeploymentName corresponds to the name of the Deployment. type: string generatedToken: - description: |- - GeneratedToken corresponds to the generated token if any token was provided in the Credential configuration when ClusterAgent is - enabled. type: string lastUpdate: - description: LastUpdate is the last time the status was updated. format: date-time type: string readyReplicas: - description: Total number of ready pods targeted by this Deployment. format: int32 type: integer replicas: - description: Total number of non-terminated pods targeted by this Deployment (their labels match the selector). format: int32 type: integer state: - description: State corresponds to the Deployment state. type: string status: - description: Status corresponds to the Deployment computed status. type: string unavailableReplicas: - description: |- - Total number of unavailable pods targeted by this Deployment. This is the total number of - pods that are still required for the Deployment to have 100% available capacity. They may - either be pods that are running but not yet available or pods that still have not been created. format: int32 type: integer updatedReplicas: - description: Total number of non-terminated pods targeted by this Deployment that have the desired template spec. format: int32 type: integer type: object clusterChecksRunner: - description: The actual state of the Cluster Checks Runner as a deployment. properties: availableReplicas: - description: Total number of available pods (ready for at least minReadySeconds) targeted by this Deployment. format: int32 type: integer currentHash: - description: CurrentHash is the stored hash of the Deployment. type: string deploymentName: - description: DeploymentName corresponds to the name of the Deployment. 
type: string generatedToken: - description: |- - GeneratedToken corresponds to the generated token if any token was provided in the Credential configuration when ClusterAgent is - enabled. type: string lastUpdate: - description: LastUpdate is the last time the status was updated. format: date-time type: string readyReplicas: - description: Total number of ready pods targeted by this Deployment. format: int32 type: integer replicas: - description: Total number of non-terminated pods targeted by this Deployment (their labels match the selector). format: int32 type: integer state: - description: State corresponds to the Deployment state. type: string status: - description: Status corresponds to the Deployment computed status. type: string unavailableReplicas: - description: |- - Total number of unavailable pods targeted by this Deployment. This is the total number of - pods that are still required for the Deployment to have 100% available capacity. They may - either be pods that are running but not yet available or pods that still have not been created. format: int32 type: integer updatedReplicas: - description: Total number of non-terminated pods targeted by this Deployment that have the desired template spec. format: int32 type: integer type: object conditions: - description: Conditions Represents the latest available observations of a DatadogAgent's current state. items: - description: |- - Condition contains details for one aspect of the current state of this API Resource. - --- - This struct is intended for direct use as an array at the field path .status.conditions. For example, - - - type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - - // other fields - } properties: lastTransitionTime: - description: |- - lastTransitionTime is the last time the condition transitioned from one status to another. - This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: |- - message is a human readable message indicating details about the transition. - This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: |- - observedGeneration represents the .metadata.generation that the condition was set based upon. - For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date - with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: |- - reason contains a programmatic identifier indicating the reason for the condition's last transition. - Producers of specific condition types may define expected values and meanings for this field, - and whether the values are considered a guaranteed API. - The value should be a CamelCase string. - This field may not be empty. maxLength: 1024 minLength: 1 pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ type: string status: - description: status of the condition, one of True, False, Unknown. enum: - "True" - "False" - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -5905,180 +3520,98 @@ spec: - type x-kubernetes-list-type: map remoteConfigConfiguration: - description: RemoteConfigConfiguration stores the configuration received from RemoteConfig. properties: features: - description: DatadogFeatures are features running on the Agent and Cluster Agent. properties: admissionController: - description: AdmissionController configuration. properties: agentCommunicationMode: - description: |- - AgentCommunicationMode corresponds to the mode used by the Datadog application libraries to communicate with the Agent. - It can be "hostip", "service", or "socket". type: string agentSidecarInjection: - description: AgentSidecarInjection contains Agent sidecar injection configurations. properties: clusterAgentCommunicationEnabled: - description: |- - ClusterAgentCommunicationEnabled enables communication between Agent sidecars and the Cluster Agent. - Default : true type: boolean enabled: - description: |- - Enabled enables Sidecar injections. - Default: false type: boolean image: - description: Image overrides the default Agent image name and tag for the Agent sidecar. properties: jmxEnabled: - description: |- - Define whether the Agent image should support JMX. - To be used if the Name field does not correspond to a full image string. type: boolean name: - description: |- - Define the image to use: - Use "gcr.io/datadoghq/agent:latest" for Datadog Agent 7. - Use "datadog/dogstatsd:latest" for standalone Datadog Agent DogStatsD 7. - Use "gcr.io/datadoghq/cluster-agent:latest" for Datadog Cluster Agent. - Use "agent" with the registry and tag configurations for /agent:. - Use "cluster-agent" with the registry and tag configurations for /cluster-agent:. - If the name is the full image string—`:` or `/:`, then `tag`, `jmxEnabled`, - and `global.registry` values are ignored. - Otherwise, image string is created by overriding default settings with supplied `name`, `tag`, and `jmxEnabled` values; - image string is created using default registry unless `global.registry` is configured. type: string pullPolicy: - description: |- - The Kubernetes pull policy: - Use Always, Never, or IfNotPresent. type: string pullSecrets: - description: |- - It is possible to specify Docker registry credentials. - See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod items: - description: |- - LocalObjectReference contains enough information to let you locate the - referenced object inside the same namespace. properties: name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string type: object x-kubernetes-map-type: atomic type: array tag: - description: |- - Define the image tag to use. - To be used if the Name field does not correspond to a full image string. type: string type: object profiles: - description: Profiles define the sidecar configuration override. Only one profile is supported. items: - description: Profile defines a sidecar configuration override. 
properties: env: - description: EnvVars specifies the environment variables for the profile. items: - description: EnvVar represents an environment variable present in a Container. properties: name: - description: Name of the environment variable. Must be a C_IDENTIFIER. type: string value: - description: |- - Variable references $(VAR_NAME) are expanded - using the previously defined environment variables in the container and - any service environment variables. If a variable cannot be resolved, - the reference in the input string will be unchanged. Double $$ are reduced - to a single $, which allows for escaping the $(VAR_NAME) syntax: i.e. - "$$(VAR_NAME)" will produce the string literal "$(VAR_NAME)". - Escaped references will never be expanded, regardless of whether the variable - exists or not. - Defaults to "". type: string valueFrom: - description: Source for the environment variable's value. Cannot be used if value is not empty. properties: configMapKeyRef: - description: Selects a key of a ConfigMap. properties: key: - description: The key to select. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the ConfigMap or its key must be defined type: boolean required: - key type: object x-kubernetes-map-type: atomic fieldRef: - description: |- - Selects a field of the pod: supports metadata.name, metadata.namespace, `metadata.labels['']`, `metadata.annotations['']`, - spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. properties: apiVersion: - description: Version of the schema the FieldPath is written in terms of, defaults to "v1". type: string fieldPath: - description: Path of the field to select in the specified API version. type: string required: - fieldPath type: object x-kubernetes-map-type: atomic resourceFieldRef: - description: |- - Selects a resource of the container: only resources limits and requests - (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. properties: containerName: - description: 'Container name: required for volumes, optional for env vars' type: string divisor: anyOf: - type: integer - type: string - description: Specifies the output format of the exposed resources, defaults to "1" pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true resource: - description: 'Required: resource to select' type: string required: - resource type: object x-kubernetes-map-type: atomic secretKeyRef: - description: Selects a key of a secret in the pod's namespace properties: key: - description: The key of the secret to select from. Must be a valid secret key. type: string name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? + default: "" type: string optional: - description: Specify whether the Secret or its key must be defined type: boolean required: - key @@ -6093,27 +3626,13 @@ spec: - name x-kubernetes-list-type: map resources: - description: ResourceRequirements specifies the resource requirements for the profile. 
properties: claims: - description: |- - Claims lists the names of resources, defined in spec.resourceClaims, - that are used by this container. - - - This is an alpha field and requires enabling the - DynamicResourceAllocation feature gate. - - - This field is immutable. It can only be set for containers. items: - description: ResourceClaim references one entry in PodSpec.ResourceClaims. properties: name: - description: |- - Name must match the name of one entry in pod.spec.resourceClaims of - the Pod where this field is used. It makes that resource available - inside a container. + type: string + request: type: string required: - name @@ -6129,9 +3648,6 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Limits describes the maximum amount of compute resources allowed. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object requests: additionalProperties: @@ -6140,112 +3656,67 @@ spec: - type: string pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ x-kubernetes-int-or-string: true - description: |- - Requests describes the minimum amount of compute resources required. - If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - otherwise to an implementation-defined value. Requests cannot exceed Limits. - More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ type: object type: object type: object type: array x-kubernetes-list-type: atomic provider: - description: |- - Provider is used to add infrastructure provider-specific configurations to the Agent sidecar. - Currently only "fargate" is supported. - To use the feature in other environments (including local testing) omit the config. - See also: https://docs.datadoghq.com/integrations/eks_fargate type: string registry: - description: Registry overrides the default registry for the sidecar Agent. type: string selectors: - description: Selectors define the pod selector for sidecar injection. Only one rule is supported. items: - description: Selectors define a pod selector for sidecar injection. properties: namespaceSelector: - description: NamespaceSelector specifies the label selector for namespaces. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic objectSelector: - description: ObjectSelector specifies the label selector for objects. properties: matchExpressions: - description: matchExpressions is a list of label selector requirements. The requirements are ANDed. items: - description: |- - A label selector requirement is a selector that contains values, a key, and an operator that - relates the key and values. properties: key: - description: key is the label key that the selector applies to. type: string operator: - description: |- - operator represents a key's relationship to a set of values. - Valid operators are In, NotIn, Exists and DoesNotExist. type: string values: - description: |- - values is an array of string values. If the operator is In or NotIn, - the values array must be non-empty. If the operator is Exists or DoesNotExist, - the values array must be empty. This array is replaced during a strategic - merge patch. items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator type: object type: array + x-kubernetes-list-type: atomic matchLabels: additionalProperties: type: string - description: |- - matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, whose key field is "key", the - operator is "In", and the values array contains only "value". The requirements are ANDed. type: object type: object x-kubernetes-map-type: atomic @@ -6254,238 +3725,131 @@ spec: x-kubernetes-list-type: atomic type: object cwsInstrumentation: - description: CWSInstrumentation holds the CWS Instrumentation endpoint configuration properties: enabled: - description: |- - Enable the CWS Instrumentation admission controller endpoint. - Default: false type: boolean mode: - description: |- - Mode defines the behavior of the CWS Instrumentation endpoint, and can be either "init_container" or "remote_copy". - Default: "remote_copy" type: string type: object enabled: - description: |- - Enabled enables the Admission Controller. - Default: true type: boolean failurePolicy: - description: FailurePolicy determines how unrecognized and timeout errors are handled. type: string mutateUnlabelled: - description: |- - MutateUnlabelled enables config injection without the need of pod label 'admission.datadoghq.com/enabled="true"'. - Default: false type: boolean + mutation: + properties: + enabled: + type: boolean + type: object registry: - description: Registry defines an image registry for the admission controller. type: string serviceName: - description: ServiceName corresponds to the webhook service name. type: string + validation: + properties: + enabled: + type: boolean + type: object webhookName: - description: |- - WebhookName is a custom name for the MutatingWebhookConfiguration. - Default: "datadog-webhook" type: string type: object apm: - description: APM (Application Performance Monitoring) configuration. properties: enabled: - description: |- - Enabled enables Application Performance Monitoring. - Default: true type: boolean hostPortConfig: - description: |- - HostPortConfig contains host port configuration. 
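# --- Illustrative sketch (not applied by this patch) ---
# The admission controller schema above gains separate "mutation" and
# "validation" toggles. The DatadogFeatures block mirrored here under
# status.remoteConfigConfiguration is, to the best of our reading, the same
# type accepted under spec.features, so a manifest enabling the new switches
# could look like this (v2alpha1 assumed):
apiVersion: datadoghq.com/v2alpha1
kind: DatadogAgent
metadata:
  name: datadog
spec:
  features:
    admissionController:
      enabled: true
      mutation:
        enabled: true      # new: toggle for the mutating webhooks
      validation:
        enabled: true      # new: toggle for the validating webhooks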
- Enabled Default: false - Port Default: 8126 properties: enabled: - description: |- - Enabled enables host port configuration - Default: false type: boolean hostPort: - description: |- - Port takes a port number (0 < x < 65536) to expose on the host. (Most containers do not need this.) - If HostNetwork is enabled, this value must match the ContainerPort. format: int32 type: integer type: object instrumentation: - description: |- - SingleStepInstrumentation allows the agent to inject the Datadog APM libraries into all pods in the cluster. - Feature is in beta. - See also: https://docs.datadoghq.com/tracing/trace_collection/single-step-apm - Enabled Default: false properties: disabledNamespaces: - description: DisabledNamespaces disables injecting the Datadog APM libraries into pods in specific namespaces. items: type: string type: array x-kubernetes-list-type: set enabled: - description: |- - Enabled enables injecting the Datadog APM libraries into all pods in the cluster. - Default: false type: boolean enabledNamespaces: - description: EnabledNamespaces enables injecting the Datadog APM libraries into pods in specific namespaces. items: type: string type: array x-kubernetes-list-type: set languageDetection: - description: |- - LanguageDetection detects languages and adds them as annotations on Deployments, but does not use these languages for injecting libraries to workload pods. - (Requires Agent 7.52.0+ and Cluster Agent 7.52.0+) properties: enabled: - description: |- - Enabled enables Language Detection to automatically detect languages of user workloads (beta). - Requires SingleStepInstrumentation.Enabled to be true. - Default: true type: boolean type: object libVersions: additionalProperties: type: string - description: |- - LibVersions configures injection of specific tracing library versions with Single Step Instrumentation. - : - ex: "java": "v1.18.0" type: object type: object unixDomainSocketConfig: - description: |- - UnixDomainSocketConfig contains socket configuration. - See also: https://docs.datadoghq.com/agent/kubernetes/apm/?tab=helm#agent-environment-variables - Enabled Default: true - Path Default: `/var/run/datadog/apm.socket` properties: enabled: - description: |- - Enabled enables Unix Domain Socket. - Default: true type: boolean path: - description: Path defines the socket path used when enabled. type: string type: object type: object asm: - description: ASM (Application Security Management) configuration. properties: iast: - description: |- - IAST configures Interactive Application Security Testing. - Enabled Default: false properties: enabled: - description: |- - Enabled enables Interactive Application Security Testing (IAST). - Default: false type: boolean type: object sca: - description: |- - SCA configures Software Composition Analysis. - Enabled Default: false properties: enabled: - description: |- - Enabled enables Software Composition Analysis (SCA). - Default: false type: boolean type: object threats: - description: |- - Threats configures ASM App & API Protection. - Enabled Default: false properties: enabled: - description: |- - Enabled enables ASM App & API Protection. - Default: false type: boolean type: object type: object autoscaling: - description: Autoscaling configuration. properties: workload: - description: Workload contains the configuration for the workload autoscaling product. properties: enabled: - description: |- - Enabled enables the workload autoscaling product. 
- Default: false type: boolean type: object type: object clusterChecks: - description: ClusterChecks configuration. properties: enabled: - description: |- - Enables Cluster Checks scheduling in the Cluster Agent. - Default: true type: boolean useClusterChecksRunners: - description: |- - Enabled enables Cluster Checks Runners to run all Cluster Checks. - Default: false type: boolean type: object cspm: - description: CSPM (Cloud Security Posture Management) configuration. properties: checkInterval: - description: CheckInterval defines the check interval. type: string customBenchmarks: - description: |- - CustomBenchmarks contains CSPM benchmarks. - The content of the ConfigMap will be merged with the benchmarks bundled with the agent. - Any benchmarks with the same name as those existing in the agent will take precedence. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -6496,64 +3860,34 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object enabled: - description: |- - Enabled enables Cloud Security Posture Management. - Default: false type: boolean hostBenchmarks: - description: HostBenchmarks contains configuration for host benchmarks. properties: enabled: - description: |- - Enabled enables host benchmarks. - Default: true type: boolean type: object type: object cws: - description: CWS (Cloud Workload Security) configuration. properties: customPolicies: - description: |- - CustomPolicies contains security policies. - The content of the ConfigMap will be merged with the policies bundled with the agent. - Any policies with the same name as those existing in the agent will take precedence. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. 
- If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -6564,102 +3898,54 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object enabled: - description: |- - Enabled enables Cloud Workload Security. - Default: false type: boolean network: properties: enabled: - description: |- - Enabled enables Cloud Workload Security Network detections. - Default: true type: boolean type: object remoteConfiguration: properties: enabled: - description: |- - Enabled enables Remote Configuration for Cloud Workload Security. - Default: true type: boolean type: object securityProfiles: properties: enabled: - description: |- - Enabled enables Security Profiles collection for Cloud Workload Security. - Default: true type: boolean type: object syscallMonitorEnabled: - description: |- - SyscallMonitorEnabled enables Syscall Monitoring (recommended for troubleshooting only). - Default: false type: boolean type: object dogstatsd: - description: Dogstatsd configuration. properties: hostPortConfig: - description: |- - HostPortConfig contains host port configuration. - Enabled Default: false - Port Default: 8125 properties: enabled: - description: |- - Enabled enables host port configuration - Default: false type: boolean hostPort: - description: |- - Port takes a port number (0 < x < 65536) to expose on the host. (Most containers do not need this.) - If HostNetwork is enabled, this value must match the ContainerPort. format: int32 type: integer type: object mapperProfiles: - description: |- - Configure the Dogstasd Mapper Profiles. - Can be passed as raw data or via a json encoded string in a config map. - See also: https://docs.datadoghq.com/developers/dogstatsd/dogstatsd_mapper/ properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -6670,72 +3956,36 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. 
type: string type: object type: object originDetectionEnabled: - description: |- - OriginDetectionEnabled enables origin detection for container tagging. - See also: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging type: boolean tagCardinality: - description: |- - TagCardinality configures tag cardinality for the metrics collected using origin detection (`low`, `orchestrator` or `high`). - See also: https://docs.datadoghq.com/getting_started/tagging/assigning_tags/?tab=containerizedenvironments#environment-variables - Cardinality default: low type: string unixDomainSocketConfig: - description: |- - UnixDomainSocketConfig contains socket configuration. - See also: https://docs.datadoghq.com/agent/kubernetes/apm/?tab=helm#agent-environment-variables - Enabled Default: true - Path Default: `/var/run/datadog/dsd.socket` properties: enabled: - description: |- - Enabled enables Unix Domain Socket. - Default: true type: boolean path: - description: Path defines the socket path used when enabled. type: string type: object type: object ebpfCheck: - description: EBPFCheck configuration. properties: enabled: - description: |- - Enables the eBPF check. - Default: false type: boolean type: object eventCollection: - description: EventCollection configuration. properties: collectKubernetesEvents: - description: |- - CollectKubernetesEvents enables Kubernetes event collection. - Default: true type: boolean collectedEventTypes: - description: |- - CollectedEventTypes defines the list of events to collect when UnbundleEvents is enabled. - Default: - [ - {"kind":"Pod","reasons":["Failed","BackOff","Unhealthy","FailedScheduling","FailedMount","FailedAttachVolume"]}, - {"kind":"Node","reasons":["TerminatingEvictedPod","NodeNotReady","Rebooted","HostPortConflict"]}, - {"kind":"CronJob","reasons":["SawCompletedJob"]} - ] items: - description: EventTypes defines the kind and reasons of events to collect. properties: kind: - description: 'Kind is the kind of event to collect. (ex: Pod, Node, CronJob)' type: string reasons: - description: 'Reasons is a list of event reasons to collect. (ex: Failed, BackOff, Unhealthy)' items: type: string type: array @@ -6747,156 +3997,80 @@ spec: type: array x-kubernetes-list-type: atomic unbundleEvents: - description: |- - UnbundleEvents enables collection of Kubernetes events as individual events. - Default: false type: boolean type: object externalMetricsServer: - description: ExternalMetricsServer configuration. properties: enabled: - description: |- - Enabled enables the External Metrics Server. - Default: false type: boolean endpoint: - description: |- - Override the API endpoint for the External Metrics Server. - URL Default: "https://app.datadoghq.com". properties: credentials: - description: Credentials defines the Datadog credentials used to submit data to/query data from Datadog. properties: apiKey: - description: |- - APIKey configures your Datadog API key. - See also: https://app.datadoghq.com/account/settings#agent/kubernetes type: string apiSecret: - description: |- - APISecret references an existing Secret which stores the API key instead of creating a new one. - If set, this parameter takes precedence over "APIKey". properties: keyName: - description: KeyName is the key of the secret to use. type: string secretName: - description: SecretName is the name of the secret. type: string required: - secretName type: object appKey: - description: |- - AppKey configures your Datadog application key. 
- If you are using features.externalMetricsServer.enabled = true, you must set - a Datadog application key for read access to your metrics. type: string appSecret: - description: |- - AppSecret references an existing Secret which stores the application key instead of creating a new one. - If set, this parameter takes precedence over "AppKey". properties: keyName: - description: KeyName is the key of the secret to use. type: string secretName: - description: SecretName is the name of the secret. type: string required: - secretName type: object type: object url: - description: URL defines the endpoint URL. type: string type: object port: - description: |- - Port specifies the metricsProvider External Metrics Server service port. - Default: 8443 format: int32 type: integer registerAPIService: - description: |- - RegisterAPIService registers the External Metrics endpoint as an APIService - Default: true type: boolean useDatadogMetrics: - description: |- - UseDatadogMetrics enables usage of the DatadogMetrics CRD (allowing one to scale on arbitrary Datadog metric queries). - Default: true type: boolean wpaController: - description: |- - WPAController enables the informer and controller of the Watermark Pod Autoscaler. - NOTE: The Watermark Pod Autoscaler controller needs to be installed. - See also: https://github.com/DataDog/watermarkpodautoscaler. - Default: false type: boolean type: object helmCheck: - description: HelmCheck configuration. properties: collectEvents: - description: |- - CollectEvents set to `true` enables event collection in the Helm check - (Requires Agent 7.36.0+ and Cluster Agent 1.20.0+) - Default: false type: boolean enabled: - description: |- - Enabled enables the Helm check. - Default: false type: boolean valuesAsTags: additionalProperties: type: string - description: |- - ValuesAsTags collects Helm values from a release and uses them as tags - (Requires Agent and Cluster Agent 7.40.0+). - Default: {} type: object type: object kubeStateMetricsCore: - description: KubeStateMetricsCore check configuration. properties: conf: - description: |- - Conf overrides the configuration for the default Kubernetes State Metrics Core check. - This must point to a ConfigMap containing a valid cluster check configuration. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -6907,163 +4081,77 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. 
type: string type: object type: object enabled: - description: |- - Enabled enables Kube State Metrics Core. - Default: true type: boolean type: object liveContainerCollection: - description: LiveContainerCollection configuration. properties: enabled: - description: |- - Enables container collection for the Live Container View. - Default: true type: boolean type: object liveProcessCollection: - description: LiveProcessCollection configuration. properties: enabled: - description: |- - Enabled enables Process monitoring. - Default: false type: boolean scrubProcessArguments: - description: |- - ScrubProcessArguments enables scrubbing of sensitive data in process command-lines (passwords, tokens, etc. ). - Default: true type: boolean stripProcessArguments: - description: |- - StripProcessArguments enables stripping of all process arguments. - Default: false type: boolean type: object logCollection: - description: LogCollection configuration. properties: containerCollectAll: - description: |- - ContainerCollectAll enables Log collection from all containers. - Default: false type: boolean containerCollectUsingFiles: - description: |- - ContainerCollectUsingFiles enables log collection from files in `/var/log/pods instead` of using the container runtime API. - Collecting logs from files is usually the most efficient way of collecting logs. - See also: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - Default: true type: boolean containerLogsPath: - description: |- - ContainerLogsPath allows log collection from the container log path. - Set to a different path if you are not using the Docker runtime. - See also: https://docs.datadoghq.com/agent/kubernetes/daemonset_setup/?tab=k8sfile#create-manifest - Default: `/var/lib/docker/containers` type: string containerSymlinksPath: - description: |- - ContainerSymlinksPath allows log collection to use symbolic links in this directory to validate container ID -> pod. - Default: `/var/log/containers` type: string enabled: - description: |- - Enabled enables Log collection. - Default: false type: boolean openFilesLimit: - description: |- - OpenFilesLimit sets the maximum number of log files that the Datadog Agent tails. - Increasing this limit can increase resource consumption of the Agent. - See also: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup - Default: 100 format: int32 type: integer podLogsPath: - description: |- - PodLogsPath allows log collection from a pod log path. - Default: `/var/log/pods` type: string tempStoragePath: - description: |- - TempStoragePath (always mounted from the host) is used by the Agent to store information about processed log files. - If the Agent is restarted, it starts tailing the log files immediately. - Default: `/var/lib/datadog-agent/logs` type: string type: object npm: - description: NPM (Network Performance Monitoring) configuration. properties: collectDNSStats: - description: |- - CollectDNSStats enables DNS stat collection. - Default: false type: boolean enableConntrack: - description: |- - EnableConntrack enables the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data. - See also: http://conntrack-tools.netfilter.org/ - Default: false type: boolean enabled: - description: |- - Enabled enables Network Performance Monitoring. - Default: false type: boolean type: object oomKill: - description: OOMKill configuration. 
properties: enabled: - description: |- - Enables the OOMKill eBPF-based check. - Default: false type: boolean type: object orchestratorExplorer: - description: OrchestratorExplorer check configuration. properties: conf: - description: |- - Conf overrides the configuration for the default Orchestrator Explorer check. - This must point to a ConfigMap containing a valid cluster check configuration. properties: configData: - description: ConfigData corresponds to the configuration file content. type: string configMap: - description: ConfigMap references an existing ConfigMap with the configuration file content. properties: items: - description: Items maps a ConfigMap data `key` to a file `path` mount. items: - description: Maps a string key to a path within a volume. properties: key: - description: key is the key to project. type: string mode: - description: |- - mode is Optional: mode bits used to set permissions on this file. - Must be an octal value between 0000 and 0777 or a decimal value between 0 and 511. - YAML accepts both octal and decimal values, JSON requires decimal values for mode bits. - If not specified, the volume defaultMode will be used. - This might be in conflict with other options that affect the file - mode, like fsGroup, and the result can be other mode bits set. format: int32 type: integer path: - description: |- - path is the relative path of the file to map the key to. - May not be an absolute path. - May not contain the path element '..'. - May not start with the string '..'. type: string required: - key @@ -7074,185 +4162,128 @@ spec: - key x-kubernetes-list-type: map name: - description: Name is the name of the ConfigMap. type: string type: object type: object customResources: - description: |- - `CustomResources` defines custom resources for the orchestrator explorer to collect. - Each item should follow the convention `group/version/kind`. For example, `datadoghq.com/v1alpha1/datadogmetrics`. items: type: string type: array x-kubernetes-list-type: set ddUrl: - description: |- - Override the API endpoint for the Orchestrator Explorer. - URL Default: "https://orchestrator.datadoghq.com". type: string enabled: - description: |- - Enabled enables the Orchestrator Explorer. - Default: true type: boolean extraTags: - description: |- - Additional tags to associate with the collected data in the form of `a b c`. - This is a Cluster Agent option distinct from DD_TAGS that is used in the Orchestrator Explorer. items: type: string type: array x-kubernetes-list-type: set scrubContainers: - description: |- - ScrubContainers enables scrubbing of sensitive container data (passwords, tokens, etc. ). - Default: true type: boolean type: object otlp: - description: OTLP ingest configuration properties: receiver: - description: Receiver contains configuration for the OTLP ingest receiver. properties: protocols: - description: Protocols contains configuration for the OTLP ingest receiver protocols. properties: grpc: - description: GRPC contains configuration for the OTLP ingest OTLP/gRPC receiver. properties: enabled: - description: Enable the OTLP/gRPC endpoint. type: boolean endpoint: - description: |- - Endpoint for OTLP/gRPC. - gRPC supports several naming schemes: https://github.com/grpc/grpc/blob/master/doc/naming.md - The Datadog Operator supports only 'host:port' (usually `0.0.0.0:port`). - Default: `0.0.0.0:4317`. 
type: string + hostPortConfig: + properties: + enabled: + type: boolean + hostPort: + format: int32 + type: integer + type: object type: object http: - description: HTTP contains configuration for the OTLP ingest OTLP/HTTP receiver. properties: enabled: - description: Enable the OTLP/HTTP endpoint. type: boolean endpoint: - description: |- - Endpoint for OTLP/HTTP. - Default: '0.0.0.0:4318'. type: string + hostPortConfig: + properties: + enabled: + type: boolean + hostPort: + format: int32 + type: integer + type: object type: object type: object type: object type: object processDiscovery: - description: ProcessDiscovery configuration. properties: enabled: - description: |- - Enabled enables the Process Discovery check in the Agent. - Default: true type: boolean type: object prometheusScrape: - description: PrometheusScrape configuration. properties: additionalConfigs: - description: AdditionalConfigs allows adding advanced Prometheus check configurations with custom discovery rules. type: string enableServiceEndpoints: - description: |- - EnableServiceEndpoints enables generating dedicated checks for service endpoints. - Default: false type: boolean enabled: - description: |- - Enable autodiscovery of pods and services exposing Prometheus metrics. - Default: false type: boolean version: - description: |- - Version specifies the version of the OpenMetrics check. - Default: 2 type: integer type: object remoteConfiguration: - description: Remote Configuration configuration. properties: enabled: - description: |- - Enable this option to activate Remote Configuration. - Default: true type: boolean type: object sbom: - description: SBOM collection configuration. properties: containerImage: - description: SBOMTypeConfig contains configuration for a SBOM collection type. properties: analyzers: - description: Analyzers to use for SBOM collection. items: type: string type: array x-kubernetes-list-type: set enabled: - description: |- - Enable this option to activate SBOM collection. - Default: false type: boolean overlayFSDirectScan: - description: |- - Enable this option to enable experimental overlayFS direct scan. - Default: false type: boolean uncompressedLayersSupport: - description: |- - Enable this option to enable support for uncompressed layers. - Default: false type: boolean type: object enabled: - description: |- - Enable this option to activate SBOM collection. - Default: false type: boolean host: - description: SBOMTypeConfig contains configuration for a SBOM collection type. properties: analyzers: - description: Analyzers to use for SBOM collection. items: type: string type: array x-kubernetes-list-type: set enabled: - description: |- - Enable this option to activate SBOM collection. - Default: false type: boolean type: object type: object + serviceDiscovery: + properties: + enabled: + type: boolean + type: object tcpQueueLength: - description: TCPQueueLength configuration. properties: enabled: - description: |- - Enables the TCP queue length eBPF-based check. - Default: false type: boolean type: object usm: - description: USM (Universal Service Monitoring) configuration. properties: enabled: - description: |- - Enabled enables Universal Service Monitoring. 
- Default: false type: boolean type: object type: object @@ -7274,7 +4305,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: datadogagentprofiles.datadoghq.com spec: group: datadoghq.com @@ -7362,10 +4393,13 @@ spec: description: The key to select. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the ConfigMap or its key must be defined @@ -7418,10 +4452,13 @@ spec: description: The key of the secret to select from. Must be a valid secret key. type: string name: + default: "" description: |- Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - TODO: Add other useful fields. apiVersion, kind, uid? type: string optional: description: Specify whether the Secret or its key must be defined @@ -7449,11 +4486,9 @@ spec: Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. - This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. - This field is immutable. It can only be set for containers. items: description: ResourceClaim references one entry in PodSpec.ResourceClaims. @@ -7464,6 +4499,12 @@ spec: the Pod where this field is used. It makes that resource available inside a container. type: string + request: + description: |- + Request is the name chosen for a request in the referenced claim. + If empty, everything from the claim is made available, otherwise + only the result of this request. + type: string required: - name type: object @@ -7577,6 +4618,7 @@ spec: items: type: string type: array + x-kubernetes-list-type: atomic required: - key - operator @@ -7593,24 +4635,7 @@ spec: conditions: description: Conditions represents the latest available observations of a DatadogAgentProfile's current state. items: - description: |- - Condition contains details for one aspect of the current state of this API Resource. - --- - This struct is intended for direct use as an array at the field path .status.conditions. For example, - - - type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - - // other fields - } + description: Condition contains details for one aspect of the current state of this API Resource. properties: lastTransitionTime: description: |- @@ -7651,12 +4676,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. 
- --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -7721,7 +4741,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: datadogmonitors.datadoghq.com spec: group: datadoghq.com @@ -8065,7 +5085,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: datadogslos.datadoghq.com spec: group: datadoghq.com @@ -8199,24 +5219,7 @@ spec: conditions: description: Conditions represents the latest available observations of the state of a DatadogSLO. items: - description: |- - Condition contains details for one aspect of the current state of this API Resource. - --- - This struct is intended for direct use as an array at the field path .status.conditions. For example, - - - type FooStatus struct{ - // Represents the observations of a foo's current state. - // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - - // other fields - } + description: Condition contains details for one aspect of the current state of this API Resource. properties: lastTransitionTime: description: |- @@ -8257,12 +5260,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -8316,7 +5314,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.16.3 name: datadogdashboards.datadoghq.com spec: group: datadoghq.com @@ -8466,24 +5464,7 @@ spec: conditions: description: Conditions represents the latest available observations of the state of a DatadogDashboard. items: - description: |- - Condition contains details for one aspect of the current state of this API Resource. - --- - This struct is intended for direct use as an array at the field path .status.conditions. For example, - - - type FooStatus struct{ - // Represents the observations of a foo's current state. 
- // Known .status.conditions.type are: "Available", "Progressing", and "Degraded" - // +patchMergeKey=type - // +patchStrategy=merge - // +listType=map - // +listMapKey=type - Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - - // other fields - } + description: Condition contains details for one aspect of the current state of this API Resource. properties: lastTransitionTime: description: |- @@ -8524,12 +5505,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string @@ -8577,4 +5553,4 @@ status: kind: "" plural: "" conditions: [] - storedVersions: [] \ No newline at end of file + storedVersions: [] diff --git a/google-marketplace/publish-marketplace.sh b/google-marketplace/publish-marketplace.sh index 9c310bacdec6a..a4075f420b2cc 100755 --- a/google-marketplace/publish-marketplace.sh +++ b/google-marketplace/publish-marketplace.sh @@ -26,4 +26,6 @@ then fi docker build --pull --platform linux/amd64 --no-cache --build-arg TAG="$FULL_TAG" --tag "$REGISTRY/deployer:$FULL_TAG" . && docker push "$REGISTRY/deployer:$FULL_TAG" -docker tag "$REGISTRY/deployer:$FULL_TAG" "$REGISTRY/deployer:$SHORT_TAG" && docker push "$REGISTRY/deployer:$SHORT_TAG" +gcrane mutate --annotation "com.googleapis.cloudmarketplace.product.service.name=datadog" "$REGISTRY/deployer:$FULL_TAG" +# We use gcloud to add the tag to the existing manifest, as docker push creates a new manifest +gcloud container images add-tag "$REGISTRY/deployer:$FULL_TAG" "$REGISTRY/deployer:$SHORT_TAG" --quiet diff --git a/google-marketplace/schema.yaml b/google-marketplace/schema.yaml index c457e04037f58..b25bd97c2fc72 100644 --- a/google-marketplace/schema.yaml +++ b/google-marketplace/schema.yaml @@ -82,6 +82,7 @@ properties: rulesType: CUSTOM rules: # nonResource rules not allowed + # Ref: https://github.com/GoogleCloudPlatform/marketplace-k8s-app-tools/blob/8d76357d033914e05578a13711489dc2bfeb916a/marketplace/deployer_util/config_helper.py#L981-L983 # - nonResourceURLs: # - /metrics # - /metrics/slis @@ -91,26 +92,8 @@ properties: - "" resources: - componentstatuses - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - deployments + - namespaces verbs: - get - list @@ -118,19 +101,13 @@ properties: - apiGroups: - "" resources: + - configmaps - endpoints - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - events + - pods + - secrets + - serviceaccounts + - services verbs: - create - delete @@ -143,17 +120,13 @@ properties: - "" resources: - limitranges + - persistentvolumeclaims + - persistentvolumes + - replicationcontrollers + - resourcequotas verbs: - list - watch - - apiGroups: - - "" - resources: - - namespaces - verbs: - - get - - list - - watch - apiGroups: - "" resources: 
@@ -167,102 +140,11 @@ properties: - "" resources: - nodes/metrics - verbs: - - get - - apiGroups: - - "" - resources: - nodes/proxy - verbs: - - get - - apiGroups: - - "" - resources: - nodes/spec - verbs: - - get - - apiGroups: - - "" - resources: - nodes/stats verbs: - get - - apiGroups: - - "" - resources: - - persistentvolumeclaims - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - persistentvolumes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - pods - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - replicationcontrollers - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - resourcequotas - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - secrets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - serviceaccounts - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - "" - resources: - - services - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - '*' resources: @@ -279,6 +161,7 @@ properties: - '*' - apiGroups: - apiextensions.k8s.io + - extensions resources: - customresourcedefinitions verbs: @@ -296,17 +179,6 @@ properties: - apps resources: - daemonsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - apps - resources: - deployments verbs: - create @@ -320,6 +192,7 @@ properties: - apps resources: - replicasets + - statefulsets verbs: - get - list @@ -331,14 +204,6 @@ properties: verbs: - list - watch - - apiGroups: - - apps - resources: - - statefulsets - verbs: - - get - - list - - watch - apiGroups: - apps - extensions @@ -356,30 +221,6 @@ properties: verbs: - create - get - - apiGroups: - - authorization.k8s.io - resources: - - clusterrolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - authorization.k8s.io - resources: - - clusterroles - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - authorization.k8s.io resources: @@ -389,18 +230,18 @@ properties: - apiGroups: - authorization.k8s.io resources: - - rolebindings + - subjectaccessreviews verbs: - create - - delete - get - - list - - patch - - update - - watch - apiGroups: - authorization.k8s.io + - rbac.authorization.k8s.io + - roles.rbac.authorization.k8s.io resources: + - clusterrolebindings + - clusterroles + - rolebindings - roles verbs: - create @@ -410,13 +251,6 @@ properties: - patch - update - watch - - apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create - - get - apiGroups: - autoscaling resources: @@ -435,13 +269,6 @@ properties: - batch resources: - cronjobs - verbs: - - get - - list - - watch - - apiGroups: - - batch - resources: - jobs verbs: - get @@ -482,70 +309,15 @@ properties: - datadoghq.com resources: - datadogagentprofiles - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - datadoghq.com - resources: - datadogagentprofiles/finalizers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - datadoghq.com - resources: - - datadogagentprofiles/status - verbs: - - get - - patch - - update - - apiGroups: - - datadoghq.com - resources: - datadogagents - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - 
datadoghq.com - resources: - datadogagents/finalizers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - datadoghq.com - resources: - - datadogagents/status - verbs: - - get - - patch - - update - - apiGroups: - - datadoghq.com - resources: - datadogdashboards + - datadogmonitors + - datadogmonitors/finalizers + - datadogslos + - datadogslos/finalizers + - extendeddaemonsets verbs: - create - delete @@ -557,13 +329,11 @@ properties: - apiGroups: - datadoghq.com resources: - - datadogdashboards/finalizers - verbs: - - update - - apiGroups: - - datadoghq.com - resources: + - datadogagentprofiles/status + - datadogagents/status - datadogdashboards/status + - datadogmonitors/status + - datadogslos/status verbs: - get - patch @@ -571,112 +341,32 @@ properties: - apiGroups: - datadoghq.com resources: - - datadogmetrics - verbs: - - create - - delete - - list - - watch - - apiGroups: - - datadoghq.com - resources: + - datadogdashboards/finalizers - datadogmetrics/status verbs: - update - apiGroups: - datadoghq.com resources: - - datadogmonitors - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - datadoghq.com - resources: - - datadogmonitors/finalizers + - datadogmetrics verbs: - create - delete - - get - list - - patch - - update - watch - - apiGroups: - - datadoghq.com - resources: - - datadogmonitors/status - verbs: - - get - - patch - - update - apiGroups: - datadoghq.com resources: - datadogpodautoscalers - verbs: - - '*' - - apiGroups: - - datadoghq.com - resources: - datadogpodautoscalers/status verbs: - '*' - - apiGroups: - - datadoghq.com - resources: - - datadogslos - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - datadoghq.com - resources: - - datadogslos/finalizers - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - datadoghq.com - resources: - - datadogslos/status - verbs: - - get - - patch - - update - apiGroups: - datadoghq.com resources: - extendeddaemonsetreplicasets verbs: - get - - apiGroups: - - datadoghq.com - resources: - - extendeddaemonsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - datadoghq.com resources: @@ -685,13 +375,6 @@ properties: - get - list - watch - - apiGroups: - - extensions - resources: - - customresourcedefinitions - verbs: - - list - - watch - apiGroups: - external.metrics.k8s.io resources: @@ -731,14 +414,6 @@ properties: - patch - update - watch - - apiGroups: - - policy - resources: - - podsecuritypolicies - verbs: - - get - - list - - watch - apiGroups: - quota.openshift.io resources: @@ -746,102 +421,6 @@ properties: verbs: - get - list - - apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterrolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rbac.authorization.k8s.io - resources: - - clusterroles - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - roles.rbac.authorization.k8s.io - resources: - - clusterrolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - 
apiGroups: - - roles.rbac.authorization.k8s.io - resources: - - clusterroles - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - roles.rbac.authorization.k8s.io - resources: - - rolebindings - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - - apiGroups: - - roles.rbac.authorization.k8s.io - resources: - - roles - verbs: - - create - - delete - - get - - list - - patch - - update - - watch - apiGroups: - security.openshift.io resourceNames: diff --git a/google-marketplace/test-marketplace.sh b/google-marketplace/test-marketplace.sh index 2276c1d737671..f074b3a19ac20 100755 --- a/google-marketplace/test-marketplace.sh +++ b/google-marketplace/test-marketplace.sh @@ -13,7 +13,8 @@ FULL_TAG=$1 echo "Running gcrane cp" gcrane cp "datadog/operator:$FULL_TAG" "$REGISTRY/datadog-operator:$FULL_TAG" echo "Running docker build" -docker build --pull --platform linux/amd64 --no-cache --build-arg TAG="$FULL_TAG" --tag "$REGISTRY/deployer:$FULL_TAG" . && docker push "$REGISTRY/deployer:$FULL_TAG" +docker build --pull --platform linux/amd64 --no-cache --build-arg TAG="$FULL_TAG" --tag "$REGISTRY/deployer:$FULL_TAG" --push . +gcrane mutate --annotation "com.googleapis.cloudmarketplace.product.service.name=datadog" "$REGISTRY/deployer:$FULL_TAG" # Note: do not use "createAgent": true, as when resources are cleaned up mpdev will orphan the DatadogAgent echo "Running mpdev verify" EXTRA_DOCKER_PARAMS=--platform=linux/amd64 mpdev verify --deployer=$REGISTRY/deployer:"$FULL_TAG" diff --git a/internal/remote-agent/main.go b/internal/remote-agent/main.go index be86d0f6811ad..8545d32f56dd1 100644 --- a/internal/remote-agent/main.go +++ b/internal/remote-agent/main.go @@ -32,6 +32,7 @@ import ( type remoteAgentServer struct { started time.Time + pbcore.UnimplementedRemoteAgentServer } func (s *remoteAgentServer) GetStatusDetails(_ context.Context, req *pbcore.GetStatusDetailsRequest) (*pbcore.GetStatusDetailsResponse, error) { diff --git a/internal/tools/go.mod b/internal/tools/go.mod index 62a9752009b0f..7304608526f24 100644 --- a/internal/tools/go.mod +++ b/internal/tools/go.mod @@ -56,9 +56,9 @@ require ( github.com/chavacava/garif v0.1.0 // indirect github.com/chigopher/pathlib v0.19.1 // indirect github.com/ckaznocha/intrange v0.1.2 // indirect - github.com/cloudflare/circl v1.3.8 // indirect + github.com/cloudflare/circl v1.5.0 // indirect github.com/curioswitch/go-reassign v0.2.0 // indirect - github.com/cyphar/filepath-securejoin v0.3.4 // indirect + github.com/cyphar/filepath-securejoin v0.3.6 // indirect github.com/daixiang0/gci v0.13.4 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/denis-tingaikin/go-header v0.5.0 // indirect @@ -76,8 +76,8 @@ require ( github.com/ghostiam/protogetter v0.3.6 // indirect github.com/go-critic/go-critic v0.11.4 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect - github.com/go-git/go-billy/v5 v5.5.0 // indirect - github.com/go-git/go-git/v5 v5.12.0 // indirect + github.com/go-git/go-billy/v5 v5.6.1 // indirect + github.com/go-git/go-git/v5 v5.13.0 // indirect github.com/go-toolsmith/astcast v1.1.0 // indirect github.com/go-toolsmith/astcopy v1.1.0 // indirect github.com/go-toolsmith/astequal v1.2.0 // indirect @@ -89,8 +89,8 @@ require ( github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.12.1 // indirect - github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect + github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/golang/mock v1.7.0-rc.1 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/gofmt v0.0.0-20240816233607-d8596aa466a9 // indirect github.com/golangci/misspell v0.6.0 // indirect @@ -142,11 +142,12 @@ require ( github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mattn/go-zglob v0.0.2-0.20191112051448-a8912a37f9e7 // indirect github.com/mgechev/revive v1.3.9 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect + github.com/mmcloughlin/avo v0.6.0 // indirect github.com/montanaflynn/stats v0.7.0 // indirect github.com/moricho/tparallel v0.3.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -157,13 +158,13 @@ require ( github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/onsi/ginkgo/v2 v2.20.2 // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect - github.com/pjbgf/sha1cd v0.3.0 // indirect + github.com/pjbgf/sha1cd v0.3.1 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polyfloyd/go-errorlint v1.6.0 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quasilyte/go-ruleguard v0.4.2 // indirect github.com/quasilyte/go-ruleguard/dsl v0.3.22 // indirect @@ -189,12 +190,12 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/tenv v1.10.0 // indirect - github.com/skeema/knownhosts v1.2.2 // indirect + github.com/skeema/knownhosts v1.3.0 // indirect github.com/sonatard/noctx v0.0.2 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/viper v1.19.0 // indirect @@ -223,18 +224,18 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.31.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect + golang.org/x/crypto v0.32.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.33.0 // indirect + golang.org/x/net v0.34.0 // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/term v0.28.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/tools v0.28.0 // indirect + golang.org/x/tools v0.29.0 // 
indirect gonum.org/v1/gonum v0.15.1 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/neurosnap/sentences.v1 v1.0.6 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect @@ -245,3 +246,6 @@ require ( mvdan.cc/gofumpt v0.7.0 // indirect mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f // indirect ) + +// github.com/golang/mock is unmaintained and archived, v1.6.0 is the last released version +replace github.com/golang/mock => github.com/golang/mock v1.6.0 diff --git a/internal/tools/go.sum b/internal/tools/go.sum index 832b2df4aa05e..1452e38be7a32 100644 --- a/internal/tools/go.sum +++ b/internal/tools/go.sum @@ -93,15 +93,15 @@ github.com/chigopher/pathlib v0.19.1 h1:RoLlUJc0CqBGwq239cilyhxPNLXTK+HXoASGyGzn github.com/chigopher/pathlib v0.19.1/go.mod h1:tzC1dZLW8o33UQpWkNkhvPwL5n4yyFRFm/jL1YGWFvY= github.com/ckaznocha/intrange v0.1.2 h1:3Y4JAxcMntgb/wABQ6e8Q8leMd26JbX2790lIss9MTI= github.com/ckaznocha/intrange v0.1.2/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE= -github.com/cloudflare/circl v1.3.8 h1:j+V8jJt09PoeMFIu2uh5JUyEaIHTXVOHslFoLNAKqwI= -github.com/cloudflare/circl v1.3.8/go.mod h1:PDRU+oXvdD7KCtgKxW95M5Z8BpSCJXQORiZFnBQS5QU= +github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys= +github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= -github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8= -github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM= +github.com/cyphar/filepath-securejoin v0.3.6 h1:4d9N5ykBnSp5Xn2JkhocYDkOpURL/18CYMpo6xB9uWM= +github.com/cyphar/filepath-securejoin v0.3.6/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI= github.com/daixiang0/gci v0.13.4 h1:61UGkmpoAcxHM2hhNkZEf5SzwQtWJXTSws7jaPyqwlw= github.com/daixiang0/gci v0.13.4/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -120,8 +120,8 @@ github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= github.com/ekzhu/minhash-lsh v0.0.0-20171225071031-5c06ee8586a1 h1:/7G7q8SDJdrah5jDYqZI8pGFjSqiCzfSEO+NgqKCYX0= github.com/ekzhu/minhash-lsh v0.0.0-20171225071031-5c06ee8586a1/go.mod h1:yEtCVi+QamvzjEH4U/m6ZGkALIkF2xfQnFp0BcKmIOk= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcejNsXKSkQ6lcIaNec2nyfOdlTBR2lU= -github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= +github.com/elazarl/goproxy v1.2.1 h1:njjgvO6cRG9rIqN2ebkqy6cQz2Njkx7Fsfv/zIZqgug= +github.com/elazarl/goproxy v1.2.1/go.mod h1:YfEbZtqP4AetfO6d40vWchF3znWX7C7Vd6ZMfdL8z64= github.com/emicklei/dot v0.15.0 h1:XDBW0Xco1QNyRb33cqLe10cT04yMWL1XpCZfa98Q6Og= github.com/emicklei/dot v0.15.0/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= 
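For context on the internal/remote-agent/main.go hunk above: grpc-go's protoc-gen-go-grpc generator emits an Unimplemented<Service>Server struct for every service, and embedding it (as the hunk does with pbcore.UnimplementedRemoteAgentServer) is the standard way to keep a handwritten server forward-compatible; RPCs added to the .proto later are answered with codes.Unimplemented instead of breaking the build. A minimal sketch of the pattern follows, using a hypothetical Greeter service with hand-written stand-ins for the generated types rather than the agent's actual pbcore definitions:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// --- stand-in for protoc-gen-go-grpc output (hypothetical, simplified signatures) ---

type GreeterServer interface {
	SayHello(context.Context, string) (string, error)
	mustEmbedUnimplementedGreeterServer()
}

type UnimplementedGreeterServer struct{}

func (UnimplementedGreeterServer) SayHello(context.Context, string) (string, error) {
	return "", status.Error(codes.Unimplemented, "method SayHello not implemented")
}
func (UnimplementedGreeterServer) mustEmbedUnimplementedGreeterServer() {}

// --- handwritten server, same shape as remoteAgentServer in the hunk above ---

type greeterServer struct {
	UnimplementedGreeterServer // embedding keeps the struct valid as the service grows
}

func (s *greeterServer) SayHello(_ context.Context, name string) (string, error) {
	return "hello " + name, nil
}

func main() {
	var srv GreeterServer = &greeterServer{}
	out, _ := srv.SayHello(context.Background(), "agent")
	fmt.Println(out) // any RPC not overridden would return codes.Unimplemented instead
}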
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= @@ -150,8 +150,8 @@ github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlya github.com/ghostiam/protogetter v0.3.6 h1:R7qEWaSgFCsy20yYHNIJsU9ZOb8TziSRRxuAOTVKeOk= github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/gliderlabs/ssh v0.3.7 h1:iV3Bqi942d9huXnzEF2Mt+CY9gLu8DNM4Obd+8bODRE= -github.com/gliderlabs/ssh v0.3.7/go.mod h1:zpHEXBstFnQYtGnB8k8kQLol82umzn/2/snG7alWVD8= +github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= +github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-critic/go-critic v0.11.4 h1:O7kGOCx0NDIni4czrkRIXTnit0mkyKOCePh3My6OyEU= github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc= github.com/go-enry/go-license-detector/v4 v4.3.0 h1:OFlQAVNw5FlKUjX4OuW8JOabu8MQHjTKDb9pdeNYMUw= @@ -160,14 +160,14 @@ github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4u github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-billy/v5 v5.6.1 h1:u+dcrgaguSSkbjzHwelEjc0Yj300NUevrrPphk/SoRA= +github.com/go-git/go-billy/v5 v5.6.1/go.mod h1:0AsLr1z2+Uksi4NlElmMblP5rPcDZNRCD8ujZCRR2BE= github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4= github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII= github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= -github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= -github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= +github.com/go-git/go-git/v5 v5.13.0 h1:vLn5wlGIh/X78El6r3Jr+30W16Blk0CTcxTYcYPWi5E= +github.com/go-git/go-git/v5 v5.13.0/go.mod h1:Wjo7/JyVKtQgUNdXYXIepzWfJQkUEIGvkvVkiXRR/zw= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= @@ -204,8 +204,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gofrs/flock v0.12.1 h1:MTLVXXHf8ekldpJk3AKicLij9MdwOWkZ+a/jHHZby9E= github.com/gofrs/flock v0.12.1/go.mod h1:9zxTsyu5xtJ9DK+1tFZyibEV7y3uwDxPPfbxeeHCoD0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache 
v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -354,8 +354,8 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI= github.com/mattn/go-zglob v0.0.2-0.20191112051448-a8912a37f9e7 h1:6HgbBMgs3hI9y1/MYG0r9j6daUubUskZNsEW4fkWR/k= github.com/mattn/go-zglob v0.0.2-0.20191112051448-a8912a37f9e7/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= @@ -365,6 +365,8 @@ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mmcloughlin/avo v0.6.0 h1:QH6FU8SKoTLaVs80GA8TJuLNkUYl4VokHKlPhVDg4YY= +github.com/mmcloughlin/avo v0.6.0/go.mod h1:8CoAGaCSYXtCPR+8y18Y9aB/kxb8JSS6FRI7mSkvD+8= github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU= github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= @@ -398,8 +400,8 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= -github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= -github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= +github.com/pjbgf/sha1cd v0.3.1 h1:Dh2GYdpJnO84lIw0LJwTFXjcNbasP/bklicSznyAaPI= +github.com/pjbgf/sha1cd v0.3.1/go.mod h1:Y8t7jSB/dEI/lQE04A1HVKteqjj9bX5O4+Cex0TCu8s= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -414,8 +416,8 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+ github.com/prometheus/client_golang v1.20.5/go.mod 
h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= @@ -475,8 +477,8 @@ github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+W github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/sivchari/tenv v1.10.0 h1:g/hzMA+dBCKqGXgW8AV/1xIWhAvDrx0zFKNR48NFMg0= github.com/sivchari/tenv v1.10.0/go.mod h1:tdY24masnVoZFxYrHv/nD6Tc8FbkEtAQEEziXpyMgqY= -github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= -github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY= +github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M= github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= @@ -485,8 +487,8 @@ github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCp github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -589,14 +591,14 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f h1:phY1HzDcf18Aq9A8KkmRtY9WvOFIxN8wgfvy6Zm1DV8= @@ -643,8 +645,8 @@ golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20170207211851-4464e7848382/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/perf v0.0.0-20210220033136-40a54f11e909 h1:rWw0Gj4DMl/2otJ8CnfTcwOWkpROAc6qhXXoMrYOCgo= golang.org/x/perf v0.0.0-20210220033136-40a54f11e909/go.mod h1:KRSrLY7jerMEa0Ih7gBheQ3FYDiSx6liMnniX1o3j2g= @@ -689,8 +691,8 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -699,8 +701,8 @@ golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= 
+golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -740,8 +742,8 @@ golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -755,8 +757,8 @@ gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZ google.golang.org/api v0.0.0-20170206182103-3d017632ea10/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/grpc v0.0.0-20170208002647-2a6bf6142e96/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/tools/proto/go.mod b/internal/tools/proto/go.mod index 4c81cd0d2261a..d937bff40513c 100644 --- a/internal/tools/proto/go.mod +++ b/internal/tools/proto/go.mod @@ -4,31 +4,32 @@ go 1.23.0 require ( github.com/favadi/protoc-go-inject-tag v1.4.0 - github.com/golang/mock v1.6.0 + github.com/golang/mock v1.7.0-rc.1 github.com/golang/protobuf v1.5.4 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 - github.com/tinylib/msgp v1.2.4 - google.golang.org/grpc v1.67.1 + github.com/tinylib/msgp v1.2.5 + google.golang.org/grpc v1.69.4 ) require ( - github.com/ghodss/yaml v1.0.0 // indirect - github.com/golang/glog v1.2.2 // indirect - github.com/kr/pretty v0.3.1 // indirect github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect golang.org/x/mod v0.22.0 
// indirect - golang.org/x/net v0.33.0 // indirect + golang.org/x/net v0.34.0 // indirect golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - golang.org/x/tools v0.28.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/protobuf v1.35.2 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + golang.org/x/tools v0.29.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/protobuf v1.36.3 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect ) replace google.golang.org/protobuf v1.33.0 => google.golang.org/protobuf v1.34.0 + +// github.com/golang/mock is unmaintained and archived, v1.6.0 is the last released version +replace github.com/golang/mock => github.com/golang/mock v1.6.0 diff --git a/internal/tools/proto/go.sum b/internal/tools/proto/go.sum index 43d99cf5a50b1..60f42652c97e5 100644 --- a/internal/tools/proto/go.sum +++ b/internal/tools/proto/go.sum @@ -1,137 +1,88 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/favadi/protoc-go-inject-tag v1.4.0 h1:K3KXxbgRw5WT4f43LbglARGz/8jVsDOS7uMjG4oNvXY= github.com/favadi/protoc-go-inject-tag v1.4.0/go.mod h1:AZ+PK+QDKUOLlBRG0rYiKkUX5Hw7+7GTFzlU99GFSbQ= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.2.2 h1:1+mZ9upx1Dh6FmUTFR1naJ77miKiXgALjWOZ3NVFPmY= -github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= 
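The internal/tools/proto/go.mod hunk above moves the gateway dependency from github.com/grpc-ecosystem/grpc-gateway v1.16.0 to the /v2 module at v2.26.0 (alongside grpc v1.69.4 and protobuf v1.36.3). For orientation only, a minimal sketch of how the v2 runtime package is typically wired up; the registration call is shown as a comment with a placeholder name, not the agent's actual generated code:

package main

import (
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// runtime.ServeMux from the /v2 module replaces the v1 runtime mux;
	// it implements http.Handler directly.
	mux := runtime.NewServeMux()

	// In real use, a protoc-gen-grpc-gateway generated function such as
	// yourpb.RegisterYourServiceHandlerFromEndpoint(ctx, mux, grpcAddr, dialOpts)
	// would be called here (placeholder name, for illustration only).

	log.Fatal(http.ListenAndServe(":8080", mux))
}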
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 
h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 
-golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors 
v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/tools/proto/tools.go b/internal/tools/proto/tools.go index 743e27cca5c92..e4d6db3fcb6fb 100644 --- a/internal/tools/proto/tools.go +++ b/internal/tools/proto/tools.go @@ -19,7 +19,7 @@ import ( _ "github.com/favadi/protoc-go-inject-tag" _ "github.com/golang/mock/mockgen" _ "github.com/golang/protobuf/protoc-gen-go" - _ "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway" + _ "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway" _ "github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto" _ "github.com/tinylib/msgp" _ "google.golang.org/grpc" diff --git a/modules.yml b/modules.yml index 5d5e5cb2057ff..23d835c876095 100644 --- a/modules.yml +++ b/modules.yml @@ -54,18 +54,12 @@ modules: comp/logs/agent/config: used_by_otel: true comp/netflow/payload: default - comp/otelcol/collector-contrib/def: - used_by_otel: true - comp/otelcol/collector-contrib/impl: - used_by_otel: true - comp/otelcol/converter/def: - used_by_otel: true - comp/otelcol/converter/impl: - used_by_otel: true - comp/otelcol/ddflareextension/def: - used_by_otel: true - comp/otelcol/ddflareextension/impl: - used_by_otel: true + comp/otelcol/collector-contrib/def: default + comp/otelcol/collector-contrib/impl: default + comp/otelcol/converter/def: default + comp/otelcol/converter/impl: default + comp/otelcol/ddflareextension/def: default + comp/otelcol/ddflareextension/impl: default comp/otelcol/logsagentpipeline: used_by_otel: true comp/otelcol/logsagentpipeline/logsagentpipelineimpl: @@ -78,13 +72,14 @@ modules: used_by_otel: true comp/otelcol/otlp/components/metricsclient: used_by_otel: true - comp/otelcol/otlp/components/processor/infraattributesprocessor: - used_by_otel: true + comp/otelcol/otlp/components/processor/infraattributesprocessor: default comp/otelcol/otlp/components/statsprocessor: used_by_otel: true comp/otelcol/otlp/testutil: used_by_otel: true - comp/serializer/compression: + comp/serializer/logscompression: + used_by_otel: true + comp/serializer/metricscompression: used_by_otel: true comp/trace/agent/def: used_by_otel: true @@ -146,6 +141,7 @@ modules: pkg/config/utils: used_by_otel: true pkg/errors: default + pkg/fips: default pkg/gohai: default pkg/linters/components/pkgconfigusage: independent: false @@ -217,6 +213,8 @@ modules: used_by_otel: true pkg/util/common: used_by_otel: true + pkg/util/compression: + used_by_otel: true pkg/util/containers/image: used_by_otel: true pkg/util/defaultpaths: @@ -239,7 +237,7 @@ modules: used_by_otel: true pkg/util/log/setup: used_by_otel: true - pkg/util/optional: + pkg/util/option: used_by_otel: true pkg/util/pointer: used_by_otel: true diff --git a/omnibus/config/patches/lua/nodoc.patch b/omnibus/config/patches/lua/nodoc.patch deleted file mode 100644 index ba94498a1caaf..0000000000000 --- a/omnibus/config/patches/lua/nodoc.patch +++ /dev/null @@ -1,50 +0,0 @@ ---- a/Makefile -+++ b/Makefile -@@ -14,7 +14,6 @@ INSTALL_TOP= /usr/local - INSTALL_BIN= $(INSTALL_TOP)/bin - INSTALL_INC= $(INSTALL_TOP)/include - INSTALL_LIB= 
$(INSTALL_TOP)/lib --INSTALL_MAN= $(INSTALL_TOP)/man/man1 - INSTALL_LMOD= $(INSTALL_TOP)/share/lua/$V - INSTALL_CMOD= $(INSTALL_TOP)/lib/lua/$V - -@@ -42,7 +41,6 @@ PLATS= guess aix bsd c89 freebsd generic linux linux-readline macosx mingw posix - TO_BIN= lua luac - TO_INC= lua.h luaconf.h lualib.h lauxlib.h lua.hpp - TO_LIB= liblua.a --TO_MAN= lua.1 luac.1 - - # Lua version and release. - V= 5.4 -@@ -55,17 +53,15 @@ $(PLATS) help test clean: - @cd src && $(MAKE) $@ - - install: dummy -- cd src && $(MKDIR) $(INSTALL_BIN) $(INSTALL_INC) $(INSTALL_LIB) $(INSTALL_MAN) $(INSTALL_LMOD) $(INSTALL_CMOD) -+ cd src && $(MKDIR) $(INSTALL_BIN) $(INSTALL_INC) $(INSTALL_LIB) $(INSTALL_LMOD) $(INSTALL_CMOD) - cd src && $(INSTALL_EXEC) $(TO_BIN) $(INSTALL_BIN) - cd src && $(INSTALL_DATA) $(TO_INC) $(INSTALL_INC) - cd src && $(INSTALL_DATA) $(TO_LIB) $(INSTALL_LIB) -- cd doc && $(INSTALL_DATA) $(TO_MAN) $(INSTALL_MAN) - - uninstall: - cd src && cd $(INSTALL_BIN) && $(RM) $(TO_BIN) - cd src && cd $(INSTALL_INC) && $(RM) $(TO_INC) - cd src && cd $(INSTALL_LIB) && $(RM) $(TO_LIB) -- cd doc && cd $(INSTALL_MAN) && $(RM) $(TO_MAN) - - local: - $(MAKE) install INSTALL_TOP=../install -@@ -82,12 +78,10 @@ echo: - @echo "TO_BIN= $(TO_BIN)" - @echo "TO_INC= $(TO_INC)" - @echo "TO_LIB= $(TO_LIB)" -- @echo "TO_MAN= $(TO_MAN)" - @echo "INSTALL_TOP= $(INSTALL_TOP)" - @echo "INSTALL_BIN= $(INSTALL_BIN)" - @echo "INSTALL_INC= $(INSTALL_INC)" - @echo "INSTALL_LIB= $(INSTALL_LIB)" -- @echo "INSTALL_MAN= $(INSTALL_MAN)" - @echo "INSTALL_LMOD= $(INSTALL_LMOD)" - @echo "INSTALL_CMOD= $(INSTALL_CMOD)" - @echo "INSTALL_EXEC= $(INSTALL_EXEC)" diff --git a/omnibus/config/projects/agent.rb b/omnibus/config/projects/agent.rb index 51bacc89a45d0..4aea7e20fcd73 100644 --- a/omnibus/config/projects/agent.rb +++ b/omnibus/config/projects/agent.rb @@ -333,8 +333,26 @@ windows_symbol_stripping_file bin end - if ENV['SIGN_WINDOWS_DD_WCS'] - BINARIES_TO_SIGN = GO_BINARIES + [ + if windows_signing_enabled? + # Sign additional binaries from here. + # We can't request signing from the respective components/software definitions + # for now since the binaries may be restored from cache, which would + # shortcut the associated build directives, which would not schedule the files + # for signing. + PYTHON_BINARIES = [ + "#{python_3_embedded}\\python.exe", + "#{python_3_embedded}\\pythonw.exe", + "#{python_3_embedded}\\python3.dll", + "#{python_3_embedded}\\python312.dll", + ] + OPENSSL_BINARIES = [ + "#{python_3_embedded}\\DLLs\\libcrypto-3-x64.dll", + "#{python_3_embedded}\\DLLs\\libssl-3-x64.dll", + "#{python_3_embedded}\\bin\\openssl.exe", + fips_mode? ? 
"#{python_3_embedded}\\lib\\ossl-modules\\fips.dll" : nil, + ].compact + + BINARIES_TO_SIGN = GO_BINARIES + PYTHON_BINARIES + OPENSSL_BINARIES + [ "#{install_dir}\\bin\\agent\\ddtray.exe", "#{install_dir}\\bin\\agent\\libdatadog-agent-three.dll" ] diff --git a/omnibus/config/software/attr.rb b/omnibus/config/software/attr.rb index 294bfe28fd8a2..7c4c3ae4b32e6 100644 --- a/omnibus/config/software/attr.rb +++ b/omnibus/config/software/attr.rb @@ -31,7 +31,12 @@ build do env = with_standard_compiler_flags(with_embedded_path) - configure env: env + configure_options = [ + "--disable-static", + "--disable-nls", + ] + + configure(*configure_options, env: env) make "-j #{workers}", env: env make "-j #{workers} install", env: env diff --git a/omnibus/config/software/datadog-agent-dependencies.rb b/omnibus/config/software/datadog-agent-dependencies.rb index fc34796e2fa4b..ad642b1d7c1be 100644 --- a/omnibus/config/software/datadog-agent-dependencies.rb +++ b/omnibus/config/software/datadog-agent-dependencies.rb @@ -6,9 +6,9 @@ if linux_target? dependency 'procps-ng' dependency 'curl' - if fips_mode? - dependency 'openssl-fips-provider' - end +end +if fips_mode? + dependency 'openssl-fips-provider' end # Bundled cacerts file (is this a good idea?) diff --git a/omnibus/config/software/datadog-agent-finalize.rb b/omnibus/config/software/datadog-agent-finalize.rb index b797b6e8dcc55..59b7a38520fd0 100644 --- a/omnibus/config/software/datadog-agent-finalize.rb +++ b/omnibus/config/software/datadog-agent-finalize.rb @@ -175,6 +175,7 @@ if osx_target? # Remove linux specific configs delete "#{install_dir}/etc/conf.d/file_handle.d" + delete "#{install_dir}/etc/conf.d/service_discovery.d" # remove windows specific configs delete "#{install_dir}/etc/conf.d/winproc.d" diff --git a/omnibus/config/software/datadog-agent.rb b/omnibus/config/software/datadog-agent.rb index 12b916c14fd83..9dd41fa755a2b 100644 --- a/omnibus/config/software/datadog-agent.rb +++ b/omnibus/config/software/datadog-agent.rb @@ -91,7 +91,7 @@ do_windows_sysprobe = "--windows-sysprobe" end command "inv -e rtloader.clean" - command "inv -e rtloader.make --install-prefix \"#{windows_safe_path(python_2_embedded)}\" --cmake-options \"-G \\\"Unix Makefiles\\\" \\\"-DPython3_EXECUTABLE=#{windows_safe_path(python_3_embedded)}\\python.exe\"\"", :env => env + command "inv -e rtloader.make --install-prefix \"#{windows_safe_path(python_3_embedded)}\" --cmake-options \"-G \\\"Unix Makefiles\\\" \\\"-DPython3_EXECUTABLE=#{windows_safe_path(python_3_embedded)}\\python.exe\"\"", :env => env command "mv rtloader/bin/*.dll #{install_dir}/bin/agent/" command "inv -e agent.build --exclude-rtloader --major-version #{major_version_arg} --no-development --install-path=#{install_dir} --embedded-path=#{install_dir}/embedded #{do_windows_sysprobe} --flavor #{flavor_arg}", env: env command "inv -e systray.build --major-version #{major_version_arg}", env: env @@ -99,7 +99,7 @@ command "inv -e rtloader.clean" command "inv -e rtloader.make --install-prefix \"#{install_dir}/embedded\" --cmake-options '-DCMAKE_CXX_FLAGS:=\"-D_GLIBCXX_USE_CXX11_ABI=0\" -DCMAKE_INSTALL_LIBDIR=lib -DCMAKE_FIND_FRAMEWORK:STRING=NEVER -DPython3_EXECUTABLE=#{install_dir}/embedded/bin/python3'", :env => env command "inv -e rtloader.install" - bundle_arg = bundled_agents ? bundled_agents.map { |k| "--bundle #{k}" }.join(" ") : "--bundle agent" + bundle_arg = bundled_agents.map { |k| "--bundle #{k}" }.join(" ") include_sds = "" if linux_target? 
@@ -166,6 +166,7 @@ command "invoke -e system-probe.build #{fips_args}", env: env elsif linux_target? command "invoke -e system-probe.build-sysprobe-binary #{fips_args} --install-path=#{install_dir}", env: env + command "!(objdump -p ./bin/system-probe/system-probe | egrep 'GLIBC_2\.(1[8-9]|[2-9][0-9])')" end if windows_target? @@ -198,7 +199,7 @@ # CWS Instrumentation cws_inst_support = !heroku_target? && linux_target? if cws_inst_support - command "invoke -e cws-instrumentation.build", :env => env + command "invoke -e cws-instrumentation.build #{fips_args}", :env => env copy 'bin/cws-instrumentation/cws-instrumentation', "#{install_dir}/embedded/bin" end diff --git a/omnibus/config/software/libacl.rb b/omnibus/config/software/libacl.rb index ade34b40326f2..62e10ed34ce33 100644 --- a/omnibus/config/software/libacl.rb +++ b/omnibus/config/software/libacl.rb @@ -34,6 +34,7 @@ env = with_standard_compiler_flags(with_embedded_path) configure_options = [ + "--disable-nls", "--disable-static", ] diff --git a/omnibus/config/software/libgpg-error.rb b/omnibus/config/software/libgpg-error.rb index 4714696d4257c..7a47bb4e70e72 100644 --- a/omnibus/config/software/libgpg-error.rb +++ b/omnibus/config/software/libgpg-error.rb @@ -39,6 +39,7 @@ "--disable-tests", "--disable-doc", "--disable-languages", + "--disable-nls", ] configure(*configure_options, env: env) diff --git a/omnibus/config/software/lua.rb b/omnibus/config/software/lua.rb index 3b610c8e01d29..6e58bf2f2e8fa 100644 --- a/omnibus/config/software/lua.rb +++ b/omnibus/config/software/lua.rb @@ -30,14 +30,17 @@ build do env = with_standard_compiler_flags(with_embedded_path) - patch source: "nodoc.patch", env: env # don't install documentation - # lua compiles in a slightly interesting way, it has minimal configuration options # and only provides a makefile. We can't use use `-DLUA_USE_LINUX` or the `make linux` # methods because they make the assumption that the readline package has been installed. mycflags = "-I#{install_dir}/embedded/include -O2 -DLUA_USE_POSIX -DLUA_USE_DLOPEN -fpic" myldflags = "-Wl,-rpath,#{install_dir}/embedded/lib -L#{install_dir}/embedded/lib" mylibs = "-ldl" - make "all MYCFLAGS='#{mycflags}' MYLDFLAGS='#{myldflags}' MYLIBS='#{mylibs}'", env: env, cwd: "#{project_dir}/src" - make "-j #{workers} install INSTALL_TOP=#{install_dir}/embedded", env: env + make "liblua.a MYCFLAGS='#{mycflags}' MYLDFLAGS='#{myldflags}' MYLIBS='#{mylibs}'", env: env, cwd: "#{project_dir}/src" + copy "#{project_dir}/src/liblua.a", "#{install_dir}/embedded/lib/" + copy "#{project_dir}/src/lua.h", "#{install_dir}/embedded/include/" + copy "#{project_dir}/src/luaconf.h", "#{install_dir}/embedded/include/" + copy "#{project_dir}/src/lualib.h", "#{install_dir}/embedded/include/" + copy "#{project_dir}/src/lauxlib.h", "#{install_dir}/embedded/include/" + copy "#{project_dir}/src/lua.hpp", "#{install_dir}/embedded/include/" end diff --git a/omnibus/config/software/openssl-fips-provider.rb b/omnibus/config/software/openssl-fips-provider.rb index 7d9584b2e3767..4f591a542d9f3 100644 --- a/omnibus/config/software/openssl-fips-provider.rb +++ b/omnibus/config/software/openssl-fips-provider.rb @@ -28,22 +28,53 @@ # https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp4282.pdf # # ---------------- DO NOT MODIFY LINES BELOW HERE ---------------- - command "./Configure enable-fips", env: env + unless windows_target? 
+ # Exact build steps from security policy: + # https://csrc.nist.gov/CSRC/media/projects/cryptographic-module-validation-program/documents/security-policies/140sp4282.pdf + # + # ---------------- DO NOT MODIFY LINES BELOW HERE ---------------- + command "./Configure enable-fips", env: env - command "make", env: env - command "make install", env: env + command "make", env: env + command "make install", env: env # ---------------- DO NOT MODIFY LINES ABOVE HERE ---------------- + else + # ---------------- DO NOT MODIFY LINES BELOW HERE ---------------- + command "perl.exe ./Configure enable-fips", env: env - mkdir "#{install_dir}/embedded/ssl" - mkdir "#{install_dir}/embedded/lib/ossl-modules" - copy "/usr/local/lib*/ossl-modules/fips.so", "#{install_dir}/embedded/lib/ossl-modules/fips.so" + command "make", env: env + command "make install_fips", env: env + # ---------------- DO NOT MODIFY LINES ABOVE HERE ---------------- + end + + + dest = if !windows_target? then "#{install_dir}/embedded" else "#{windows_safe_path(python_3_embedded)}" end + mkdir "#{dest}/ssl" + mkdir "#{dest}/lib/ossl-modules" + mkdir "#{dest}/bin" + if linux_target? + copy "/usr/local/lib*/ossl-modules/fips.so", "#{dest}/lib/ossl-modules/fips.so" + elsif windows_target? + copy "providers/fips.dll", "#{dest}/lib/ossl-modules/fips.dll" + end + if linux_target? + embedded_ssl_dir = "#{install_dir}/embedded/ssl" + elsif windows_target? + # Use the default installation directory instead of install_dir which is just a build path. + # This simpifies container setup because we don't need to modify the config file. + # The MSI contains logic to replace the path at install time. + # Note: We intentionally use forward slashes here + embedded_ssl_dir = "C:/Program Files/Datadog/Datadog Agent/embedded3/ssl" + end erb source: "openssl.cnf.erb", - dest: "#{install_dir}/embedded/ssl/openssl.cnf.tmp", + dest: "#{dest}/ssl/openssl.cnf.tmp", mode: 0644, - vars: { install_dir: install_dir } - erb source: "fipsinstall.sh.erb", - dest: "#{install_dir}/embedded/bin/fipsinstall.sh", - mode: 0755, - vars: { install_dir: install_dir } + vars: { embedded_ssl_dir: embedded_ssl_dir } + unless windows_target? + erb source: "fipsinstall.sh.erb", + dest: "#{dest}/bin/fipsinstall.sh", + mode: 0755, + vars: { install_dir: install_dir } + end end diff --git a/omnibus/config/software/python3.rb b/omnibus/config/software/python3.rb index 1cd3c096363f2..f53ba24b733ab 100644 --- a/omnibus/config/software/python3.rb +++ b/omnibus/config/software/python3.rb @@ -1,47 +1,48 @@ name "python3" -default_version "3.12.6" - -if ohai["platform"] != "windows" +default_version "3.12.8" +unless windows? dependency "libxcrypt" dependency "libffi" dependency "ncurses" dependency "zlib" - dependency "openssl3" dependency "bzip2" dependency "libsqlite3" dependency "liblzma" dependency "libyaml" +end +dependency "openssl3" - source :url => "https://python.org/ftp/python/#{version}/Python-#{version}.tgz", - :sha256 => "85a4c1be906d20e5c5a69f2466b00da769c221d6a684acfd3a514dbf5bf10a66" - - relative_path "Python-#{version}" - - python_configure_options = [ - "--without-readline", # Disables readline support - "--with-ensurepip=yes" # We upgrade pip later, in the pip3 software definition - ] - - if mac_os_x? - python_configure_options.push("--enable-ipv6", - "--with-universal-archs=intel", - "--enable-shared") - elsif linux_target? - python_configure_options.push("--enable-shared", - "--enable-ipv6") - elsif aix? - # something here... 
- end +source :url => "https://python.org/ftp/python/#{version}/Python-#{version}.tgz", + :sha256 => "5978435c479a376648cb02854df3b892ace9ed7d32b1fead652712bee9d03a45" - python_configure_options.push("--with-dbmliborder=") +relative_path "Python-#{version}" - build do - # 2.0 is the license version here, not the python version - license "Python-2.0" +build do + # 2.0 is the license version here, not the python version + license "Python-2.0" + unless windows_target? env = with_standard_compiler_flags(with_embedded_path) + python_configure_options = [ + "--without-readline", # Disables readline support + "--with-ensurepip=yes" # We upgrade pip later, in the pip3 software definition + ] + + if mac_os_x? + python_configure_options.push("--enable-ipv6", + "--with-universal-archs=intel", + "--enable-shared") + elsif linux_target? + python_configure_options.push("--enable-shared", + "--enable-ipv6") + elsif aix? + # something here... + end + + python_configure_options.push("--with-dbmliborder=") + # Force different defaults for the "optimization settings" # This removes the debug symbol generation and doesn't enable all warnings env["OPT"] = "-DNDEBUG -fwrapv" @@ -66,24 +67,74 @@ block do FileUtils.rm_f(Dir.glob("#{install_dir}/embedded/lib/python#{major}.#{minor}/distutils/command/wininst-*.exe")) end - end - -else - dependency "vc_redist_14" - - # note that starting with 3.7.3 on Windows, the zip should be created without the built-in pip - source :url => "https://dd-agent-omnibus.s3.amazonaws.com/python-windows-#{version}-amd64.zip", - :sha256 => "045d20a659fe80041b6fd508b77f250b03330347d64f128b392b88e68897f5a0".downcase + else + dependency "vc_redist_14" + + vcrt140_root = "#{Omnibus::Config.source_dir()}/vc_redist_140/expanded" + ############################### + # Setup openssl dependency... # + ############################### + + # We must provide python with the same file hierarchy as + # https://github.com/python/cpython-bin-deps/tree/openssl-bin-3.0/amd64 + # but with our OpenSSL build instead. + + # This is not necessarily the version we built, but the version + # the Python build system expects. 
+ openssl_version = "3.0.15" + python_arch = "amd64" + + mkdir "externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\include" + # Copy the import library to have them point at our own built versions, regardless of + # their names in usual python builds + copy "#{install_dir}\\embedded3\\lib\\libcrypto.dll.a", "externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\libcrypto.lib" + copy "#{install_dir}\\embedded3\\lib\\libssl.dll.a", "externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\libssl.lib" + copy "#{install_dir}\\embedded3\\lib\\libssl.dll.a", "externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\libssl.lib" + # Copy the actual DLLs, be sure to keep the same name since that's what the IMPLIBs expect + copy "#{install_dir}\\embedded3\\bin\\libssl-3-x64.dll", "externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\libssl-3.dll" + # Create empty PDBs since python's build system require those to be present + command "touch externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\libssl-3.pdb" + copy "#{install_dir}\\embedded3\\bin\\libcrypto-3-x64.dll", "externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\libcrypto-3.dll" + command "touch externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\libcrypto-3.pdb" + # The applink "header" + copy "#{install_dir}\\embedded3\\include\\openssl\\applink.c", "externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\include\\" + # And finally the headers: + copy "#{install_dir}\\embedded3\\include\\openssl", "externals\\openssl-bin-#{openssl_version}\\#{python_arch}\\include\\" + # Now build python itself... + + ############################### + # Build Python... # + ############################### + # -e to enable external libraries. They won't be fetched if already + # present, but the modules will be built nonetheless. + command "PCbuild\\build.bat -e --pgo" + # Install the built artifacts to their expected locations + # --include-dev - include include/ and libs/ directories + # --include-venv - necessary for ensurepip to work + # --include-stable - adds python3.dll + command "PCbuild\\#{python_arch}\\python.exe PC\\layout\\main.py --build PCbuild\\#{python_arch} --precompile --copy #{windows_safe_path(python_3_embedded)} --include-dev --include-venv --include-stable -vv" + + ############################### + # Install build artifacts... # + ############################### + # We copied the OpenSSL libraries with the name python expects to keep the build happy + # but at runtime, it will attempt to load the DLLs pointed at by the .dll.a generated by + # the OpenSSL build, so we need to copy those files to the install directory. 
+ # The ones we copied for the build are now irrelevant + openssl_arch = "x64" + copy "#{install_dir}\\embedded3\\bin\\libcrypto-3-#{openssl_arch}.dll", "#{windows_safe_path(python_3_embedded)}\\DLLs" + copy "#{install_dir}\\embedded3\\bin\\libssl-3-#{openssl_arch}.dll", "#{windows_safe_path(python_3_embedded)}\\DLLs" + # We can also remove the DLLs that were put there by the python build since they won't be loaded anyway + delete "#{windows_safe_path(python_3_embedded)}\\DLLs\\libcrypto-3.dll" + delete "#{windows_safe_path(python_3_embedded)}\\DLLs\\libssl-3.dll" + # Generate libpython3XY.a for MinGW tools + # https://docs.python.org/3/whatsnew/3.8.html + major, minor, _ = version.split(".") + command "gendef #{windows_safe_path(python_3_embedded)}\\python#{major}#{minor}.dll" + command "dlltool --dllname python#{major}#{minor}.dll --def python#{major}#{minor}.def --output-lib #{windows_safe_path(python_3_embedded)}\\libs\\libpython#{major}#{minor}.a" - vcrt140_root = "#{Omnibus::Config.source_dir()}/vc_redist_140/expanded" - build do - # 2.0 is the license version here, not the python version - license "Python-2.0" - - command "XCOPY /YEHIR *.* \"#{windows_safe_path(python_3_embedded)}\"" - - # Install pip python = "#{windows_safe_path(python_3_embedded)}\\python.exe" command "#{python} -m ensurepip" end end + diff --git a/omnibus/config/software/vc_redist.rb b/omnibus/config/software/vc_redist.rb index 93be63e05b9ae..1080e4fd6acca 100644 --- a/omnibus/config/software/vc_redist.rb +++ b/omnibus/config/software/vc_redist.rb @@ -23,7 +23,7 @@ # as a DLL, we need to redistribute the CRT DLLS. We (now) need the DLLS in # both embedded and dist, as we have executables in each of those directories # that require them. - command "XCOPY /YEH .\\*.* \"#{windows_safe_path(python_2_embedded)}\" /IR" + command "XCOPY /YEH .\\*.* \"#{windows_safe_path(python_3_embedded)}\" /IR" # # also copy them to the bin/agent directory, so we can (optionally) install on diff --git a/omnibus/config/templates/openssl-fips-provider/openssl.cnf.erb b/omnibus/config/templates/openssl-fips-provider/openssl.cnf.erb index 23b1637b9c017..7f5180c77daef 100644 --- a/omnibus/config/templates/openssl-fips-provider/openssl.cnf.erb +++ b/omnibus/config/templates/openssl-fips-provider/openssl.cnf.erb @@ -1,7 +1,7 @@ config_diagnostics = 1 openssl_conf = openssl_init -.include <%= install_dir %>/embedded/ssl/fipsmodule.cnf +.include <%= embedded_ssl_dir %>/fipsmodule.cnf [openssl_init] providers = provider_sect diff --git a/omnibus/lib/project_helpers.rb b/omnibus/lib/project_helpers.rb index d1498342ec617..12bee380183ef 100644 --- a/omnibus/lib/project_helpers.rb +++ b/omnibus/lib/project_helpers.rb @@ -10,3 +10,6 @@ def sysprobe_enabled?() !heroku_target? && linux_target? && !ENV.fetch('SYSTEM_PROBE_BIN', '').empty? 
end +def windows_signing_enabled?() + return ENV['SIGN_WINDOWS_DD_WCS'] +end diff --git a/omnibus/package-scripts/agent-deb/postinst b/omnibus/package-scripts/agent-deb/postinst index 09a1c4a0d6d41..9c193a8c98ac4 100755 --- a/omnibus/package-scripts/agent-deb/postinst +++ b/omnibus/package-scripts/agent-deb/postinst @@ -187,7 +187,7 @@ else fi if [ -f "$INSTALL_DIR/embedded/bin/python" ]; then - ${INSTALL_DIR}/embedded/bin/python "${INSTALL_DIR}/python-scripts/postinst.py" "${INSTALL_DIR}" || true + ${INSTALL_DIR}/embedded/bin/python "${INSTALL_DIR}/python-scripts/post.py" "${INSTALL_DIR}" || true fi exit 0 diff --git a/omnibus/package-scripts/agent-deb/prerm b/omnibus/package-scripts/agent-deb/prerm index 67c916c335da5..96f10fb205543 100755 --- a/omnibus/package-scripts/agent-deb/prerm +++ b/omnibus/package-scripts/agent-deb/prerm @@ -156,11 +156,11 @@ remove_remote_config_db() remove_persist_integration_files() { # Remove any file related to reinstalling non-core integrations (see python-scripts/packages.py for the names) - if [ -f "$INSTALL_DIR/.prerm_python_installed_packages.txt" ]; then - rm "$INSTALL_DIR/.prerm_python_installed_packages.txt" || true + if [ -f "$INSTALL_DIR/.pre_python_installed_packages.txt" ]; then + rm "$INSTALL_DIR/.pre_python_installed_packages.txt" || true fi - if [ -f "$INSTALL_DIR/.postinst_python_installed_packages.txt" ]; then - rm "$INSTALL_DIR/.postinst_python_installed_packages.txt" || true + if [ -f "$INSTALL_DIR/.post_python_installed_packages.txt" ]; then + rm "$INSTALL_DIR/.post_python_installed_packages.txt" || true fi if [ -f "$INSTALL_DIR/.diff_python_installed_packages.txt" ]; then rm "$INSTALL_DIR/.diff_python_installed_packages.txt" || true @@ -178,7 +178,7 @@ case "$1" in #this can't be merged with the later case block because running 're upgrade) # We're upgrading. if [ -f "$INSTALL_DIR/embedded/bin/python" ]; then - ${INSTALL_DIR}/embedded/bin/python "${INSTALL_DIR}/python-scripts/prerm.py" "${INSTALL_DIR}" || true + ${INSTALL_DIR}/embedded/bin/python "${INSTALL_DIR}/python-scripts/pre.py" "${INSTALL_DIR}" || true fi ;; *) diff --git a/omnibus/package-scripts/agent-rpm/postinst b/omnibus/package-scripts/agent-rpm/postinst index 64493e95c57d9..baae1a8ed7a7a 100755 --- a/omnibus/package-scripts/agent-rpm/postinst +++ b/omnibus/package-scripts/agent-rpm/postinst @@ -41,7 +41,7 @@ chown -R root:root ${INSTALL_DIR}/embedded/share/system-probe/ebpf chown -R root:root ${INSTALL_DIR}/embedded/share/system-probe/java if [ -f "$INSTALL_DIR/embedded/bin/python" ]; then - ${INSTALL_DIR}/embedded/bin/python "${INSTALL_DIR}/python-scripts/postinst.py" "${INSTALL_DIR}" || true + ${INSTALL_DIR}/embedded/bin/python "${INSTALL_DIR}/python-scripts/post.py" "${INSTALL_DIR}" || true fi exit 0 diff --git a/omnibus/package-scripts/agent-rpm/preinst b/omnibus/package-scripts/agent-rpm/preinst index 24733dd871353..428c8b7110e80 100755 --- a/omnibus/package-scripts/agent-rpm/preinst +++ b/omnibus/package-scripts/agent-rpm/preinst @@ -53,6 +53,24 @@ if ! 
getent passwd dd-agent >/dev/null; then fi fi + + +if [ -f "$INSTALL_DIR/embedded/bin/python" ]; then + if [ -f "${INSTALL_DIR}/python-scripts/pre.py" ]; then + PRE_PYTHON_FILE="${INSTALL_DIR}/python-scripts/pre.py" + else + # Search for previous version of the python file + if [ -f "${INSTALL_DIR}/python-scripts/prerm.py" ]; then + PRE_PYTHON_FILE="${INSTALL_DIR}/python-scripts/prerm.py" + fi + fi + if [ -n "$PRE_PYTHON_FILE" ]; then + ${INSTALL_DIR}/embedded/bin/python "${PRE_PYTHON_FILE}" "${INSTALL_DIR}" || true + else + echo "[ WARNING ]\tPRE_PYTHON_FILE is not set" + fi +fi + # Starting with 6.10, integrations are also uninstalled on package removal # Since 6.18.0, a file containing all integrations files which have been installed by diff --git a/omnibus/package-scripts/agent-rpm/prerm b/omnibus/package-scripts/agent-rpm/prerm index df2a03b266047..ca3c836393203 100755 --- a/omnibus/package-scripts/agent-rpm/prerm +++ b/omnibus/package-scripts/agent-rpm/prerm @@ -140,11 +140,11 @@ remove_remote_config_db() remove_persist_integration_files() { # Remove any file related to reinstalling non-core integrations (see python-scripts/packages.py for the names) - if [ -f "$INSTALL_DIR/.prerm_python_installed_packages.txt" ]; then - rm "$INSTALL_DIR/.prerm_python_installed_packages.txt" || true + if [ -f "$INSTALL_DIR/.pre_python_installed_packages.txt" ]; then + rm "$INSTALL_DIR/.pre_python_installed_packages.txt" || true fi - if [ -f "$INSTALL_DIR/.postinst_python_installed_packages.txt" ]; then - rm "$INSTALL_DIR/.postinst_python_installed_packages.txt" || true + if [ -f "$INSTALL_DIR/.post_python_installed_packages.txt" ]; then + rm "$INSTALL_DIR/.post_python_installed_packages.txt" || true fi if [ -f "$INSTALL_DIR/.diff_python_installed_packages.txt" ]; then rm "$INSTALL_DIR/.diff_python_installed_packages.txt" || true @@ -158,16 +158,6 @@ remove_fips_module() rm -rf "${INSTALL_DIR}/embedded/ssl/fipsmodule.cnf" || true } -case "$*" in - 1) - # We're upgrading. - if [ -f "$INSTALL_DIR/embedded/bin/python" ]; then - ${INSTALL_DIR}/embedded/bin/python "${INSTALL_DIR}/python-scripts/prerm.py" "${INSTALL_DIR}" || true - fi - ;; - *) - ;; -esac stop_agent deregister_agent remove_sysprobe_core_files diff --git a/omnibus/python-scripts/packages.py b/omnibus/python-scripts/packages.py index 655b6bd9cb1f1..2c7a658a133a9 100644 --- a/omnibus/python-scripts/packages.py +++ b/omnibus/python-scripts/packages.py @@ -1,47 +1,54 @@ import os -import pwd -import grp +if not os.name == 'nt': + import pwd + import grp +else: + import win32security import importlib.metadata -import pkg_resources -from packaging import version +import packaging import subprocess +import packaging.requirements +import packaging.version + DO_NOT_REMOVE_WARNING_HEADER = "# DO NOT REMOVE/MODIFY - used internally by installation process\n" -def run_command(command): +def run_command(args): """ Execute a shell command and return its output and errors. """ try: - print(f"Running command: '{command}'") - result = subprocess.run(command, shell=True, text=True, capture_output=True, check=True) + print(f"Running command: '{' '.join(args)}'") + result = subprocess.run(args, text=True, capture_output=True, check=True) return result.stdout, result.stderr except subprocess.CalledProcessError as e: print(f"Command '{e.cmd}' failed with return code: {e.returncode}") print(f"Error: {e.stderr}") return e.stdout, e.stderr -def extract_version(specifier): +def extract_version(req): """ - Extract version from the specifier string. 
+    Extract version from the specifier string using packaging.
     """
     try:
-        # Get the first version specifier from the specifier string
-        return str(next(iter(pkg_resources.Requirement.parse(f'{specifier}').specifier)))
-    except Exception:
+        # Parse the specifier and get the first version from the specifier set
+        version_spec = next(iter(req.specifier), None)
+        return str(version_spec.version) if version_spec else None
+    except Exception as e:
+        print(f"Error parsing specifier: {e}")
         return None

-def prerm_python_installed_packages_file(directory):
+def pre_python_installed_packages_file(directory):
     """
-    Create prerm installed packages file path.
+    Create pre installed packages file path.
     """
-    return os.path.join(directory, '.prerm_python_installed_packages.txt')
+    return os.path.join(directory, '.pre_python_installed_packages.txt')

-def postinst_python_installed_packages_file(directory):
+def post_python_installed_packages_file(directory):
     """
-    Create postinst installed packages file path.
+    Create post installed packages file path.
     """
-    return os.path.join(directory, '.postinst_python_installed_packages.txt')
+    return os.path.join(directory, '.post_python_installed_packages.txt')

 def diff_python_installed_packages_file(directory):
     """
@@ -55,6 +62,50 @@ def requirements_agent_release_file(directory):
     """
     return os.path.join(directory, 'requirements-agent-release.txt')

+def check_file_owner_system_windows(filename):
+    """
+    Check if the file is owned by the SYSTEM account or the Administrators group on Windows.
+    """
+    # check if file exists
+    if not os.path.exists(filename):
+        return True
+
+    # get NT System account SID
+    system_sid = win32security.ConvertStringSidToSid("S-1-5-18")
+
+    # get Administrators group SID
+    administrators_sid = win32security.ConvertStringSidToSid("S-1-5-32-544")
+
+    # get owner of file
+    sd = win32security.GetFileSecurity(filename, win32security.OWNER_SECURITY_INFORMATION)
+    owner_sid = sd.GetSecurityDescriptorOwner()
+
+    # print owner SID
+    print(f"{filename}: SID: {win32security.ConvertSidToStringSid(owner_sid)}")
+
+    return owner_sid == system_sid or owner_sid == administrators_sid
+
+def check_all_files_owner_system_windows(directory):
+    """
+    Check that all files used by this feature are owned by SYSTEM or Administrators.
+    This prevents files created prior to first install by unauthorized users
+    from being used to install arbitrary packages at install time.
+    The MSI sets the data directory permissions before running this script so we
+    don't have to worry about TOCTOU.
+    """
+    files = []
+    files.append(directory)
+    files.append(pre_python_installed_packages_file(directory))
+    files.append(post_python_installed_packages_file(directory))
+    files.append(diff_python_installed_packages_file(directory))
+
+    for file in files:
+        if not check_file_owner_system_windows(file):
+            print(f"{file} is not owned by SYSTEM or Administrators, it may have come from an untrusted source, aborting installation.")
+            return False
+    return True
+
+
 def create_python_installed_packages_file(filename):
     """
     Create a file listing the currently installed Python dependencies.
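For reference, the packages.py hunks in this file replace pkg_resources with the packaging library. The sketch below is not part of the patch; it only illustrates how the new extract_version-style parsing behaves. The helper name and requirement strings are invented for the example.

```python
# Minimal sketch of the packaging APIs used by the new extract_version() and by the
# version comparison in create_diff_installed_packages_file(). Illustrative only.
from packaging.requirements import Requirement
from packaging.version import parse


def first_pinned_version(req_line):
    """Return the first pinned version in a requirement line, or None if unpinned."""
    req = Requirement(req_line)
    spec = next(iter(req.specifier), None)  # a SpecifierSet iterates over Specifier objects
    return spec.version if spec else None


print(first_pinned_version("datadog-foo==1.2.3"))  # -> 1.2.3
print(first_pinned_version("datadog-foo"))         # -> None (no version specifier)

# Upgrades are detected with packaging.version.parse, which compares numerically:
print(parse("1.10.0") > parse("1.9.2"))            # -> True
```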
@@ -65,7 +116,8 @@ def create_python_installed_packages_file(filename): installed_packages = importlib.metadata.distributions() for dist in installed_packages: f.write(f"{dist.metadata['Name']}=={dist.version}\n") - os.chown(filename, pwd.getpwnam('dd-agent').pw_uid, grp.getgrnam('dd-agent').gr_gid) + if not os.name == 'nt': + os.chown(filename, pwd.getpwnam('dd-agent').pw_uid, grp.getgrnam('dd-agent').gr_gid) def create_diff_installed_packages_file(directory, old_file, new_file): """ @@ -82,34 +134,47 @@ def create_diff_installed_packages_file(directory, old_file, new_file): if old_req: _, old_req_value = old_req # Extract and compare versions - old_version_str = extract_version(str(old_req_value.specifier)) - new_version_str = extract_version(str(new_req_value.specifier)) + old_version_str = extract_version(old_req_value) + new_version_str = extract_version(new_req_value) if old_version_str and new_version_str: - if version.parse(new_version_str) > version.parse(old_version_str): + if packaging.version.parse(new_version_str) > packaging.version.parse(old_version_str): f.write(f"{new_req_value}\n") else: # Package is new in the new file; include it f.write(f"{new_req_value}\n") - os.chown(diff_file, pwd.getpwnam('dd-agent').pw_uid, grp.getgrnam('dd-agent').gr_gid) + if not os.name == 'nt': + os.chown(diff_file, pwd.getpwnam('dd-agent').pw_uid, grp.getgrnam('dd-agent').gr_gid) -def install_datadog_package(package): +def install_datadog_package(package, install_directory): """ Install Datadog integrations running datadog-agent command """ - print(f"Installing datadog integration: '{package}'") - run_command(f'datadog-agent integration install -t {package} -r') + if os.name == 'nt': + agent_cmd = os.path.join(install_directory, 'bin', 'agent.exe') + args = [agent_cmd, 'integration', 'install', '-t', package, '-r'] + else: + args = ['datadog-agent', 'integration', 'install', '-t', package, '-r'] + + run_command(args) def install_dependency_package(pip, package): """ Install python dependency running pip install command """ print(f"Installing python dependency: '{package}'") - run_command(f'{pip} install {package}') + command = pip.copy() + command.extend(['install', package]) + run_command(command) -def install_diff_packages_file(pip, filename, exclude_filename): +def install_diff_packages_file(install_directory, filename, exclude_filename): """ Install all Datadog integrations and python dependencies from a file """ + if os.name == 'nt': + python_path = os.path.join(install_directory, "embedded3", "python.exe") + pip = [python_path, '-m', 'pip'] + else: + pip = [os.path.join(install_directory, "embedded", "bin", "pip")] print(f"Installing python packages from: '{filename}'") install_packages = load_requirements(filename) exclude_packages = load_requirements(exclude_filename) @@ -118,7 +183,7 @@ def install_diff_packages_file(pip, filename, exclude_filename): print(f"Skipping '{install_package_name}' as it's already included in '{exclude_filename}' file") else: if install_package_line.startswith('datadog-'): - install_datadog_package(install_package_line) + install_datadog_package(install_package_line, install_directory) else: install_dependency_package(pip, install_package_line) @@ -154,7 +219,11 @@ def load_requirements(filename): else: # Add valid requirement to the list valid_requirements.append(req_stripped) - return {req.name: (req_stripped, req) for req_stripped, req in zip(valid_requirements, pkg_resources.parse_requirements(valid_requirements))} + # Parse valid requirements using 
packaging + return { + req.name: (req_stripped, req) + for req_stripped, req in zip(valid_requirements, (packaging.requirements.Requirement(r) for r in valid_requirements)) + } def cleanup_files(*files): """ diff --git a/omnibus/python-scripts/post.py b/omnibus/python-scripts/post.py new file mode 100644 index 0000000000000..28acd424a000b --- /dev/null +++ b/omnibus/python-scripts/post.py @@ -0,0 +1,68 @@ +""" +This module provides functions for managing Datadog integrations and Python dependencies after installation + +Usage: +- The script should be run with a single argument specifying the installation directory. +- Example: `python post.py /path/to/install/dir` +""" + +import os +import sys +import packages + +def post(install_directory, storage_location, skip_flag=False): + try: + if os.path.exists(install_directory) and os.path.exists(storage_location): + post_python_installed_packages_file = packages.post_python_installed_packages_file(storage_location) + packages.create_python_installed_packages_file(post_python_installed_packages_file) + flag_path = os.path.join(storage_location, ".install_python_third_party_deps") + if os.path.exists(flag_path) or skip_flag: + print(f"File '{flag_path}' found") + diff_python_installed_packages_file = packages.diff_python_installed_packages_file(storage_location) + if os.path.exists(diff_python_installed_packages_file): + requirements_agent_release_file = packages.requirements_agent_release_file(install_directory) + # don't delete the diff file. This handles install failure cases on windows + # on uninstall/install if install fails we need the diff file to retry the install + packages.install_diff_packages_file(install_directory, diff_python_installed_packages_file, requirements_agent_release_file) + else: + print(f"File '{diff_python_installed_packages_file}' not found.") + return 0 + else: + print(f"File '{flag_path}' not found: no third party integration will be installed.") + return 0 + else: + print(f"Directory '{install_directory}' and '{storage_location}' not found.") + return 1 + except Exception as e: + print(f"Error: {e}") + return 1 + return 0 + +if os.name == 'nt': + def main(): + if len(sys.argv) != 3: + print("Usage: post.py ") + return 1 + install_directory = sys.argv[1] + data_dog_data_dir = sys.argv[2] + # Check data dog data directory exists and files are owned by system + # should be run here to prevent security issues + if not os.path.exists(data_dog_data_dir): + print(f"Directory {data_dog_data_dir} does not exist.") + return 1 + if not packages.check_all_files_owner_system_windows(data_dog_data_dir): + print("Files are not owned by system.") + return 1 + # The MSI uses its own flag to control whether or not this script is executed + # so we skip/ignore the file-based flag used by other platforms. + return post(install_directory, data_dog_data_dir, skip_flag=True) +else: + def main(): + if len(sys.argv) != 2: + print("Usage: post.py ") + return 1 + install_directory = sys.argv[1] + return post(install_directory, install_directory) + +if __name__ == '__main__': + sys.exit(main()) diff --git a/omnibus/python-scripts/postinst.py b/omnibus/python-scripts/postinst.py deleted file mode 100644 index 3be9bcb912110..0000000000000 --- a/omnibus/python-scripts/postinst.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -This module provides functions for managing Datadog integrations and Python dependencies after installation - -Usage: -- The script should be run with a single argument specifying the installation directory. 
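The new post.py above refuses to run its Windows path unless packages.check_all_files_owner_system_windows passes. As background, here is a standalone sketch of that ownership test; it requires pywin32, and the path in the usage comment is made up for illustration.

```python
# Sketch of the Windows ownership guard used by the install scripts.
# S-1-5-18 is the well-known SID of the LocalSystem account and S-1-5-32-544 is the
# built-in Administrators group; files owned by any other account are rejected.
import os

import win32security  # provided by pywin32

TRUSTED_SIDS = (
    win32security.ConvertStringSidToSid("S-1-5-18"),      # NT AUTHORITY\SYSTEM
    win32security.ConvertStringSidToSid("S-1-5-32-544"),  # BUILTIN\Administrators
)


def owned_by_trusted_account(path):
    if not os.path.exists(path):
        # A missing file cannot have been planted by an unprivileged user.
        return True
    sd = win32security.GetFileSecurity(path, win32security.OWNER_SECURITY_INFORMATION)
    owner_sid = sd.GetSecurityDescriptorOwner()
    return any(owner_sid == sid for sid in TRUSTED_SIDS)


# Example (hypothetical path):
# owned_by_trusted_account(r"C:\ProgramData\Datadog\.diff_python_installed_packages.txt")
```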
-- Example: `python postinst.py /path/to/install/dir` -""" - -import os -import sys -import packages - -def main(): - try: - if len(sys.argv) != 2: - print("Usage: postinst.py ") - install_directory = sys.argv[1] - if os.path.exists(install_directory): - postinst_python_installed_packages_file = packages.postinst_python_installed_packages_file(install_directory) - packages.create_python_installed_packages_file(postinst_python_installed_packages_file) - flag_path = f"{install_directory}/.install_python_third_party_deps" - if os.path.exists(flag_path): - print(f"File '{flag_path}' found") - diff_python_installed_packages_file = packages.diff_python_installed_packages_file(install_directory) - if os.path.exists(diff_python_installed_packages_file): - requirements_agent_release_file = packages.requirements_agent_release_file(install_directory) - packages.install_diff_packages_file(f"{install_directory}/embedded/bin/pip", diff_python_installed_packages_file, requirements_agent_release_file) - packages.cleanup_files(diff_python_installed_packages_file) - else: - print(f"File '{diff_python_installed_packages_file}' not found.") - else: - print(f"File '{flag_path}' not found: no third party integration will be installed.") - else: - print(f"Directory '{install_directory}' not found.") - except Exception as e: - print(f"Error: {e}") - -if __name__ == '__main__': - main() diff --git a/omnibus/python-scripts/pre.py b/omnibus/python-scripts/pre.py new file mode 100644 index 0000000000000..8cf5f3548fdca --- /dev/null +++ b/omnibus/python-scripts/pre.py @@ -0,0 +1,60 @@ +""" +This module handles the cleanup of Datadog integrations and Python dependencies during package removal. + +Usage: +- The script should be run with a single argument specifying the installation directory. 
+- Example: `python pre.py /path/to/install/dir` +""" + +import os +import sys +import packages + +def pre(install_directory, storage_location): + try: + if os.path.exists(install_directory) and os.path.exists(storage_location): + post_python_installed_packages_file = packages.post_python_installed_packages_file(storage_location) + if os.path.exists(post_python_installed_packages_file): + pre_python_installed_packages_file = packages.pre_python_installed_packages_file(storage_location) + packages.create_python_installed_packages_file(pre_python_installed_packages_file) + packages.create_diff_installed_packages_file(storage_location, post_python_installed_packages_file, pre_python_installed_packages_file) + packages.cleanup_files(post_python_installed_packages_file, pre_python_installed_packages_file) + else: + print(f"File {post_python_installed_packages_file} does not exist.") + return 1 + else: + print(f"Directory {install_directory} and {storage_location} do not exist.") + return 1 + except Exception as e: + print(f"Error: {e}") + return 1 + return 0 + +if os.name == 'nt': + def main(): + if len(sys.argv) != 3: + print("Usage: pre.py ") + return 1 + install_directory = sys.argv[1] + data_dog_data_dir = sys.argv[2] + # Check data dog data directory exists and files are owned by system + # should be run here to prevent security issues + if not os.path.exists(data_dog_data_dir): + print(f"Directory {data_dog_data_dir} does not exist.") + return 1 + if not packages.check_all_files_owner_system_windows(data_dog_data_dir): + print("Files are not owned by system.") + return 1 + return pre(install_directory, data_dog_data_dir) +else: + def main(): + if len(sys.argv) != 2: + print("Usage: pre.py ") + return 1 + install_directory = sys.argv[1] + return pre(install_directory, install_directory) + + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/omnibus/python-scripts/prerm.py b/omnibus/python-scripts/prerm.py deleted file mode 100644 index a1b35875e5cd3..0000000000000 --- a/omnibus/python-scripts/prerm.py +++ /dev/null @@ -1,33 +0,0 @@ -""" -This module handles the cleanup of Datadog integrations and Python dependencies during package removal. - -Usage: -- The script should be run with a single argument specifying the installation directory. 
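pre.py above is the removal-time counterpart of post.py; together with the packages.py helpers they implement a record/diff/reinstall cycle for non-core integrations. The condensed sketch below is for orientation only and assumes it runs next to packages.py on Linux, where the install directory doubles as the storage location.

```python
# Condensed view of the upgrade cycle implemented by pre.py and post.py.
import packages

storage = "/opt/datadog-agent"  # illustrative; the real scripts receive this as an argument

# pre step (outgoing package): snapshot what is installed now and diff it against the
# snapshot written by the previous post step, keeping only the diff file around.
post_file = packages.post_python_installed_packages_file(storage)  # .post_python_installed_packages.txt
pre_file = packages.pre_python_installed_packages_file(storage)    # .pre_python_installed_packages.txt
packages.create_python_installed_packages_file(pre_file)
packages.create_diff_installed_packages_file(storage, post_file, pre_file)
packages.cleanup_files(post_file, pre_file)

# post step (incoming package): re-record what shipped and, if the opt-in flag file is
# present, reinstall anything from the diff that is not already listed in
# requirements-agent-release.txt (see packages.install_diff_packages_file).
packages.create_python_installed_packages_file(post_file)
```

Note that on Linux create_python_installed_packages_file chowns its output to dd-agent, so the sketch needs the same privileges as the real maintainer scripts.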
-- Example: `python prerm.py /path/to/install/dir` -""" - -import os -import sys -import packages - -def main(): - try: - if len(sys.argv) != 2: - print("Usage: prerm.py ") - install_directory = sys.argv[1] - if os.path.exists(install_directory): - postinst_python_installed_packages_file = packages.postinst_python_installed_packages_file(install_directory) - if os.path.exists(postinst_python_installed_packages_file): - prerm_python_installed_packages_file = packages.prerm_python_installed_packages_file(install_directory) - packages.create_python_installed_packages_file(prerm_python_installed_packages_file) - packages.create_diff_installed_packages_file(install_directory, postinst_python_installed_packages_file, prerm_python_installed_packages_file) - packages.cleanup_files(postinst_python_installed_packages_file, prerm_python_installed_packages_file) - else: - print(f"File {postinst_python_installed_packages_file} does not exist.") - else: - print(f"Directory {install_directory} does not exist.") - except Exception as e: - print(f"Error: {e}") - -if __name__ == '__main__': - main() diff --git a/pkg/aggregator/aggregator.go b/pkg/aggregator/aggregator.go index 0423f9871c65d..eeade81c429e0 100644 --- a/pkg/aggregator/aggregator.go +++ b/pkg/aggregator/aggregator.go @@ -30,7 +30,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/telemetry" - "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/flavor" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/sort" @@ -160,13 +159,13 @@ var ( tlmDogstatsdContextsByMtype = telemetry.NewGauge("aggregator", "dogstatsd_contexts_by_mtype", []string{"shard", "metric_type"}, "Count the number of dogstatsd contexts in the aggregator, by metric type") tlmDogstatsdContextsBytesByMtype = telemetry.NewGauge("aggregator", "dogstatsd_contexts_bytes_by_mtype", - []string{"shard", "metric_type", util.BytesKindTelemetryKey}, "Estimated count of bytes taken by contexts in the aggregator, by metric type") + []string{"shard", "metric_type", tags.BytesKindTelemetryKey}, "Estimated count of bytes taken by contexts in the aggregator, by metric type") tlmChecksContexts = telemetry.NewGauge("aggregator", "checks_contexts", []string{"shard"}, "Count the number of checks contexts in the check aggregator") tlmChecksContextsByMtype = telemetry.NewGauge("aggregator", "checks_contexts_by_mtype", []string{"shard", "metric_type"}, "Count the number of checks contexts in the check aggregator, by metric type") tlmChecksContextsBytesByMtype = telemetry.NewGauge("aggregator", "checks_contexts_bytes_by_mtype", - []string{"shard", "metric_type", util.BytesKindTelemetryKey}, "Estimated count of bytes taken by contexts in the check aggregator, by metric type") + []string{"shard", "metric_type", tags.BytesKindTelemetryKey}, "Estimated count of bytes taken by contexts in the check aggregator, by metric type") // Hold series to be added to aggregated series on each flush recurrentSeries metrics.Series diff --git a/pkg/aggregator/aggregator_test.go b/pkg/aggregator/aggregator_test.go index 2b447281eea77..97fb6e03c9466 100644 --- a/pkg/aggregator/aggregator_test.go +++ b/pkg/aggregator/aggregator_test.go @@ -29,7 +29,9 @@ import ( orchestratorforwarder "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator" haagent "github.com/DataDog/datadog-agent/comp/haagent/def" haagentmock 
"github.com/DataDog/datadog-agent/comp/haagent/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" + metricscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/metrics" @@ -794,10 +796,11 @@ type aggregatorDeps struct { Demultiplexer *AgentDemultiplexer OrchestratorFwd orchestratorforwarder.Component EventPlatformFwd eventplatform.Component + Compressor compression.Component } func createAggrDeps(t *testing.T) aggregatorDeps { - deps := fxutil.Test[TestDeps](t, defaultforwarder.MockModule(), core.MockBundle(), compressionmock.MockModule(), haagentmock.Module()) + deps := fxutil.Test[TestDeps](t, defaultforwarder.MockModule(), core.MockBundle(), logscompressionmock.MockModule(), metricscompressionmock.MockModule(), haagentmock.Module()) opts := demuxTestOptions() return aggregatorDeps{ diff --git a/pkg/aggregator/check_sampler_bench_test.go b/pkg/aggregator/check_sampler_bench_test.go index 1e3f5eae3fd8f..e5c6de05432a4 100644 --- a/pkg/aggregator/check_sampler_bench_test.go +++ b/pkg/aggregator/check_sampler_bench_test.go @@ -18,11 +18,12 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" + logscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" //nolint:revive // TODO(AML) Fix revive linter forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/resolver" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" @@ -30,7 +31,7 @@ import ( pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type benchmarkDeps struct { @@ -51,8 +52,8 @@ func benchmarkAddBucket(bucketValue int64, b *testing.B) { options := DefaultAgentDemultiplexerOptions() options.DontStartForwarders = true sharedForwarder := forwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), deps.Log, forwarderOpts) - orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) - eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(deps.Hostname)) + orchestratorForwarder := option.New[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) + eventPlatformForwarder := option.NewPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(deps.Hostname, logscompressionmock.NewMockCompressor())) haAgent := haagentmock.NewMockHaAgent() demux := InitAndStartAgentDemultiplexer(deps.Log, sharedForwarder, &orchestratorForwarder, options, 
eventPlatformForwarder, haAgent, deps.Compressor, taggerComponent, "hostname") defer demux.Stop(true) diff --git a/pkg/aggregator/context_resolver.go b/pkg/aggregator/context_resolver.go index 997e26b2f0939..48921a43719e9 100644 --- a/pkg/aggregator/context_resolver.go +++ b/pkg/aggregator/context_resolver.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/telemetry" - "github.com/DataDog/datadog-agent/pkg/util" + "github.com/DataDog/datadog-agent/pkg/util/size" ) // Context holds the elements that form a context, and can be serialized into a context key @@ -61,7 +61,7 @@ func (c *Context) DataSizeInBytes() int { } // Make sure we implement the interface -var _ util.HasSizeInBytes = &Context{} +var _ size.HasSizeInBytes = &Context{} // contextResolver allows tracking and expiring contexts type contextResolver struct { @@ -171,8 +171,8 @@ func (cr *contextResolver) updateMetrics(countsByMTypeGauge telemetry.Gauge, byt continue } countsByMTypeGauge.WithValues(cr.id, mtype).Set(float64(count)) - bytesByMTypeGauge.Set(float64(bytes), cr.id, mtype, util.BytesKindStruct) - bytesByMTypeGauge.Set(float64(dataBytes), cr.id, mtype, util.BytesKindData) + bytesByMTypeGauge.Set(float64(bytes), cr.id, mtype, tags.BytesKindStruct) + bytesByMTypeGauge.Set(float64(dataBytes), cr.id, mtype, tags.BytesKindData) } } diff --git a/pkg/aggregator/demultiplexer_agent.go b/pkg/aggregator/demultiplexer_agent.go index a519383eea73e..1e39c83ba5dd4 100644 --- a/pkg/aggregator/demultiplexer_agent.go +++ b/pkg/aggregator/demultiplexer_agent.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" orchestratorforwarder "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator" haagent "github.com/DataDog/datadog-agent/comp/haagent/def" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" diff --git a/pkg/aggregator/demultiplexer_agent_test.go b/pkg/aggregator/demultiplexer_agent_test.go index ce59d5ea8943f..052797b064836 100644 --- a/pkg/aggregator/demultiplexer_agent_test.go +++ b/pkg/aggregator/demultiplexer_agent_test.go @@ -25,8 +25,9 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" haagent "github.com/DataDog/datadog-agent/comp/haagent/def" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -114,7 +115,12 @@ func TestDemuxNoAggOptionEnabled(t *testing.T) { func TestDemuxNoAggOptionIsDisabledByDefault(t *testing.T) { opts := demuxTestOptions() - deps := fxutil.Test[TestDeps](t, defaultforwarder.MockModule(), core.MockBundle(), compressionmock.MockModule(), 
haagentmock.Module()) + deps := fxutil.Test[TestDeps](t, + defaultforwarder.MockModule(), + core.MockBundle(), + haagentmock.Module(), + logscompression.MockModule(), + metricscompression.MockModule()) demux := InitAndStartAgentDemultiplexerForTest(deps, opts, "") require.False(t, demux.Options().EnableNoAggregationPipeline, "the no aggregation pipeline should be disabled by default") @@ -171,8 +177,9 @@ func createDemultiplexerAgentTestDeps(t *testing.T) DemultiplexerAgentTestDeps { core.MockBundle(), orchestratorimpl.MockModule(), eventplatformimpl.MockModule(), + logscompression.MockModule(), + metricscompression.MockModule(), haagentmock.Module(), - compressionmock.MockModule(), fx.Provide(func() tagger.Component { return taggerComponent }), ) } diff --git a/pkg/aggregator/demultiplexer_mock.go b/pkg/aggregator/demultiplexer_mock.go index 6d79e009338b3..66a007db4d4f9 100644 --- a/pkg/aggregator/demultiplexer_mock.go +++ b/pkg/aggregator/demultiplexer_mock.go @@ -17,23 +17,25 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" haagent "github.com/DataDog/datadog-agent/comp/haagent/def" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - "github.com/DataDog/datadog-agent/pkg/util/optional" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // TestDeps contains dependencies for InitAndStartAgentDemultiplexerForTest type TestDeps struct { fx.In - Log log.Component - Hostname hostname.Component - SharedForwarder defaultforwarder.Component - Compressor compression.Component - HaAgent haagent.Component + Log log.Component + Hostname hostname.Component + SharedForwarder defaultforwarder.Component + LogsCompression logscompression.Component + MetricsCompression metricscompression.Component + HaAgent haagent.Component } // InitAndStartAgentDemultiplexerForTest initializes an aggregator for tests. 
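For orientation, a minimal sketch of the test wiring implied by the hunks above; it only uses modules that already appear in this diff. The single compression mock is replaced by separate logs and metrics compression mocks, and both are supplied to fxutil.Test:

    // Hypothetical helper (not part of this PR) mirroring aggregator_test.go:
    // the metrics compressor feeds the serializer/demultiplexer, while the
    // logs compressor is consumed by the event platform forwarder.
    func newAggregatorTestDeps(t *testing.T) TestDeps {
        return fxutil.Test[TestDeps](t,
            defaultforwarder.MockModule(),
            core.MockBundle(),
            haagentmock.Module(),
            logscompressionmock.MockModule(),    // comp/serializer/logscompression/fx-mock
            metricscompressionmock.MockModule(), // comp/serializer/metricscompression/fx-mock
        )
    }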
func InitAndStartAgentDemultiplexerForTest(deps TestDeps, options AgentDemultiplexerOptions, hostname string) *AgentDemultiplexer { - orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) - eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(deps.Hostname)) - return InitAndStartAgentDemultiplexer(deps.Log, deps.SharedForwarder, &orchestratorForwarder, options, eventPlatformForwarder, deps.HaAgent, deps.Compressor, nooptagger.NewComponent(), hostname) + orchestratorForwarder := option.New[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) + eventPlatformForwarder := option.NewPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(deps.Hostname, deps.LogsCompression)) + return InitAndStartAgentDemultiplexer(deps.Log, deps.SharedForwarder, &orchestratorForwarder, options, eventPlatformForwarder, deps.HaAgent, deps.MetricsCompression, nooptagger.NewComponent(), hostname) } diff --git a/pkg/aggregator/demultiplexer_serverless.go b/pkg/aggregator/demultiplexer_serverless.go index c77982bc8be1e..d80128fa426d0 100644 --- a/pkg/aggregator/demultiplexer_serverless.go +++ b/pkg/aggregator/demultiplexer_serverless.go @@ -14,12 +14,12 @@ import ( logimpl "github.com/DataDog/datadog-agent/comp/core/log/impl" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" "github.com/DataDog/datadog-agent/pkg/aggregator/internal/tags" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer" + "github.com/DataDog/datadog-agent/pkg/util/compression/selector" "github.com/DataDog/datadog-agent/pkg/util/hostname" ) @@ -49,7 +49,7 @@ func InitAndStartServerlessDemultiplexer(keysPerDomain map[string][]string, forw logger := logimpl.NewTemporaryLoggerWithoutInit() forwarder := forwarder.NewSyncForwarder(pkgconfigsetup.Datadog(), logger, keysPerDomain, forwarderTimeout) h, _ := hostname.Get(context.Background()) - serializer := serializer.NewSerializer(forwarder, nil, selector.NewCompressor(pkgconfigsetup.Datadog()), pkgconfigsetup.Datadog(), h) + serializer := serializer.NewSerializer(forwarder, nil, selector.FromConfig(pkgconfigsetup.Datadog()), pkgconfigsetup.Datadog(), h) metricSamplePool := metrics.NewMetricSamplePool(MetricSamplePoolBatchSize, utils.IsTelemetryEnabled(pkgconfigsetup.Datadog())) tagsStore := tags.NewStore(pkgconfigsetup.Datadog().GetBool("aggregator_use_tags_store"), "timesampler") diff --git a/pkg/aggregator/demultiplexer_test.go b/pkg/aggregator/demultiplexer_test.go index 72ed3464eb346..307eceda9e76b 100644 --- a/pkg/aggregator/demultiplexer_test.go +++ b/pkg/aggregator/demultiplexer_test.go @@ -22,8 +22,9 @@ import ( orchestratorForwarder "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator" orchestratorForwarderImpl "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorimpl" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + 
compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" + metricscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -295,7 +296,8 @@ func createDemuxDepsWithOrchestratorFwd( orchestratorForwarderImpl.Module(orchestratorParams), eventplatformimpl.Module(eventPlatformParams), eventplatformreceiverimpl.Module(), - compressionmock.MockModule(), + logscompressionmock.MockModule(), + metricscompressionmock.MockModule(), haagentmock.Module(), ) deps := fxutil.Test[internalDemutiplexerDeps](t, modules) @@ -304,6 +306,7 @@ func createDemuxDepsWithOrchestratorFwd( TestDeps: deps.TestDeps, Demultiplexer: InitAndStartAgentDemultiplexer(deps.Log, deps.SharedForwarder, deps.OrchestratorForwarder, opts, deps.Eventplatform, deps.HaAgent, deps.Compressor, nooptagger.NewComponent(), ""), OrchestratorFwd: deps.OrchestratorForwarder, + Compressor: deps.Compressor, EventPlatformFwd: deps.Eventplatform, } } diff --git a/pkg/aggregator/internal/tags/store.go b/pkg/aggregator/internal/tags/store.go index f23a0f8ddfdeb..a99de61d4d4c4 100644 --- a/pkg/aggregator/internal/tags/store.go +++ b/pkg/aggregator/internal/tags/store.go @@ -12,11 +12,10 @@ import ( "go.uber.org/atomic" - "github.com/DataDog/datadog-agent/pkg/util" - "github.com/DataDog/datadog-agent/pkg/aggregator/ckey" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/size" ) // Entry is used to keep track of tag slices shared by the contexts. @@ -35,15 +34,15 @@ type Entry struct { // SizeInBytes returns the size of the Entry in bytes. func (e *Entry) SizeInBytes() int { - return util.SizeOfStringSlice(e.tags) + 8 + return size.SizeOfStringSlice(e.tags) + 8 } // DataSizeInBytes returns the size of the Entry data in bytes. func (e *Entry) DataSizeInBytes() int { - return util.DataSizeOfStringSlice(e.tags) + return size.DataSizeOfStringSlice(e.tags) } -var _ util.HasSizeInBytes = (*Entry)(nil) +var _ size.HasSizeInBytes = (*Entry)(nil) // Tags returns the strings stored in the Entry. The slice may be // shared with other users and should not be modified. Users can keep @@ -158,8 +157,8 @@ func (tc *Store) updateTelemetry(s *entryStats) { tlmTagsetMinTags.Set(float64(s.minSize), t.name) tlmTagsetMaxTags.Set(float64(s.maxSize), t.name) tlmTagsetSumTags.Set(float64(s.sumSize), t.name) - tlmTagsetSumTagBytes.Set(float64(s.sumSizeBytes), t.name, util.BytesKindStruct) - tlmTagsetSumTagBytes.Set(float64(s.sumDataSizeBytes), t.name, util.BytesKindData) + tlmTagsetSumTagBytes.Set(float64(s.sumSizeBytes), t.name, BytesKindStruct) + tlmTagsetSumTagBytes.Set(float64(s.sumDataSizeBytes), t.name, BytesKindData) } func newCounter(name string, help string, tags ...string) telemetry.Counter { @@ -172,6 +171,15 @@ func newGauge(name string, help string, tags ...string) telemetry.Gauge { append([]string{"cache_instance_name"}, tags...), help) } +var ( + // BytesKindTelemetryKey is the tag key used to identify the kind of telemetry value. + BytesKindTelemetryKey = "bytes_kind" + // BytesKindStruct is the tag value used to mark bytes as struct. + BytesKindStruct = "struct" + // BytesKindData is the tag value used to mark bytes as data. Those are likely to be interned strings. 
+ BytesKindData = "data" +) + var ( tlmHits = newCounter("hits_total", "number of times cache already contained the tags") tlmMiss = newCounter("miss_total", "number of times cache did not contain the tags") @@ -181,7 +189,7 @@ var ( tlmTagsetMaxTags = newGauge("tagset_max_tags", "maximum number of tags in a tagset") tlmTagsetSumTags = newGauge("tagset_sum_tags", "total number of tags stored in all tagsets by the cache") tlmTagsetRefsCnt = newGauge("tagset_refs_count", "distribution of usage count of tagsets in the cache", "ge") - tlmTagsetSumTagBytes = newGauge("tagset_sum_tags_bytes", "total number of bytes stored in all tagsets by the cache", util.BytesKindTelemetryKey) + tlmTagsetSumTagBytes = newGauge("tagset_sum_tags_bytes", "total number of bytes stored in all tagsets by the cache", BytesKindTelemetryKey) ) type storeTelemetry struct { diff --git a/pkg/aggregator/mocksender/mocksender.go b/pkg/aggregator/mocksender/mocksender.go index d20e0a3d19081..c19b806fb8a2c 100644 --- a/pkg/aggregator/mocksender/mocksender.go +++ b/pkg/aggregator/mocksender/mocksender.go @@ -20,12 +20,13 @@ import ( nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // NewMockSender initiates the aggregator and returns a @@ -41,10 +42,10 @@ func CreateDefaultDemultiplexer() *aggregator.AgentDemultiplexer { opts.DontStartForwarders = true log := logimpl.NewTemporaryLoggerWithoutInit() sharedForwarder := defaultforwarder.NewDefaultForwarder(pkgconfigsetup.Datadog(), log, defaultforwarder.NewOptions(pkgconfigsetup.Datadog(), log, nil)) - orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) - eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostnameimpl.NewHostnameService())) + orchestratorForwarder := option.New[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) + eventPlatformForwarder := option.NewPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostnameimpl.NewHostnameService(), logscompressionmock.NewMockCompressor())) taggerComponent := nooptagger.NewComponent() - return aggregator.InitAndStartAgentDemultiplexer(log, sharedForwarder, &orchestratorForwarder, opts, eventPlatformForwarder, haagentmock.NewMockHaAgent(), compressionmock.NewMockCompressor(), taggerComponent, "") + return aggregator.InitAndStartAgentDemultiplexer(log, sharedForwarder, &orchestratorForwarder, opts, eventPlatformForwarder, haagentmock.NewMockHaAgent(), metricscompressionmock.NewMockCompressor(), taggerComponent, "") } // NewMockSenderWithSenderManager returns a functional mocked Sender for testing diff --git a/pkg/aggregator/sender_test.go b/pkg/aggregator/sender_test.go index 4479842cc18da..68a02066f5a9a 100644 --- a/pkg/aggregator/sender_test.go +++ 
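As a usage note for the BytesKind* keys defined here, a sketch of how a caller could register and feed a gauge keyed by bytes_kind (the gauge name and the structBytes/dataBytes/shardID variables are made up; the telemetry calls mirror the surrounding hunks):

    // One gauge reports two values per shard: the size of the Go structs
    // themselves (struct) and the size of the referenced, likely interned,
    // string data (data).
    exampleBytes := telemetry.NewGauge("aggregator", "example_contexts_bytes",
        []string{"shard", tags.BytesKindTelemetryKey}, "Example byte count, by kind")
    exampleBytes.Set(float64(structBytes), shardID, tags.BytesKindStruct)
    exampleBytes.Set(float64(dataBytes), shardID, tags.BytesKindData)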
b/pkg/aggregator/sender_test.go @@ -24,13 +24,14 @@ import ( "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform/eventplatformimpl" haagentmock "github.com/DataDog/datadog-agent/comp/haagent/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type senderWithChans struct { @@ -57,9 +58,9 @@ func initSender(id checkid.ID, defaultHostname string) (s senderWithChans) { func testDemux(log log.Component, hostname hostname.Component) *AgentDemultiplexer { opts := DefaultAgentDemultiplexerOptions() opts.DontStartForwarders = true - orchestratorForwarder := optional.NewOption[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) - eventPlatformForwarder := optional.NewOptionPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostname)) - demux := initAgentDemultiplexer(log, NewForwarderTest(log), &orchestratorForwarder, opts, eventPlatformForwarder, haagentmock.NewMockHaAgent(), compressionmock.NewMockCompressor(), nooptagger.NewComponent(), defaultHostname) + orchestratorForwarder := option.New[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{}) + eventPlatformForwarder := option.NewPtr[eventplatform.Forwarder](eventplatformimpl.NewNoopEventPlatformForwarder(hostname, logscompressionmock.NewMockCompressor())) + demux := initAgentDemultiplexer(log, NewForwarderTest(log), &orchestratorForwarder, opts, eventPlatformForwarder, haagentmock.NewMockHaAgent(), metricscompressionmock.NewMockCompressor(), nooptagger.NewComponent(), defaultHostname) return demux } diff --git a/pkg/api/go.mod b/pkg/api/go.mod index ab50796cf7a58..408551f054c98 100644 --- a/pkg/api/go.mod +++ b/pkg/api/go.mod @@ -27,7 +27,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../util/system @@ -40,11 +40,11 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 + 
github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 github.com/stretchr/testify v1.10.0 ) @@ -55,18 +55,18 @@ require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -79,16 +79,16 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -100,8 +100,8 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git 
a/pkg/api/go.sum b/pkg/api/go.sum index dd16364891695..77eac717c35df 100644 --- a/pkg/api/go.sum +++ b/pkg/api/go.sum @@ -72,7 +72,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -110,8 +109,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -138,8 +137,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -156,8 +155,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 
h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -171,8 +170,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -183,8 +182,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -239,8 +238,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -277,8 +276,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -306,8 +305,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/api/util/util.go b/pkg/api/util/util.go index b22c2e7b04ad0..ba3add696f5a9 100644 --- a/pkg/api/util/util.go +++ b/pkg/api/util/util.go @@ -10,6 +10,7 @@ import ( "crypto/subtle" "crypto/tls" "crypto/x509" + "encoding/pem" "fmt" "net" "net/http" @@ -41,7 +42,7 @@ var ( clientTLSConfig = &tls.Config{ InsecureSkipVerify: true, } - serverTLSConfig *tls.Config + serverTLSConfig = &tls.Config{} initSource source ) @@ -164,7 +165,12 @@ func GetTLSServerConfig() *tls.Config { tokenLock.RLock() defer tokenLock.RUnlock() if initSource == uninitialized { - log.Errorf("GetTLSServerConfig was called before being initialized (through SetAuthToken or CreateAndSetAuthToken function)") + log.Errorf("GetTLSServerConfig was called before being initialized (through SetAuthToken or CreateAndSetAuthToken function), generating a self-signed certificate") + config, err := generateSelfSignedCert() + if err != nil { + log.Error(err.Error()) + } + serverTLSConfig = &config } return serverTLSConfig.Clone() } @@ -275,3 +281,28 @@ func IsIPv6(ip string) bool { parsed := net.ParseIP(ip) return parsed != nil && parsed.To4() == nil } + +func generateSelfSignedCert() (tls.Config, error) { + // create cert + hosts := []string{"127.0.0.1", "localhost"} + _, rootCertPEM, rootKey, err := pkgtoken.GenerateRootCert(hosts, 2048) + if 
err != nil { + return tls.Config{}, fmt.Errorf("unable to generate a self-signed certificate: %v", err) + } + + // PEM encode the private key + rootKeyPEM := pem.EncodeToMemory(&pem.Block{ + Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(rootKey), + }) + + // Create a TLS cert using the private key and certificate + rootTLSCert, err := tls.X509KeyPair(rootCertPEM, rootKeyPEM) + if err != nil { + return tls.Config{}, fmt.Errorf("unable to generate a self-signed certificate: %v", err) + + } + + return tls.Config{ + Certificates: []tls.Certificate{rootTLSCert}, + }, nil +} diff --git a/pkg/api/util/util_test.go b/pkg/api/util/util_test.go index 2e44817c8d308..5306c4c6bfe44 100644 --- a/pkg/api/util/util_test.go +++ b/pkg/api/util/util_test.go @@ -6,9 +6,13 @@ package util import ( + "crypto/tls" + "net" + "net/http" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestIsIPv6(t *testing.T) { @@ -50,3 +54,42 @@ func TestIsIPv6(t *testing.T) { }) } } + +func TestStartingServerClientWithUninitializedTLS(t *testing.T) { + // re initialize the client and server tls config + initSource = uninitialized + clientTLSConfig = &tls.Config{ + InsecureSkipVerify: true, + } + + // create a server with the provided tls server config + l, err := net.Listen("tcp", ":0") + require.NoError(t, err) + + server := &http.Server{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + }), + } + + tlsListener := tls.NewListener(l, GetTLSServerConfig()) + + go server.Serve(tlsListener) //nolint:errcheck + defer server.Close() + + // create a http client with the provided tls client config + _, port, err := net.SplitHostPort(l.Addr().String()) + require.NoError(t, err) + + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: GetTLSClientConfig(), + }, + } + + // make a request to the server + resp, err := client.Get("https://localhost:" + port) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, http.StatusOK, resp.StatusCode) +} diff --git a/pkg/cli/subcommands/check/command.go b/pkg/cli/subcommands/check/command.go index d914682340504..e7ec8a6e9b5b2 100644 --- a/pkg/cli/subcommands/check/command.go +++ b/pkg/cli/subcommands/check/command.go @@ -68,7 +68,8 @@ import ( "github.com/DataDog/datadog-agent/comp/metadata/inventorychecks/inventorychecksimpl" "github.com/DataDog/datadog-agent/comp/remote-config/rcservice" "github.com/DataDog/datadog-agent/comp/remote-config/rcservicemrf" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx" "github.com/DataDog/datadog-agent/pkg/aggregator" "github.com/DataDog/datadog-agent/pkg/cli/standalone" pkgcollector "github.com/DataDog/datadog-agent/pkg/collector" @@ -82,7 +83,7 @@ import ( statuscollector "github.com/DataDog/datadog-agent/pkg/status/collector" "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/scrubber" ) @@ -178,12 +179,13 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { autodiscoveryimpl.Module(), forwarder.Bundle(defaultforwarder.NewParams(defaultforwarder.WithNoopForwarder())), 
inventorychecksimpl.Module(), + logscompression.Module(), + metricscompression.Module(), // inventorychecksimpl depends on a collector and serializer when created to send payload. // Here we just want to collect metadata to be displayed, so we don't need a collector. collector.NoneModule(), fx.Supply(status.NewInformationProvider(statuscollector.Provider{})), fx.Provide(func() serializer.MetricSerializer { return nil }), - compressionfx.Module(), // Initializing the aggregator with a flush interval of 0 (to disable the flush goroutines) demultiplexerimpl.Module(demultiplexerimpl.NewDefaultParams(demultiplexerimpl.WithFlushInterval(0))), orchestratorForwarderImpl.Module(orchestratorForwarderImpl.NewNoopParams()), @@ -202,10 +204,10 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { // TODO(components): this is a temporary hack as the StartServer() method of the API package was previously called with nil arguments // This highlights the fact that the API Server created by JMX (through ExecJmx... function) should be different from the ones created // in others commands such as run. - fx.Supply(optional.NewNoneOption[rcservice.Component]()), - fx.Supply(optional.NewNoneOption[rcservicemrf.Component]()), - fx.Supply(optional.NewNoneOption[logagent.Component]()), - fx.Supply(optional.NewNoneOption[integrations.Component]()), + fx.Supply(option.None[rcservice.Component]()), + fx.Supply(option.None[rcservicemrf.Component]()), + fx.Supply(option.None[logagent.Component]()), + fx.Supply(option.None[integrations.Component]()), fx.Provide(func() server.Component { return nil }), fx.Provide(func() replay.Component { return nil }), fx.Provide(func() pidmap.Component { return nil }), @@ -263,10 +265,10 @@ func run( agentAPI internalAPI.Component, invChecks inventorychecks.Component, statusComponent status.Component, - collector optional.Option[collector.Component], + collector option.Option[collector.Component], jmxLogger jmxlogger.Component, telemetry telemetry.Component, - logReceiver optional.Option[integrations.Component], + logReceiver option.Option[integrations.Component], ) error { previousIntegrationTracing := false previousIntegrationTracingExhaustive := false diff --git a/pkg/cli/subcommands/clusterchecks/command.go b/pkg/cli/subcommands/clusterchecks/command.go index 1b41623dd4a04..978ea2ba2d457 100644 --- a/pkg/cli/subcommands/clusterchecks/command.go +++ b/pkg/cli/subcommands/clusterchecks/command.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/flare" + clusterAgentFlare "github.com/DataDog/datadog-agent/pkg/flare/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -119,11 +119,11 @@ func bundleParams(globalParams GlobalParams) core.BundleParams { //nolint:revive // TODO(CINT) Fix revive linter func run(_ log.Component, _ config.Component, cliParams *cliParams) error { - if err := flare.GetClusterChecks(color.Output, cliParams.checkName); err != nil { + if err := clusterAgentFlare.GetClusterChecks(color.Output, cliParams.checkName); err != nil { return err } - return flare.GetEndpointsChecks(color.Output, cliParams.checkName) + return clusterAgentFlare.GetEndpointsChecks(color.Output, cliParams.checkName) } func rebalance(_ log.Component, config config.Component, cliParams *cliParams) error { diff --git 
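For readers tracking the pkg/util/optional to pkg/util/option rename applied throughout this diff, a short sketch of the constructor mapping; only call sites already shown above are used, and no new API is implied:

    // optional.NewOption[T](v)    -> option.New[T](v)
    // optional.NewOptionPtr[T](v) -> option.NewPtr[T](v)
    // optional.NewNoneOption[T]() -> option.None[T]()
    orchestratorFwd := option.New[defaultforwarder.Forwarder](defaultforwarder.NoopForwarder{})
    noRemoteConfig := option.None[rcservice.Component]() // passed to fx.Supply when remote config is absent
    _, _ = orchestratorFwd, noRemoteConfig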
a/pkg/cli/subcommands/dcaconfigcheck/command.go b/pkg/cli/subcommands/dcaconfigcheck/command.go index 0d697aed77e5f..87af67896b32f 100644 --- a/pkg/cli/subcommands/dcaconfigcheck/command.go +++ b/pkg/cli/subcommands/dcaconfigcheck/command.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core" "github.com/DataDog/datadog-agent/comp/core/config" log "github.com/DataDog/datadog-agent/comp/core/log/def" - "github.com/DataDog/datadog-agent/pkg/flare" + clusterAgentFlare "github.com/DataDog/datadog-agent/pkg/flare/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/fxutil" ) @@ -62,7 +62,7 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { } func run(_ log.Component, _ config.Component, cliParams *cliParams) error { - if err := flare.GetClusterAgentConfigCheck(color.Output, cliParams.verbose); err != nil { + if err := clusterAgentFlare.GetClusterAgentConfigCheck(color.Output, cliParams.verbose); err != nil { return fmt.Errorf("the agent ran into an error while checking config: %w", err) } diff --git a/pkg/cli/subcommands/dcaflare/command.go b/pkg/cli/subcommands/dcaflare/command.go index 336803a281723..ddf4852e7affe 100644 --- a/pkg/cli/subcommands/dcaflare/command.go +++ b/pkg/cli/subcommands/dcaflare/command.go @@ -20,12 +20,12 @@ import ( "github.com/DataDog/datadog-agent/comp/core/flare/helpers" log "github.com/DataDog/datadog-agent/comp/core/log/def" "github.com/DataDog/datadog-agent/comp/core/secrets" - compressionfx "github.com/DataDog/datadog-agent/comp/serializer/compression/fx" "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/config/settings" settingshttp "github.com/DataDog/datadog-agent/pkg/config/settings/http" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/flare" + clusterAgentFlare "github.com/DataDog/datadog-agent/pkg/flare/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/defaultpaths" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/input" @@ -88,7 +88,6 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { LogParams: log.ForOneShot(LoggerName, DefaultLogLevel, true), }), core.Bundle(), - compressionfx.Module(), ) }, } @@ -105,8 +104,8 @@ func MakeCommand(globalParamsGetter func() GlobalParams) *cobra.Command { return cmd } -func readProfileData(seconds int) (flare.ProfileData, error) { - pdata := flare.ProfileData{} +func readProfileData(seconds int) (clusterAgentFlare.ProfileData, error) { + pdata := clusterAgentFlare.ProfileData{} c := util.GetClient(false) fmt.Fprintln(color.Output, color.BlueString("Getting a %ds profile snapshot from datadog-cluster-agent.", seconds)) @@ -152,7 +151,7 @@ func readProfileData(seconds int) (flare.ProfileData, error) { func run(cliParams *cliParams, _ config.Component) error { fmt.Fprintln(color.Output, color.BlueString("Asking the Cluster Agent to build the flare archive.")) var ( - profile flare.ProfileData + profile clusterAgentFlare.ProfileData e error ) c := util.GetClient(false) // FIX: get certificates right then make this true @@ -208,7 +207,7 @@ func run(cliParams *cliParams, _ config.Component) error { fmt.Fprintln(color.Output, color.RedString("The agent was unable to make a full flare: %s.", e.Error())) } fmt.Fprintln(color.Output, color.YellowString("Initiating flare locally, some logs will be missing.")) - filePath, e = flare.CreateDCAArchive(true, defaultpaths.GetDistPath(), logFile, profile, nil) + filePath, 
e = clusterAgentFlare.CreateDCAArchive(true, defaultpaths.GetDistPath(), logFile, profile, nil) if e != nil { fmt.Printf("The flare zipfile failed to be created: %s\n", e) return e diff --git a/pkg/clusteragent/admission/controllers/webhook/controller_v1.go b/pkg/clusteragent/admission/controllers/webhook/controller_v1.go index 4cd136fa776d6..b65ea3d7b6fd3 100644 --- a/pkg/clusteragent/admission/controllers/webhook/controller_v1.go +++ b/pkg/clusteragent/admission/controllers/webhook/controller_v1.go @@ -212,6 +212,19 @@ func (c *ControllerV1) reconcile() error { log.Errorf("Failed to update Mutating Webhook %s: %v", c.config.getWebhookName(), err) } } + } else { + mutatingWebhook, err := c.mutatingWebhooksLister.Get(c.config.getWebhookName()) + if err != nil { + if !errors.IsNotFound(err) { + log.Errorf("Failed to get Mutating Webhook %s: %v", c.config.getWebhookName(), err) + } + } else { + log.Infof("Mutating Webhook %s was found, deleting it", c.config.getWebhookName()) + err := c.deleteMutatingWebhook(mutatingWebhook) + if err != nil { + log.Errorf("Failed to delete Mutating Webhook %s: %v", c.config.getWebhookName(), err) + } + } } if c.config.validationEnabled { @@ -231,6 +244,19 @@ func (c *ControllerV1) reconcile() error { log.Errorf("Failed to update Validating Webhook %s: %v", c.config.getWebhookName(), err) } } + } else { + validatingWebhook, err := c.validatingWebhooksLister.Get(c.config.getWebhookName()) + if err != nil { + if !errors.IsNotFound(err) { + log.Errorf("Failed to get Validating Webhook %s: %v", c.config.getWebhookName(), err) + } + } else { + log.Infof("Validating Webhook %s was found, deleting it", c.config.getWebhookName()) + err := c.deleteValidatingWebhook(validatingWebhook) + if err != nil { + log.Errorf("Failed to delete Validating Webhook %s: %v", c.config.getWebhookName(), err) + } + } } return err @@ -273,6 +299,12 @@ func (c *ControllerV1) newValidatingWebhooks(secret *corev1.Secret) []admiv1.Val return webhooks } +// deleteValidatingWebhook deletes the ValidatingWebhookConfiguration object. +func (c *ControllerV1) deleteValidatingWebhook(webhook *admiv1.ValidatingWebhookConfiguration) error { + err := c.clientSet.AdmissionregistrationV1().ValidatingWebhookConfigurations().Delete(context.TODO(), webhook.Name, metav1.DeleteOptions{}) + return err +} + // createMutatingWebhook creates a new MutatingWebhookConfiguration object. func (c *ControllerV1) createMutatingWebhook(secret *corev1.Secret) error { webhook := &admiv1.MutatingWebhookConfiguration{ @@ -310,6 +342,12 @@ func (c *ControllerV1) newMutatingWebhooks(secret *corev1.Secret) []admiv1.Mutat return webhooks } +// deleteMutatingWebhook deletes the MutatingWebhookConfiguration object. +func (c *ControllerV1) deleteMutatingWebhook(webhook *admiv1.MutatingWebhookConfiguration) error { + err := c.clientSet.AdmissionregistrationV1().MutatingWebhookConfigurations().Delete(context.TODO(), webhook.Name, metav1.DeleteOptions{}) + return err +} + // generateTemplates generates the webhook templates from the configuration. 
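A condensed sketch of the reconcile behavior added above, shown for the v1 controller (the v1beta1 controller mirrors it); the mutationEnabled field name is assumed from context, everything else follows the hunks directly:

    if c.config.mutationEnabled {
        // create or update the MutatingWebhookConfiguration, as before
    } else if webhook, err := c.mutatingWebhooksLister.Get(c.config.getWebhookName()); err == nil {
        // mutation was turned off: delete any configuration left behind
        if err := c.deleteMutatingWebhook(webhook); err != nil {
            log.Errorf("Failed to delete Mutating Webhook %s: %v", c.config.getWebhookName(), err)
        }
    } else if !errors.IsNotFound(err) {
        log.Errorf("Failed to get Mutating Webhook %s: %v", c.config.getWebhookName(), err)
    }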
func (c *ControllerV1) generateTemplates() { // Generate validating webhook templates diff --git a/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1.go b/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1.go index 201a784ac7b8d..fbcf7bee4135d 100644 --- a/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1.go +++ b/pkg/clusteragent/admission/controllers/webhook/controller_v1beta1.go @@ -213,6 +213,19 @@ func (c *ControllerV1beta1) reconcile() error { log.Errorf("Failed to update Mutating Webhook %s: %v", c.config.getWebhookName(), err) } } + } else { + mutatingWebhook, err := c.mutatingWebhooksLister.Get(c.config.getWebhookName()) + if err != nil { + if !errors.IsNotFound(err) { + log.Errorf("Failed to get Mutating Webhook %s: %v", c.config.getWebhookName(), err) + } + } else { + log.Infof("Mutating Webhook %s was found, deleting it", c.config.getWebhookName()) + err := c.deleteMutatingWebhook(mutatingWebhook) + if err != nil { + log.Errorf("Failed to delete Mutating Webhook %s: %v", c.config.getWebhookName(), err) + } + } } if c.config.validationEnabled { @@ -232,6 +245,19 @@ func (c *ControllerV1beta1) reconcile() error { log.Errorf("Failed to update Validating Webhook %s: %v", c.config.getWebhookName(), err) } } + } else { + validatingWebhook, err := c.validatingWebhooksLister.Get(c.config.getWebhookName()) + if err != nil { + if !errors.IsNotFound(err) { + log.Errorf("Failed to get Validating Webhook %s: %v", c.config.getWebhookName(), err) + } + } else { + log.Infof("Validating Webhook %s was found, deleting it", c.config.getWebhookName()) + err := c.deleteValidatingWebhook(validatingWebhook) + if err != nil { + log.Errorf("Failed to delete Validating Webhook %s: %v", c.config.getWebhookName(), err) + } + } } return err @@ -274,6 +300,12 @@ func (c *ControllerV1beta1) newValidatingWebhooks(secret *corev1.Secret) []admiv return webhooks } +// deleteValidatingWebhook deletes the ValidatingWebhookConfiguration object. +func (c *ControllerV1beta1) deleteValidatingWebhook(webhook *admiv1beta1.ValidatingWebhookConfiguration) error { + err := c.clientSet.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Delete(context.TODO(), webhook.Name, metav1.DeleteOptions{}) + return err +} + // createMutatingWebhook creates a new MutatingWebhookConfiguration object. func (c *ControllerV1beta1) createMutatingWebhook(secret *corev1.Secret) error { webhook := &admiv1beta1.MutatingWebhookConfiguration{ @@ -312,6 +344,12 @@ func (c *ControllerV1beta1) newMutatingWebhooks(secret *corev1.Secret) []admiv1b return webhooks } +// deleteMutatingWebhook deletes the MutatingWebhookConfiguration object. 
+func (c *ControllerV1beta1) deleteMutatingWebhook(webhook *admiv1beta1.MutatingWebhookConfiguration) error { + err := c.clientSet.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Delete(context.TODO(), webhook.Name, metav1.DeleteOptions{}) + return err +} + func (c *ControllerV1beta1) generateTemplates() { validatingWebhooks := []admiv1beta1.ValidatingWebhook{} for _, webhook := range c.webhooks { diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go index 97dda98d33174..c57f5cc83790c 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/auto_instrumentation_test.go @@ -340,7 +340,6 @@ func TestInjectAutoInstruConfigV2(t *testing.T) { Name: volumeName, MountPath: "/opt/datadog-packages/datadog-apm-inject", SubPath: "opt/datadog-packages/datadog-apm-inject", - ReadOnly: true, }, mounts[0], "expected first container volume mount to be the injector") require.Equal(t, corev1.VolumeMount{ Name: etcVolume.Name, @@ -653,27 +652,35 @@ func assertLibReq(t *testing.T, pod *corev1.Pod, lang language, image, envKey, e } func TestExtractLibInfo(t *testing.T) { + defaultLibImageVersions := map[string]string{ + "java": "registry/dd-lib-java-init:v1", + "js": "registry/dd-lib-js-init:v5", + "python": "registry/dd-lib-python-init:v2", + "dotnet": "registry/dd-lib-dotnet-init:v3", + "ruby": "registry/dd-lib-ruby-init:v2", + } + // TODO: Add new entry when a new language is supported allLatestDefaultLibs := []libInfo{ { lang: "java", - image: "registry/dd-lib-java-init:v1", + image: defaultLibImageVersions["java"], }, { lang: "js", - image: "registry/dd-lib-js-init:v5", + image: defaultLibImageVersions["js"], }, { lang: "python", - image: "registry/dd-lib-python-init:v2", + image: defaultLibImageVersions["python"], }, { lang: "dotnet", - image: "registry/dd-lib-dotnet-init:v3", + image: defaultLibImageVersions["dotnet"], }, { lang: "ruby", - image: "registry/dd-lib-ruby-init:v2", + image: defaultLibImageVersions["ruby"], }, } @@ -697,6 +704,17 @@ func TestExtractLibInfo(t *testing.T) { }, }, }, + { + name: "java with default version", + pod: common.FakePodWithAnnotation("admission.datadoghq.com/java-lib.version", "default"), + containerRegistry: "registry", + expectedLibsToInject: []libInfo{ + { + lang: "java", + image: "registry/dd-lib-java-init:v1", + }, + }, + }, { name: "java from common registry", pod: common.FakePodWithAnnotation("admission.datadoghq.com/java-lib.version", "v1"), @@ -962,6 +980,21 @@ func TestExtractLibInfo(t *testing.T) { mockConfig.SetWithoutSource("apm_config.instrumentation.lib_versions", map[string]string{"java": "v1.20.0"}) }, }, + { + name: "single step instrumentation with default java version", + pod: common.FakePodWithNamespaceAndLabel("ns", "", ""), + containerRegistry: "registry", + expectedLibsToInject: []libInfo{ + { + lang: "java", + image: defaultLibImageVersions["java"], + }, + }, + setupConfig: func() { + mockConfig.SetWithoutSource("apm_config.instrumentation.enabled", true) + mockConfig.SetWithoutSource("apm_config.instrumentation.lib_versions", map[string]string{"java": "default"}) + }, + }, { name: "single step instrumentation with pinned java and python versions", pod: common.FakePodWithNamespaceAndLabel("ns", "", ""), diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/injector.go 
b/pkg/clusteragent/admission/mutate/autoinstrumentation/injector.go index 6ed06e0bbd2fd..bfea083f5cc15 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/injector.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/injector.go @@ -123,7 +123,7 @@ func (i *injector) requirements() libRequirement { }, volumeMounts: []volumeMount{ volumeMountETCDPreloadAppContainer.prepended(), - v2VolumeMountInjector.readOnly().prepended(), + v2VolumeMountInjector.prepended(), }, envVars: []envVar{ { diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go index e76387c975f2c..c3855531c6e63 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/language_versions.go @@ -11,8 +11,9 @@ import ( "fmt" "slices" - "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" corev1 "k8s.io/api/core/v1" + + "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/common" ) const ( @@ -32,6 +33,10 @@ func (l language) defaultLibInfo(registry, ctrName string) libInfo { } func (l language) libImageName(registry, tag string) string { + if tag == defaultVersionMagicString { + tag = l.defaultLibVersion() + } + return fmt.Sprintf("%s/dd-lib-%s-init:%s", registry, l, tag) } @@ -112,6 +117,10 @@ func (l language) isEnabledByDefault() bool { return l != "php" } +// defaultVersionMagicString is a magic string that indicates that the user +// wishes to utilize the default version found in languageVersions. +const defaultVersionMagicString = "default" + // languageVersions defines the major library versions we consider "default" for each // supported language. If not set, we will default to "latest", see defaultLibVersion. 
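A small test-style sketch of the tag resolution added in language_versions.go, assuming language is a string-derived type and that java's pinned default stays at v1, as in the test table earlier in this diff:

    // Illustrative only, not part of this PR: "default" resolves to the
    // language's default version, any other tag is used verbatim.
    func TestLibImageNameDefaultTag(t *testing.T) {
        require.Equal(t, "registry/dd-lib-java-init:v1", language("java").libImageName("registry", "default"))
        require.Equal(t, "registry/dd-lib-java-init:v1.20.0", language("java").libImageName("registry", "v1.20.0"))
    }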
// diff --git a/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go b/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go index bd9345134ebfd..d701e83e0d699 100644 --- a/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go +++ b/pkg/clusteragent/admission/mutate/autoinstrumentation/mutators.go @@ -140,7 +140,7 @@ func (v volumeMount) mutateContainer(c *corev1.Container) error { return nil } -func (v volumeMount) readOnly() volumeMount { +func (v volumeMount) readOnly() volumeMount { // nolint:unused m := v.VolumeMount m.ReadOnly = true return volumeMount{m, v.Prepend} diff --git a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go index e5da18c3b28dd..f9f968a0b783e 100644 --- a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go +++ b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation.go @@ -39,7 +39,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation/k8scp" "github.com/DataDog/datadog-agent/pkg/clusteragent/admission/mutate/cwsinstrumentation/k8sexec" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/security/resolvers/usersessions" + "github.com/DataDog/datadog-agent/pkg/security/utils/k8sutils" "github.com/DataDog/datadog-agent/pkg/util/containers" apiserverUtils "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" apiServerCommon "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/common" @@ -574,7 +574,7 @@ func (ci *CWSInstrumentation) injectCWSCommandInstrumentation(exec *corev1.PodEx } // prepare the user session context - userSessionCtx, err := usersessions.PrepareK8SUserSessionContext(userInfo, cwsUserSessionDataMaxSize) + userSessionCtx, err := k8sutils.PrepareK8SUserSessionContext(userInfo, cwsUserSessionDataMaxSize) if err != nil { log.Debugf("ignoring instrumentation of %s: %v", mutatecommon.PodString(pod), err) metrics.CWSExecInstrumentationAttempts.Observe(1, ci.mode.String(), "false", cwsCredentialsSerializationErrorReason) diff --git a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go index fccb37527cc23..a2ae64c664bdb 100644 --- a/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go +++ b/pkg/clusteragent/admission/mutate/cwsinstrumentation/cws_instrumentation_test.go @@ -225,7 +225,7 @@ func Test_injectCWSCommandInstrumentation(t *testing.T) { name: "my-pod", ns: "my-namespace", userInfo: &authenticationv1.UserInfo{}, - include: []string{"kube_namespae:my-namespace"}, + include: []string{"kube_namespace:my-namespace"}, apiClientAnnotations: map[string]string{ cwsInstrumentationPodAnotationStatus: cwsInstrumentationPodAnotationReady, }, diff --git a/pkg/clusteragent/admission/validate/kubernetesadmissionevents/kubernetesadmissionevents_test.go b/pkg/clusteragent/admission/validate/kubernetesadmissionevents/kubernetesadmissionevents_test.go index 81d58c49a00ce..2ba0bea1dbe30 100644 --- a/pkg/clusteragent/admission/validate/kubernetesadmissionevents/kubernetesadmissionevents_test.go +++ b/pkg/clusteragent/admission/validate/kubernetesadmissionevents/kubernetesadmissionevents_test.go @@ -29,7 +29,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" 
"github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/util/fxutil" @@ -251,13 +252,13 @@ func TestKubernetesAdmissionEvents(t *testing.T) { // Emit the event start := time.Now() mockSender.On("Event", mock.AnythingOfType("event.Event")).Return().Once() + validated, err := kubernetesAuditWebhook.emitEvent(&tt.request, "", nil) // Force flush to serializer to ensure the event is emitted and received. demultiplexerMock.ForceFlushToSerializer(start, true) - validated, err := kubernetesAuditWebhook.emitEvent(&tt.request, "", nil) assert.NoError(t, err) assert.True(t, validated) if tt.expectedEmitted { - mockSender.AssertCalled(t, "Event", tt.expectedEvent) + mockSender.AssertEvent(t, tt.expectedEvent, 1*time.Second) } else { mockSender.AssertNotCalled(t, "Event") } @@ -267,5 +268,5 @@ func TestKubernetesAdmissionEvents(t *testing.T) { // createDemultiplexer creates a demultiplexer for testing func createDemultiplexer(t *testing.T) demultiplexer.FakeSamplerMock { - return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), compressionmock.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) + return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), logscompression.MockModule(), metricscompression.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) } diff --git a/pkg/util/aliases.go b/pkg/clusteragent/autoscaling/workload/loadstore/doc.go similarity index 64% rename from pkg/util/aliases.go rename to pkg/clusteragent/autoscaling/workload/loadstore/doc.go index 8ba34adbfddc1..4db049dcfa4d5 100644 --- a/pkg/util/aliases.go +++ b/pkg/clusteragent/autoscaling/workload/loadstore/doc.go @@ -3,11 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package util +//go:build kubeapiserver -import ( - "github.com/DataDog/datadog-agent/pkg/util/sort" -) - -// SortUniqInPlace alias -var SortUniqInPlace = sort.UniqInPlace +/* +Package loadstore stores local failover metrics for the workload that need autoscaling +*/ +package loadstore diff --git a/pkg/clusteragent/autoscaling/workload/loadstore/entity.go b/pkg/clusteragent/autoscaling/workload/loadstore/entity.go new file mode 100644 index 0000000000000..5a3b688b50483 --- /dev/null +++ b/pkg/clusteragent/autoscaling/workload/loadstore/entity.go @@ -0,0 +1,127 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build kubeapiserver + +package loadstore + +import ( + "fmt" + "time" +) + +// EntityType defines the type of entity. +type EntityType int8 + +// PodOwnerType is parsed from kube_ownerref_kind, example values: deployment, statefulset, daemonset, etc. 
+type PodOwnerType int8 + +// ValueType defines the datatype of workload value. +type ValueType float64 + +// Enumeration of entity types. +const ( + ContainerType EntityType = iota + PodType // TODO: PodType is not supported yet + UnknownType +) + +// Enumeration of pod owner types which is parsed from tags kube_ownerref_kind +const ( + Deployment PodOwnerType = iota + ReplicaSet + Unsupported +) + +const ( + // maxDataPoints is the maximum number of data points to store per entity. + maxDataPoints = 3 + // defaultPurgeInterval is the default interval to purge inactive entities. + defaultPurgeInterval = 3 * time.Minute + // defaultExpireInterval is the default interval to expire entities. + defaultExpireInterval = 3 * time.Minute +) + +// Entity represents an entity with a type and its attributes. +// if entity is a pod, if entity restarts, a new entity will be created because podname is different +// if entity is a container, the entity will be same +type Entity struct { + EntityType EntityType // required, PodType or ContainerType + + // Use display_container_name for EntityName if EntityType is container + // or use podname for entityName if EntityType is pod + // display_container_name = container.Name + pod.Name + // if container is restarted, the display_container_name will be the same + EntityName string // required + + Namespace string // required + PodOwnerName string // required, parsed from tags kube_ownerref_name + PodOwnerkind PodOwnerType // required, parsed from tags kube_ownerref_kind + PodName string // required, parsed from tags pod_name + ContainerName string // optional, short container name, empty if EntityType is PodType + MetricName string // required, metric name of workload +} + +// EntityValue represents a value with a timestamp. +type EntityValue struct { + value ValueType + timestamp Timestamp +} + +// String returns a string representation of the EntityValue. +func (ev *EntityValue) String() string { + // Convert the timestamp to a time.Time object assuming the timestamp is in seconds. + // If the timestamp is in milliseconds, use time.UnixMilli(ev.timestamp) instead. + readableTime := time.Unix(int64(ev.timestamp), 0).Local().Format(time.RFC3339) + return fmt.Sprintf("Value: %f, Timestamp: %s", ev.value, readableTime) +} + +// EntityValueQueue represents a queue with a fixed capacity that removes the front element when full +type EntityValueQueue struct { + data []*EntityValue + head int + tail int + size int + capacity int +} + +// pushBack adds an element to the back of the queue. +// If the queue is full, it removes the front element first. +func (q *EntityValueQueue) pushBack(value *EntityValue) bool { + if q.size == q.capacity { + // Remove the front element + q.head = (q.head + 1) % q.capacity + q.size-- + } + + // Add the new element at the back + q.data[q.tail] = value + q.tail = (q.tail + 1) % q.capacity + q.size++ + return true +} + +// ToSlice converts the EntityValueQueue data to a slice of EntityValue. 
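pushBack above is a fixed-capacity ring buffer: once size reaches capacity, head advances so the oldest data point is evicted before the new one is written at tail, and ToSlice (just below) unrolls the buffer back into insertion order. A self-contained sketch of the same eviction behavior, with plain ints in place of *EntityValue:

package main

import "fmt"

// ringQueue mirrors EntityValueQueue's head/tail/size bookkeeping with plain ints.
type ringQueue struct {
	data                       []int
	head, tail, size, capacity int
}

func newRingQueue(capacity int) *ringQueue {
	return &ringQueue{data: make([]int, capacity), capacity: capacity}
}

// pushBack drops the oldest element when the queue is full, then appends at tail.
func (q *ringQueue) pushBack(v int) {
	if q.size == q.capacity {
		q.head = (q.head + 1) % q.capacity
		q.size--
	}
	q.data[q.tail] = v
	q.tail = (q.tail + 1) % q.capacity
	q.size++
}

// toSlice unrolls the buffer from head to tail in insertion order.
func (q *ringQueue) toSlice() []int {
	out := make([]int, 0, q.size)
	for i := 0; i < q.size; i++ {
		out = append(out, q.data[(q.head+i)%q.capacity])
	}
	return out
}

func main() {
	q := newRingQueue(3) // maxDataPoints is 3 in the code above
	for _, v := range []int{1, 2, 3, 4, 5} {
		q.pushBack(v)
	}
	fmt.Println(q.toSlice()) // [3 4 5]: the two oldest points were evicted
}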
+func (q *EntityValueQueue) ToSlice() []EntityValue { + if q.size == 0 { + return []EntityValue{} + } + + result := make([]EntityValue, 0, q.size) + if q.head < q.tail { + for _, v := range q.data[q.head:q.tail] { + result = append(result, *v) + } + } else { + for _, v := range q.data[q.head:] { + result = append(result, *v) + } + for _, v := range q.data[:q.tail] { + result = append(result, *v) + } + } + + return result +} diff --git a/pkg/clusteragent/autoscaling/workload/loadstore/store.go b/pkg/clusteragent/autoscaling/workload/loadstore/store.go new file mode 100644 index 0000000000000..479f33230f1d7 --- /dev/null +++ b/pkg/clusteragent/autoscaling/workload/loadstore/store.go @@ -0,0 +1,168 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build kubeapiserver + +package loadstore + +import ( + "context" + "strings" + "sync" + + "github.com/DataDog/agent-payload/v5/gogen" + "github.com/DataDog/datadog-agent/pkg/util/kubernetes" +) + +var ( + // WorkloadMetricStore is the store for workload metrics + WorkloadMetricStore Store + // WorkloadMetricStoreOnce is used to init the store once + WorkloadMetricStoreOnce sync.Once +) + +// GetWorkloadMetricStore returns the workload metric store, init once +func GetWorkloadMetricStore(ctx context.Context) Store { + WorkloadMetricStoreOnce.Do(func() { + WorkloadMetricStore = NewEntityStore(ctx) + }) + return WorkloadMetricStore +} + +// StoreInfo represents the store information which aggregates the entities to lowest level, i.e., container level +type StoreInfo struct { + currentTime Timestamp + StatsResults []*StatsResult +} + +// StatsResult provides a summary of the entities, grouped by namespace, podOwner, and metric name. +type StatsResult struct { + Namespace string + PodOwner string + MetricName string + Count int // Under , number of containers if container type or pods if pod type +} + +// PodResult provides the time series of entity values for a pod and its containers +type PodResult struct { + PodName string + ContainerValues map[string][]EntityValue // container name to a time series of entity values, e.g cpu usage from past three collection + PodLevelValue []EntityValue // If Pod level value is not available, it will be empty +} + +// QueryResult provides the pod results for a given query +type QueryResult struct { + results []PodResult +} + +// Store is an interface for in-memory storage of entities and their load metric values. +type Store interface { + // SetEntitiesValues sets the values for the given map + SetEntitiesValues(entities map[*Entity]*EntityValue) + + // GetStoreInfo returns the store information. + GetStoreInfo() StoreInfo + + // GetMetricsRaw provides the values of qualified entities by given search filters + GetMetricsRaw(metricName string, + namespace string, + podOwnerName string, + containerName string) QueryResult +} + +// createEntitiesFromPayload is a helper function used for creating entities from the metric payload. 
+func createEntitiesFromPayload(payload *gogen.MetricPayload) map[*Entity]*EntityValue { + entities := make(map[*Entity]*EntityValue) + splitTag := func(tag string) (key string, value string) { + splitIndex := strings.Index(tag, ":") + if splitIndex < -1 { + return "", "" + } + return tag[:splitIndex], tag[splitIndex+1:] + } + for _, series := range payload.Series { + metricName := series.GetMetric() + points := series.GetPoints() + tags := series.GetTags() + entity := Entity{ + EntityType: UnknownType, + EntityName: "", + Namespace: "", + MetricName: metricName, + PodOwnerName: "", + PodOwnerkind: Unsupported, + } + for _, tag := range tags { + k, v := splitTag(tag) + switch k { + case "display_container_name": + entity.EntityType = ContainerType + entity.EntityName = v + case "kube_namespace": + entity.Namespace = v + case "container_id": + entity.EntityType = ContainerType + case "kube_ownerref_name": + entity.PodOwnerName = v + case "kube_ownerref_kind": + switch strings.ToLower(v) { + case "deployment": + entity.PodOwnerkind = Deployment + case "replicaset": + entity.PodOwnerkind = ReplicaSet + // TODO: add more cases + default: + entity.PodOwnerkind = Unsupported + } + case "container_name": + entity.ContainerName = v + case "pod_name": + entity.PodName = v + } + } + // TODO: + // if PodType, populate entity.type first + // if entity.EntityType == PodType { + // entity.EntityName = entity.PodName + // } + + // for replicaset, the logic should be consistent with getNamespacedPodOwner in podwatcher + if entity.PodOwnerkind == ReplicaSet { + deploymentName := kubernetes.ParseDeploymentForReplicaSet(entity.PodOwnerName) + if deploymentName != "" { + entity.PodOwnerkind = Deployment + entity.PodOwnerName = deploymentName + } else { + entity.PodOwnerkind = Unsupported + } + } + if entity.MetricName == "" || + entity.EntityType == UnknownType || + entity.Namespace == "" || + entity.PodOwnerName == "" || + entity.EntityName == "" || + entity.PodOwnerkind == Unsupported { + continue + } + for _, point := range points { + if point != nil && point.GetTimestamp() > 0 { + entities[&entity] = &EntityValue{ + value: ValueType(point.GetValue()), + timestamp: Timestamp(point.GetTimestamp()), + } + } + } + } + return entities +} + +// ProcessLoadPayload converts the metric payload and stores the entities and their values in the store. +func ProcessLoadPayload(payload *gogen.MetricPayload, store Store) { + if payload == nil || store == nil { + return + } + entities := createEntitiesFromPayload(payload) + store.SetEntitiesValues(entities) +} diff --git a/pkg/clusteragent/autoscaling/workload/loadstore/store_test.go b/pkg/clusteragent/autoscaling/workload/loadstore/store_test.go new file mode 100644 index 0000000000000..6ff07160dbfe2 --- /dev/null +++ b/pkg/clusteragent/autoscaling/workload/loadstore/store_test.go @@ -0,0 +1,173 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
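createEntitiesFromPayload above keys everything off "key:value" tags. One detail worth keeping in mind: strings.Index returns -1 when the separator is absent, so the conventional guard for a malformed tag is splitIndex < 0 (a negative index would otherwise panic when slicing). A minimal sketch of the tag-splitting step with that guard:

package main

import (
	"fmt"
	"strings"
)

// splitTag splits a "key:value" workload tag. Tags without a separator are
// skipped by returning empty strings; strings.Index reports -1 in that case.
func splitTag(tag string) (key, value string) {
	i := strings.Index(tag, ":")
	if i < 0 {
		return "", ""
	}
	return tag[:i], tag[i+1:]
}

func main() {
	for _, tag := range []string{
		"kube_namespace:test",
		"kube_ownerref_kind:replicaset",
		"display_container_name:pod_0-container_test",
		"malformed-tag",
	} {
		k, v := splitTag(tag)
		fmt.Printf("%q -> key=%q value=%q\n", tag, k, v)
	}
}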
+ +//go:build kubeapiserver && test + +package loadstore + +import ( + "fmt" + "sync" + "testing" + "time" + + "github.com/DataDog/agent-payload/v5/gogen" + "github.com/stretchr/testify/assert" +) + +func createSeriesPayload(i int, timeDelta int64) *gogen.MetricPayload { + containerID := fmt.Sprintf("container_id:%d", 10) + containerName := "container_name:container_test" + displayContainerName := fmt.Sprintf("display_container_name:pod_%d-container_test", i) + namespace := "kube_namespace:test" + deployment := "kube_deployment:redis_test" + kubeOwnerrefName := "kube_ownerref_name:redis_test" + kubeOwnerrefKind := "kube_ownerref_kind:deployment" + podName := fmt.Sprintf("pod_name:redis_%d", i) + payload := gogen.MetricPayload{ + Series: []*gogen.MetricPayload_MetricSeries{ + { + Metric: "container.memory.usage", + Type: 3, // Gauge + Points: []*gogen.MetricPayload_MetricPoint{ + { + Timestamp: time.Now().Unix() - timeDelta, // timeDelta seconds ago + Value: float64(i), + }, + }, + Tags: []string{containerID, displayContainerName, namespace, deployment, kubeOwnerrefName, kubeOwnerrefKind, podName, containerName}, + Resources: []*gogen.MetricPayload_Resource{ + { + Type: "host", Name: "localHost", + }, + }, + }, + }, + } + return &payload +} + +func createSeriesPayload2(i int, timeDelta int64) *gogen.MetricPayload { + containerID := fmt.Sprintf("container_id:%d", i) + containerName := "container_name:container_test" + displayContainerName := fmt.Sprintf("display_container_name:pod_%d-container_test", i) + namespace := "kube_namespace:test" + deployment := "kube_deployment:nginx_test" + kubeOwnerrefName := "kube_ownerref_name:nginx_test-8957fc986" + kubeOwnerrefKind := "kube_ownerref_kind:replicaset" + podName := fmt.Sprintf("pod_name:nginx_%d", i) + payload := gogen.MetricPayload{ + Series: []*gogen.MetricPayload_MetricSeries{ + { + Metric: "container.cpu.usage", + Type: 3, // Gauge + Points: []*gogen.MetricPayload_MetricPoint{ + { + Timestamp: time.Now().Unix() - timeDelta, // timeDelta seconds ago + Value: float64(i), + }, + }, + Tags: []string{containerID, displayContainerName, namespace, deployment, kubeOwnerrefName, kubeOwnerrefKind, podName, containerName}, + Resources: []*gogen.MetricPayload_Resource{ + { + Type: "host", Name: "localHost2", + }, + }, + }, + }, + } + return &payload +} + +func TestCreateEntitiesFromPayload(t *testing.T) { + numPayloads := 10 + for i := 0; i < numPayloads; i++ { + payload := createSeriesPayload(i, 100) + entities := createEntitiesFromPayload(payload) + assert.Equal(t, len(entities), 1) + for k, v := range entities { + assert.Equal(t, "container.memory.usage", k.MetricName) + assert.Equal(t, ValueType(i), v.value) + assert.Equal(t, fmt.Sprintf("redis_%d", i), k.PodName) + assert.Equal(t, "test", k.Namespace) + assert.Equal(t, "redis_test", k.PodOwnerName) + assert.Equal(t, "container_test", k.ContainerName) + assert.Equal(t, fmt.Sprintf("pod_%d-container_test", i), k.EntityName) + } + } +} + +func TestStoreAndPurgeEntities(t *testing.T) { + numPayloads := 100 + store := EntityStore{ + key2ValuesMap: make(map[uint64]*dataItem), + keyAttrTable: make(map[compositeKey]podList), + lock: sync.RWMutex{}, + } + for _, timeDelta := range []int64{100, 85, 70} { + for i := 0; i < numPayloads; i++ { + payload := createSeriesPayload(i, timeDelta) + entities := createEntitiesFromPayload(payload) + store.SetEntitiesValues(entities) + payload2 := createSeriesPayload2(i, timeDelta) + entities2 := createEntitiesFromPayload(payload2) + store.SetEntitiesValues(entities2) + 
+ } + } + storeInfo := store.GetStoreInfo() + assert.Equal(t, 2, len(storeInfo.StatsResults)) + for _, statsResult := range storeInfo.StatsResults { + assert.Equal(t, numPayloads, statsResult.Count) + assert.Equal(t, "test", statsResult.Namespace) + assert.Contains(t, []string{"redis_test", "nginx_test"}, statsResult.PodOwner) + if statsResult.PodOwner == "redis_test" { + assert.Equal(t, "container.memory.usage", statsResult.MetricName) + } else { // nginx_test + assert.Equal(t, "container.cpu.usage", statsResult.MetricName) + } + } + store.purgeInactiveEntities(10 * time.Second) + storeInfo = store.GetStoreInfo() + for _, statsResult := range storeInfo.StatsResults { + assert.Equal(t, 0, statsResult.Count) + } +} + +func TestGetMetrics(t *testing.T) { + numPayloads := 100 + store := EntityStore{ + key2ValuesMap: make(map[uint64]*dataItem), + keyAttrTable: make(map[compositeKey]podList), + lock: sync.RWMutex{}, + } + for _, timeDelta := range []int64{100, 85, 80} { + for i := 0; i < numPayloads; i++ { + payload := createSeriesPayload(i, timeDelta) + entities := createEntitiesFromPayload(payload) + store.SetEntitiesValues(entities) + payload2 := createSeriesPayload2(i, timeDelta) + entities2 := createEntitiesFromPayload(payload2) + store.SetEntitiesValues(entities2) + + } + } + queryResult := store.GetMetricsRaw("container.cpu.usage", "test", "nginx_test", "") + assert.Equal(t, 100, len(queryResult.results)) + for _, podResult := range queryResult.results { + assert.Equal(t, 1, len(podResult.ContainerValues)) + assert.Equal(t, 0, len(podResult.PodLevelValue)) + for containerName, entityValues := range podResult.ContainerValues { + assert.Equal(t, "container_test", containerName) + assert.Equal(t, 3, len(entityValues)) + } + } + + emptyQueryResult := store.GetMetricsRaw("container.cpu.usage", "test", "nginx_test", "container_test2") + assert.Equal(t, 0, len(emptyQueryResult.results)) + + filteredQueryResult := store.GetMetricsRaw("container.memory.usage", "test", "redis_test", "container_test") + assert.Equal(t, 100, len(filteredQueryResult.results)) +} diff --git a/pkg/clusteragent/autoscaling/workload/loadstore/storeimpl.go b/pkg/clusteragent/autoscaling/workload/loadstore/storeimpl.go new file mode 100644 index 0000000000000..205b2fa9035c3 --- /dev/null +++ b/pkg/clusteragent/autoscaling/workload/loadstore/storeimpl.go @@ -0,0 +1,259 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build kubeapiserver + +package loadstore + +import ( + "context" + "sync" + "time" + + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +var _ Store = (*EntityStore)(nil) + +type dataItem struct { + entity *Entity + valueQueue EntityValueQueue // value queue, default 3 data points + lastActiveTs Timestamp // last active timestamp +} + +func convertsToEntityValueSlice(data []*EntityValue) []EntityValue { + result := make([]EntityValue, len(data)) + for i, v := range data { + if v != nil { + result[i] = *v + } + } + return result +} + +// compositeKey is a hash id of composite key for the keyAttrTable, which is used for quick filtering +type compositeKey uint64 + +func generateCompositeKey(namespace, podOwnerName, metricName string) compositeKey { + return compositeKey(generateHash(namespace, podOwnerName, metricName)) +} + +// dataPerPod stores the mapping between contaienr name and entity hash id and pod level entity hash id if available +// {containerName: entityHashId, containerName2: entityHashId2...} +type dataPerPod struct { + containers map[string]uint64 // map container name -> entity hash id + podEntityID uint64 // pod level entity hash id, if not available, it will be 0 +} + +// podList has a map of pod name (i.e. pod name: expod-hash1-hash2 ) to dataPerPod +type podList struct { + pods map[string]*dataPerPod + namespace string + podOwnerName string + metricName string +} + +// EntityStore manages mappings between entities and their hashed keys. +type EntityStore struct { + key2ValuesMap map[uint64]*dataItem // Maps hash(entity) to a dataitem (entity and its values) + keyAttrTable map[compositeKey]podList // map Hash -> pod name -> dataPerPod + lock sync.RWMutex // Protects access to store and entityMap +} + +// NewEntityStore creates a new EntityStore. +func NewEntityStore(ctx context.Context) *EntityStore { + store := EntityStore{ + key2ValuesMap: make(map[uint64]*dataItem), + keyAttrTable: make(map[compositeKey]podList), + lock: sync.RWMutex{}, + } + store.startCleanupInBackground(ctx) + return &store +} + +// SetEntitiesValues inserts entities into the store. 
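generateCompositeKey above folds (namespace, podOwnerName, metricName) into a single uint64 via FNV-64a (generateHash is defined in util.go further down), which is what lets keyAttrTable use the tuple as a plain map key. A standalone sketch of that hashing step:

package main

import (
	"fmt"
	"hash/fnv"
)

// compositeKeyOf mimics generateCompositeKey/generateHash: the attributes are
// written into one FNV-64a digest (concatenated without a delimiter, matching
// generateHash above) so the tuple can be used as a map key.
func compositeKeyOf(namespace, podOwnerName, metricName string) uint64 {
	h := fnv.New64a()
	for _, s := range []string{namespace, podOwnerName, metricName} {
		h.Write([]byte(s))
	}
	return h.Sum64()
}

func main() {
	k1 := compositeKeyOf("test", "nginx_test", "container.cpu.usage")
	k2 := compositeKeyOf("test", "nginx_test", "container.cpu.usage")
	k3 := compositeKeyOf("test", "redis_test", "container.memory.usage")
	fmt.Println(k1 == k2) // true: same tuple, same key
	fmt.Println(k1 == k3) // false (with overwhelming probability)
}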
+func (es *EntityStore) SetEntitiesValues(entities map[*Entity]*EntityValue) { + es.lock.Lock() // Lock for writing + defer es.lock.Unlock() + for entity, value := range entities { + if entity.EntityName == "" || entity.MetricName == "" || entity.Namespace == "" || entity.PodOwnerName == "" { + log.Tracef("Skipping entity with empty entityName, podOwnerName, namespace or metricName: %v", entity) + continue + } + entityHash := hashEntityToUInt64(entity) + data, exists := es.key2ValuesMap[entityHash] + if !exists { + data = &dataItem{ + entity: entity, + valueQueue: EntityValueQueue{ + data: make([]*EntityValue, maxDataPoints), + head: 0, + tail: 0, + size: 0, + capacity: maxDataPoints, + }, + lastActiveTs: value.timestamp, + } + data.valueQueue.pushBack(value) + es.key2ValuesMap[entityHash] = data + } else { + if data.lastActiveTs < value.timestamp { + // Update the last active timestamp + data.lastActiveTs = value.timestamp + data.valueQueue.pushBack(value) + } //else if lastActiveTs is greater than value.timestamp, skip the value because it is outdated + } + + // Update the key attribute table + compositeKeyHash := generateCompositeKey(entity.Namespace, entity.PodOwnerName, entity.MetricName) + if _, ok := es.keyAttrTable[compositeKeyHash]; !ok { + es.keyAttrTable[compositeKeyHash] = podList{ + pods: make(map[string]*dataPerPod), + namespace: entity.Namespace, + podOwnerName: entity.PodOwnerName, + metricName: entity.MetricName, + } + } + if _, ok := (es.keyAttrTable[compositeKeyHash].pods)[entity.PodName]; !ok { + (es.keyAttrTable[compositeKeyHash].pods)[entity.PodName] = &dataPerPod{ + containers: make(map[string]uint64), + podEntityID: 0, + } + } + // Update the pod level entity hash id + if entity.EntityType == PodType { + (es.keyAttrTable[compositeKeyHash].pods)[entity.PodName].podEntityID = entityHash + } + if entity.EntityType == ContainerType { + (es.keyAttrTable[compositeKeyHash].pods)[entity.PodName].containers[entity.ContainerName] = entityHash + } + } +} + +/* +GetMetricsRaw to get all entities by given search filters + + metricName: required + namespace: required + podOwnerName: required + containerName: optional +*/ +func (es *EntityStore) GetMetricsRaw(metricName string, + namespace string, + podOwnerName string, + containerName string) QueryResult { + es.lock.RLock() // Lock for writing + defer es.lock.RUnlock() + compositeKeyHash := generateCompositeKey(namespace, podOwnerName, metricName) + podList, ok := es.keyAttrTable[compositeKeyHash] + if !ok { + return QueryResult{} + } + var result QueryResult + for podName, dataPerPod := range podList.pods { + if dataPerPod.podEntityID != 0 { // if it is a pod level entity + entity := es.key2ValuesMap[dataPerPod.podEntityID] + podResult := PodResult{ + PodName: podName, + PodLevelValue: convertsToEntityValueSlice(entity.valueQueue.data), + } + result.results = append(result.results, podResult) + } else { + podList := PodResult{ + PodName: podName, + ContainerValues: make(map[string][]EntityValue), + } + for containerNameKey, entityHash := range dataPerPod.containers { + if containerName != "" && containerName != containerNameKey { + continue + } + entity := es.key2ValuesMap[entityHash] + podList.ContainerValues[containerNameKey] = convertsToEntityValueSlice(entity.valueQueue.data) + } + if len(podList.ContainerValues) > 0 { + result.results = append(result.results, podList) + } + } + } + return result +} + +func (es *EntityStore) deleteInternal(hash uint64) { + if toBeDelItem, exists := es.key2ValuesMap[hash]; exists { // find 
the entity to delete + compositeKeyHash := generateCompositeKey(toBeDelItem.entity.Namespace, toBeDelItem.entity.PodOwnerName, toBeDelItem.entity.MetricName) // calculate the composite key + if _, ok := es.keyAttrTable[compositeKeyHash]; ok { // search the composite key in the lookup table + if dataPerPod, ok := (es.keyAttrTable[compositeKeyHash].pods)[toBeDelItem.entity.PodName]; ok { // search the pod name in the lookup table + // Delete the container from the pod + if toBeDelItem.entity.EntityType == ContainerType { + delete(dataPerPod.containers, toBeDelItem.entity.ContainerName) // delete the container from the pod + } + // Delete the pod from the keyAttrTable if there is no container + if toBeDelItem.entity.EntityType == PodType || + (len(dataPerPod.containers) == 0 && dataPerPod.podEntityID == 0) { + delete((es.keyAttrTable[compositeKeyHash].pods), toBeDelItem.entity.PodName) + } + } + } + // Delete the entity from the key2ValuesMap + delete(es.key2ValuesMap, hash) + } +} + +// purgeInactiveEntities purges inactive entities. +func (es *EntityStore) purgeInactiveEntities(purgeInterval time.Duration) { + es.lock.Lock() // Lock for writing + defer es.lock.Unlock() + for hash, entityValueBlob := range es.key2ValuesMap { + lastActive := entityValueBlob.lastActiveTs + if time.Since(time.Unix(int64(lastActive), 0)) > purgeInterval { + es.deleteInternal(hash) + } + } +} + +// startCleanupInBackground purges expired entities periodically. +func (es *EntityStore) startCleanupInBackground(ctx context.Context) { + log.Infof("Starting entity store cleanup") + // Launch periodic cleanup mechanism + go func() { + ticker := time.NewTicker(defaultPurgeInterval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + es.purgeInactiveEntities(defaultExpireInterval) + case <-ctx.Done(): + break + } + } + }() +} + +// GetStoreInfo returns the store information, aggregated by namespace, podOwner, and metric name +func (es *EntityStore) GetStoreInfo() StoreInfo { + es.lock.RLock() + defer es.lock.RUnlock() + var storeInfo StoreInfo + for _, podList := range es.keyAttrTable { + namespace := podList.namespace + podOwnerName := podList.podOwnerName + metricName := podList.metricName + count := 0 + for _, dataPerPod := range podList.pods { + count += len(dataPerPod.containers) + if dataPerPod.podEntityID != 0 { + count++ + } + } + storeInfo.StatsResults = append(storeInfo.StatsResults, &StatsResult{ + Namespace: namespace, + PodOwner: podOwnerName, + MetricName: metricName, + Count: count, + }) + } + storeInfo.currentTime = getCurrentTime() + return storeInfo +} diff --git a/pkg/clusteragent/autoscaling/workload/loadstore/util.go b/pkg/clusteragent/autoscaling/workload/loadstore/util.go new file mode 100644 index 0000000000000..ce940f904e8bb --- /dev/null +++ b/pkg/clusteragent/autoscaling/workload/loadstore/util.go @@ -0,0 +1,41 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build kubeapiserver + +package loadstore + +import ( + "hash/fnv" + "time" +) + +// Timestamp is a uint32 representing a timestamp. +type Timestamp uint32 + +// generateHash generates a uint64 hash for an unknown number of strings. 
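One Go detail in the cleanup goroutine above: a bare break inside select exits only the select statement, not the enclosing for loop, so returning from the goroutine (or using a labeled break) is the usual way to honor context cancellation. A minimal sketch of a periodic purge loop that stops when its context is cancelled:

package main

import (
	"context"
	"fmt"
	"time"
)

// startCleanup runs purge on every tick and returns when ctx is cancelled.
// Using return (rather than a bare break inside select) is what actually
// terminates the goroutine's for loop.
func startCleanup(ctx context.Context, interval time.Duration, purge func()) {
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				purge()
			case <-ctx.Done():
				return
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	startCleanup(ctx, 10*time.Millisecond, func() { fmt.Println("purging inactive entities") })
	time.Sleep(35 * time.Millisecond)
	cancel()
	time.Sleep(10 * time.Millisecond) // give the goroutine time to observe cancellation
}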
+func generateHash(strings ...string) uint64 { + // Initialize a new FNV-1a hasher + hasher := fnv.New64a() + // Iterate over the strings and write each one to the hasher + for _, str := range strings { + hasher.Write([]byte(str)) + } + return hasher.Sum64() +} + +// hashEntityToUInt64 generates an uint64 hash for an Entity. +func hashEntityToUInt64(entity *Entity) uint64 { + return generateHash(entity.EntityName, entity.Namespace, entity.MetricName) +} + +// getCurrentTime returns the current time in uint32 +func getCurrentTime() Timestamp { + return timeToTimestamp(time.Now()) +} + +func timeToTimestamp(t time.Time) Timestamp { + return Timestamp(t.Unix()) +} diff --git a/pkg/clusteragent/clusterchecks/dangling_config.go b/pkg/clusteragent/clusterchecks/dangling_config.go new file mode 100644 index 0000000000000..a52289bc672f3 --- /dev/null +++ b/pkg/clusteragent/clusterchecks/dangling_config.go @@ -0,0 +1,37 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build clusterchecks + +package clusterchecks + +import ( + "time" + + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" +) + +type danglingConfigWrapper struct { + config integration.Config + timeCreated time.Time + unscheduledCheck bool +} + +// createDanglingConfig creates a new danglingConfigWrapper +// This is used to keep track of the lifecycle of a dangling config +func createDanglingConfig(config integration.Config) *danglingConfigWrapper { + return &danglingConfigWrapper{ + config: config, + timeCreated: time.Now(), + unscheduledCheck: false, + } +} + +// isStuckScheduling returns true if the config has been in the store +// for longer than the unscheduledCheckThresholdSeconds +func (c *danglingConfigWrapper) isStuckScheduling(unscheduledCheckThresholdSeconds int64) bool { + expectCheckIsScheduledTime := c.timeCreated.Add(time.Duration(unscheduledCheckThresholdSeconds) * time.Second) + return time.Now().After(expectCheckIsScheduledTime) +} diff --git a/pkg/clusteragent/clusterchecks/dispatcher_configs.go b/pkg/clusteragent/clusterchecks/dispatcher_configs.go index 8d9dd4f7db0b5..f6769461090e5 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_configs.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_configs.go @@ -28,7 +28,7 @@ func (d *dispatcher) getState() (types.StateResponse, error) { response := types.StateResponse{ Warmup: !d.store.active, - Dangling: makeConfigArray(d.store.danglingConfigs), + Dangling: makeConfigArrayFromDangling(d.store.danglingConfigs), } for _, node := range d.store.nodes { n := types.StateNodeResponse{ @@ -41,7 +41,7 @@ func (d *dispatcher) getState() (types.StateResponse, error) { return response, nil } -func (d *dispatcher) addConfig(config integration.Config, targetNodeName string) { +func (d *dispatcher) addConfig(config integration.Config, targetNodeName string) bool { d.store.Lock() defer d.store.Unlock() @@ -59,9 +59,12 @@ func (d *dispatcher) addConfig(config integration.Config, targetNodeName string) // No target node specified: store in danglingConfigs if targetNodeName == "" { - danglingConfigs.Inc(le.JoinLeaderValue) - d.store.danglingConfigs[digest] = config - return + // Only update if it's a new dangling config + if _, found := d.store.danglingConfigs[digest]; !found { + danglingConfigs.Inc(le.JoinLeaderValue) + d.store.danglingConfigs[digest] = 
createDanglingConfig(config) + } + return false } currentNode, foundCurrent := d.store.getNodeStore(d.store.digestToNode[digest]) @@ -82,6 +85,8 @@ func (d *dispatcher) addConfig(config integration.Config, targetNodeName string) currentNode.removeConfig(digest) currentNode.Unlock() } + + return true } func (d *dispatcher) removeConfig(digest string) { @@ -94,7 +99,7 @@ func (d *dispatcher) removeConfig(digest string) { delete(d.store.digestToNode, digest) delete(d.store.digestToConfig, digest) - delete(d.store.danglingConfigs, digest) + d.deleteDangling([]string{digest}) // This is a list because each instance in a config has its own check ID and // all of them need to be deleted. @@ -131,16 +136,28 @@ func (d *dispatcher) shouldDispatchDangling() bool { return len(d.store.danglingConfigs) > 0 && len(d.store.nodes) > 0 } -// retrieveAndClearDangling extracts dangling configs from the store -func (d *dispatcher) retrieveAndClearDangling() []integration.Config { - d.store.Lock() - defer d.store.Unlock() - configs := makeConfigArray(d.store.danglingConfigs) - d.store.clearDangling() - danglingConfigs.Set(0, le.JoinLeaderValue) +// retrieveDangling extracts dangling configs from the store +func (d *dispatcher) retrieveDangling() []integration.Config { + d.store.RLock() + defer d.store.RUnlock() + + configs := makeConfigArrayFromDangling(d.store.danglingConfigs) return configs } +// deleteDangling clears the dangling configs from the store +func (d *dispatcher) deleteDangling(ids []string) { + for _, id := range ids { + if c, found := d.store.danglingConfigs[id]; found { + delete(d.store.danglingConfigs, id) + danglingConfigs.Dec(le.JoinLeaderValue) + if c.unscheduledCheck { + unscheduledCheck.Dec(le.JoinLeaderValue, c.config.Name, c.config.Source) + } + } + } +} + // patchConfiguration transforms the configuration from AD into a config // ready to use by node agents. 
It does the following changes: // - empty the ADIdentifiers array, to avoid node-agents detecting them as templates diff --git a/pkg/clusteragent/clusterchecks/dispatcher_main.go b/pkg/clusteragent/clusterchecks/dispatcher_main.go index 6f1a894f4e354..1e82de9a4fc03 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_main.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_main.go @@ -20,20 +20,22 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/clusteragent" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/hostname" + le "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver/leaderelection/metrics" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/util/log" ) // dispatcher holds the management logic for cluster-checks type dispatcher struct { - store *clusterStore - nodeExpirationSeconds int64 - extraTags []string - clcRunnersClient clusteragent.CLCRunnerClientInterface - advancedDispatching bool - excludedChecks map[string]struct{} - excludedChecksFromDispatching map[string]struct{} - rebalancingPeriod time.Duration + store *clusterStore + nodeExpirationSeconds int64 + unscheduledCheckThresholdSeconds int64 + extraTags []string + clcRunnersClient clusteragent.CLCRunnerClientInterface + advancedDispatching bool + excludedChecks map[string]struct{} + excludedChecksFromDispatching map[string]struct{} + rebalancingPeriod time.Duration } func newDispatcher(tagger tagger.Component) *dispatcher { @@ -41,6 +43,12 @@ func newDispatcher(tagger tagger.Component) *dispatcher { store: newClusterStore(), } d.nodeExpirationSeconds = pkgconfigsetup.Datadog().GetInt64("cluster_checks.node_expiration_timeout") + d.unscheduledCheckThresholdSeconds = pkgconfigsetup.Datadog().GetInt64("cluster_checks.unscheduled_check_threshold") + + if d.unscheduledCheckThresholdSeconds < d.nodeExpirationSeconds { + log.Warnf("The unscheduled_check_threshold value should be larger than node_expiration_timeout, setting it to the same value") + d.unscheduledCheckThresholdSeconds = d.nodeExpirationSeconds + } // Attach the cluster agent's global tags to all dispatched checks // as defined in the tagger's workloadmeta collector @@ -162,15 +170,19 @@ func (d *dispatcher) Unschedule(configs []integration.Config) { } // reschdule sends configurations to dispatching without checking or patching them as Schedule does. -func (d *dispatcher) reschedule(configs []integration.Config) { +func (d *dispatcher) reschedule(configs []integration.Config) []string { + addedConfigIDs := make([]string, 0, len(configs)) for _, c := range configs { log.Debugf("Rescheduling the check %s:%s", c.Name, c.Digest()) - d.add(c) + if d.add(c) { + addedConfigIDs = append(addedConfigIDs, c.Digest()) + } } + return addedConfigIDs } // add stores and delegates a given configuration -func (d *dispatcher) add(config integration.Config) { +func (d *dispatcher) add(config integration.Config) bool { target := d.getNodeToScheduleCheck() if target == "" { // If no node is found, store it in the danglingConfigs map for retrying later. 
@@ -179,7 +191,7 @@ func (d *dispatcher) add(config integration.Config) { log.Infof("Dispatching configuration %s:%s to node %s", config.Name, config.Digest(), target) } - d.addConfig(config, target) + return d.addConfig(config, target) } // remove deletes a given configuration @@ -196,6 +208,21 @@ func (d *dispatcher) reset() { d.store.reset() } +// scanUnscheduledChecks scans the store for configs that have been +// unscheduled for longer than the unscheduledCheckThresholdSeconds +func (d *dispatcher) scanUnscheduledChecks() { + d.store.Lock() + defer d.store.Unlock() + + for _, c := range d.store.danglingConfigs { + if !c.unscheduledCheck && c.isStuckScheduling(d.unscheduledCheckThresholdSeconds) { + log.Warnf("Detected unscheduled check config. Name:%s, Source:%s", c.config.Name, c.config.Source) + c.unscheduledCheck = true + unscheduledCheck.Inc(le.JoinLeaderValue, c.config.Name, c.config.Source) + } + } +} + // run is the main management goroutine for the dispatcher func (d *dispatcher) run(ctx context.Context) { d.store.Lock() @@ -211,6 +238,9 @@ func (d *dispatcher) run(ctx context.Context) { rebalanceTicker := time.NewTicker(d.rebalancingPeriod) defer rebalanceTicker.Stop() + unscheduledCheckTicker := time.NewTicker(time.Duration(d.unscheduledCheckThresholdSeconds) * time.Second) + defer unscheduledCheckTicker.Stop() + for { select { case <-ctx.Done(): @@ -223,9 +253,15 @@ func (d *dispatcher) run(ctx context.Context) { // Re-dispatch dangling configs if d.shouldDispatchDangling() { - danglingConfs := d.retrieveAndClearDangling() - d.reschedule(danglingConfs) + danglingConfigs := d.retrieveDangling() + scheduledConfigIDs := d.reschedule(danglingConfigs) + d.store.Lock() + d.deleteDangling(scheduledConfigIDs) + d.store.Unlock() } + case <-unscheduledCheckTicker.C: + // Check for configs that have been dangling longer than expected + d.scanUnscheduledChecks() case <-rebalanceTicker.C: if d.advancedDispatching { d.rebalance(false) diff --git a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go index 2e7dd891ab912..4e203fd9f048d 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_nodes.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_nodes.go @@ -150,7 +150,7 @@ func (d *dispatcher) expireNodes() { for digest, config := range node.digestToConfig { delete(d.store.digestToNode, digest) log.Debugf("Adding %s:%s as a dangling Cluster Check config", config.Name, digest) - d.store.danglingConfigs[digest] = config + d.store.danglingConfigs[digest] = createDanglingConfig(config) danglingConfigs.Inc(le.JoinLeaderValue) // TODO: Use partial label matching when it becomes available: diff --git a/pkg/clusteragent/clusterchecks/dispatcher_test.go b/pkg/clusteragent/clusterchecks/dispatcher_test.go index ff348a025ed58..f00141c8ea063 100644 --- a/pkg/clusteragent/clusterchecks/dispatcher_test.go +++ b/pkg/clusteragent/clusterchecks/dispatcher_test.go @@ -10,6 +10,7 @@ package clusterchecks import ( "sort" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -348,9 +349,10 @@ func TestRescheduleDanglingFromExpiredNodes(t *testing.T) { // Ensure we have 1 dangling to schedule, as new available node is registered assert.True(t, dispatcher.shouldDispatchDangling()) - configs := dispatcher.retrieveAndClearDangling() + configs := dispatcher.retrieveDangling() // Assert the check is scheduled - dispatcher.reschedule(configs) + scheduledIDs := dispatcher.reschedule(configs) + 
dispatcher.deleteDangling(scheduledIDs) danglingConfig, err := dispatcher.getAllConfigs() assert.NoError(t, err) assert.Equal(t, 1, len(danglingConfig)) @@ -401,6 +403,9 @@ func TestDispatchFourConfigsTwoNodes(t *testing.T) { } func TestDanglingConfig(t *testing.T) { + mockConfig := configmock.New(t) + mockConfig.SetWithoutSource("cluster_checks.unscheduled_check_threshold", 1) + mockConfig.SetWithoutSource("cluster_checks.node_expiration_timeout", 1) fakeTagger := mock.SetupFakeTagger(t) dispatcher := newDispatcher(fakeTagger) config := integration.Config{ @@ -418,12 +423,20 @@ func TestDanglingConfig(t *testing.T) { // shouldDispatchDangling is still false because no node is available assert.False(t, dispatcher.shouldDispatchDangling()) + // force config to dangle long enough to be classified as unscheduled check + assert.False(t, dispatcher.store.danglingConfigs[config.Digest()].unscheduledCheck) + require.Eventually(t, func() bool { + dispatcher.scanUnscheduledChecks() + return dispatcher.store.danglingConfigs[config.Digest()].unscheduledCheck + }, 2*time.Second, 250*time.Millisecond) + // register a node, shouldDispatchDangling will become true dispatcher.processNodeStatus("nodeA", "10.0.0.1", types.NodeStatus{}) assert.True(t, dispatcher.shouldDispatchDangling()) // get the danglings and make sure they are removed from the store - configs := dispatcher.retrieveAndClearDangling() + configs := dispatcher.retrieveDangling() + dispatcher.deleteDangling([]string{config.Digest()}) assert.Len(t, configs, 1) assert.Equal(t, 0, len(dispatcher.store.danglingConfigs)) } diff --git a/pkg/clusteragent/clusterchecks/helpers.go b/pkg/clusteragent/clusterchecks/helpers.go index 8ddddc7187215..fca74d44f6374 100644 --- a/pkg/clusteragent/clusterchecks/helpers.go +++ b/pkg/clusteragent/clusterchecks/helpers.go @@ -33,6 +33,17 @@ func makeConfigArray(configMap map[string]integration.Config) []integration.Conf return configSlice } +// makeConfigArrayFromDangling flattens a map of configs into a slice. Creating a new slice +// allows for thread-safe usage by other external, as long as the field values in +// the config objects are not modified. 
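The dangling-config changes above revolve around danglingConfigWrapper: each undispatched config keeps its creation time, gets flagged as an unscheduled check once it has been dangling longer than cluster_checks.unscheduled_check_threshold, and is dropped from the store once it is finally scheduled. A small sketch of that threshold check (field names mirror the wrapper above; the durations are arbitrary):

package main

import (
	"fmt"
	"time"
)

// wrapper mirrors danglingConfigWrapper's bookkeeping for one config.
type wrapper struct {
	timeCreated      time.Time
	unscheduledCheck bool
}

// isStuckScheduling reports whether the config has been dangling for longer
// than the threshold, mirroring the method on danglingConfigWrapper.
func (w *wrapper) isStuckScheduling(thresholdSeconds int64) bool {
	deadline := w.timeCreated.Add(time.Duration(thresholdSeconds) * time.Second)
	return time.Now().After(deadline)
}

func main() {
	w := &wrapper{timeCreated: time.Now().Add(-90 * time.Second)}
	threshold := int64(60) // e.g. cluster_checks.unscheduled_check_threshold
	if !w.unscheduledCheck && w.isStuckScheduling(threshold) {
		w.unscheduledCheck = true // scanUnscheduledChecks would also bump the unscheduled_check gauge
	}
	fmt.Println(w.unscheduledCheck) // true: dangling for 90s with a 60s threshold
}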
+func makeConfigArrayFromDangling(configMap map[string]*danglingConfigWrapper) []integration.Config { + configSlice := make([]integration.Config, 0, len(configMap)) + for _, c := range configMap { + configSlice = append(configSlice, c.config) + } + return configSlice +} + func timestampNowNano() int64 { return time.Now().UnixNano() } diff --git a/pkg/clusteragent/clusterchecks/metrics.go b/pkg/clusteragent/clusterchecks/metrics.go index 92540e44fabad..ac44e5290257e 100644 --- a/pkg/clusteragent/clusterchecks/metrics.go +++ b/pkg/clusteragent/clusterchecks/metrics.go @@ -19,6 +19,9 @@ var ( danglingConfigs = telemetry.NewGaugeWithOpts("cluster_checks", "configs_dangling", []string{le.JoinLeaderLabel}, "Number of check configurations not dispatched.", telemetry.Options{NoDoubleUnderscoreSep: true}) + unscheduledCheck = telemetry.NewGaugeWithOpts("cluster_checks", "unscheduled_check", + []string{le.JoinLeaderLabel, "config_name", "config_source"}, "Number of check configurations not scheduled.", + telemetry.Options{NoDoubleUnderscoreSep: true}) dispatchedConfigs = telemetry.NewGaugeWithOpts("cluster_checks", "configs_dispatched", []string{"node", le.JoinLeaderLabel}, "Number of check configurations dispatched, by node.", telemetry.Options{NoDoubleUnderscoreSep: true}) diff --git a/pkg/clusteragent/clusterchecks/stats.go b/pkg/clusteragent/clusterchecks/stats.go index 52d57243f60ca..a2decaaedf8e6 100644 --- a/pkg/clusteragent/clusterchecks/stats.go +++ b/pkg/clusteragent/clusterchecks/stats.go @@ -60,12 +60,19 @@ func (d *dispatcher) getStats() *types.Stats { for _, m := range d.store.digestToConfig { checkNames[m.Name] = struct{}{} } + unscheduledChecks := 0 + for _, c := range d.store.danglingConfigs { + if c.unscheduledCheck { + unscheduledChecks++ + } + } return &types.Stats{ - Active: d.store.active, - NodeCount: len(d.store.nodes), - ActiveConfigs: len(d.store.digestToNode), - DanglingConfigs: len(d.store.danglingConfigs), - TotalConfigs: len(d.store.digestToConfig), - CheckNames: checkNames, + Active: d.store.active, + NodeCount: len(d.store.nodes), + ActiveConfigs: len(d.store.digestToNode), + DanglingConfigs: len(d.store.danglingConfigs), + UnscheduledChecks: unscheduledChecks, + TotalConfigs: len(d.store.digestToConfig), + CheckNames: checkNames, } } diff --git a/pkg/clusteragent/clusterchecks/stores.go b/pkg/clusteragent/clusterchecks/stores.go index 285e024c0e074..b39c783631c82 100644 --- a/pkg/clusteragent/clusterchecks/stores.go +++ b/pkg/clusteragent/clusterchecks/stores.go @@ -27,7 +27,7 @@ type clusterStore struct { digestToConfig map[string]integration.Config // All configurations to dispatch digestToNode map[string]string // Node running a config nodes map[string]*nodeStore // All nodes known to the cluster-agent - danglingConfigs map[string]integration.Config // Configs we could not dispatch to any node + danglingConfigs map[string]*danglingConfigWrapper // Configs we could not dispatch to any node endpointsConfigs map[string]map[string]integration.Config // Endpoints configs to be consumed by node agents idToDigest map[checkid.ID]string // link check IDs to check configs } @@ -40,11 +40,15 @@ func newClusterStore() *clusterStore { // reset empties the store and resets all states func (s *clusterStore) reset() { + for _, node := range s.nodes { + dispatchedConfigs.Delete(node.name, le.JoinLeaderValue) + } + s.active = false s.digestToConfig = make(map[string]integration.Config) s.digestToNode = make(map[string]string) s.nodes = make(map[string]*nodeStore) - 
s.danglingConfigs = make(map[string]integration.Config) + s.danglingConfigs = make(map[string]*danglingConfigWrapper) s.endpointsConfigs = make(map[string]map[string]integration.Config) s.idToDigest = make(map[checkid.ID]string) } @@ -74,7 +78,7 @@ func (s *clusterStore) getOrCreateNodeStore(nodeName, clientIP string) *nodeStor // clearDangling resets the danglingConfigs map to a new empty one func (s *clusterStore) clearDangling() { - s.danglingConfigs = make(map[string]integration.Config) + s.danglingConfigs = make(map[string]*danglingConfigWrapper) } // nodeStore holds the state store for one node. diff --git a/pkg/clusteragent/clusterchecks/types/types.go b/pkg/clusteragent/clusterchecks/types/types.go index 352709a556e41..55782768e3cda 100644 --- a/pkg/clusteragent/clusterchecks/types/types.go +++ b/pkg/clusteragent/clusterchecks/types/types.go @@ -74,13 +74,14 @@ type Stats struct { LeaderIP string // Leading - Leader bool - Active bool - NodeCount int - ActiveConfigs int - DanglingConfigs int - TotalConfigs int - CheckNames map[string]struct{} + Leader bool + Active bool + NodeCount int + ActiveConfigs int + DanglingConfigs int + UnscheduledChecks int + TotalConfigs int + CheckNames map[string]struct{} } // LeaderIPCallback describes the leader-election method we diff --git a/pkg/collector/corechecks/cluster/helm/helm.go b/pkg/collector/corechecks/cluster/helm/helm.go index d9392b85873dd..b0b866e06fa7d 100644 --- a/pkg/collector/corechecks/cluster/helm/helm.go +++ b/pkg/collector/corechecks/cluster/helm/helm.go @@ -31,7 +31,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -86,8 +86,8 @@ func (cc *checkConfig) Parse(data []byte) error { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/cluster/helm/stub.go b/pkg/collector/corechecks/cluster/helm/stub.go index 2c708f1e39455..79c17d902a103 100644 --- a/pkg/collector/corechecks/cluster/helm/stub.go +++ b/pkg/collector/corechecks/cluster/helm/stub.go @@ -10,7 +10,7 @@ package helm import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/cluster/ksm/customresources/cr.go b/pkg/collector/corechecks/cluster/ksm/customresources/cr.go new file mode 100644 index 0000000000000..d5f32a1c48441 --- /dev/null +++ b/pkg/collector/corechecks/cluster/ksm/customresources/cr.go @@ -0,0 +1,166 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
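The helm factory changes above track the pkg/util rename from optional.Option/NewOption/NewNoneOption to option.Option/New/None. A generic sketch of the Some/None semantics the factory relies on; this is an illustrative re-implementation, not the actual pkg/util/option API:

package main

import "fmt"

// Option is a minimal illustration of an optional value: either present (New)
// or absent (None).
type Option[T any] struct {
	value T
	set   bool
}

func New[T any](v T) Option[T]     { return Option[T]{value: v, set: true} }
func None[T any]() Option[T]       { return Option[T]{} }
func (o Option[T]) Get() (T, bool) { return o.value, o.set }

// newCheck stands in for a check constructor returned by a Factory.
func newCheck() string { return "helm check" }

func main() {
	// Build with the check available (kubeapiserver build) ...
	withCheck := New(newCheck)
	// ... or the stub build, where no check is registered.
	stub := None[func() string]()

	if ctor, ok := withCheck.Get(); ok {
		fmt.Println(ctor()) // helm check
	}
	if _, ok := stub.Get(); !ok {
		fmt.Println("check not built in")
	}
}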
+ +//go:build kubeapiserver + +package customresources + +import ( + "context" + "time" + + "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" + "github.com/DataDog/datadog-agent/pkg/util/log" + + "github.com/mitchellh/mapstructure" + "github.com/prometheus/client_golang/prometheus" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + "k8s.io/kube-state-metrics/v2/pkg/customresource" + "k8s.io/kube-state-metrics/v2/pkg/customresourcestate" + "k8s.io/kube-state-metrics/v2/pkg/discovery" + "k8s.io/kube-state-metrics/v2/pkg/metric" + generator "k8s.io/kube-state-metrics/v2/pkg/metric_generator" +) + +// NewCustomResourceFactory returns a new custom resource factory that uses the provided client for all CRDs +func NewCustomResourceFactory(factory customresource.RegistryFactory, client dynamic.Interface) customresource.RegistryFactory { + return &crFactory{ + factory: factory, + client: client, + } +} + +type crFactory struct { + factory customresource.RegistryFactory + client dynamic.Interface +} + +func (f *crFactory) Name() string { + return f.factory.Name() +} + +// Hack to force the re-use of our own client for all CRDs +func (f *crFactory) CreateClient(cfg *rest.Config) (interface{}, error) { + if u, ok := f.factory.ExpectedType().(*unstructured.Unstructured); ok { + gvr := schema.GroupVersionResource{ + Group: u.GroupVersionKind().Group, + Version: u.GroupVersionKind().Version, + Resource: f.factory.Name(), + } + return f.client.Resource(gvr), nil + } + return f.factory.CreateClient(cfg) +} + +func (f *crFactory) MetricFamilyGenerators() []generator.FamilyGenerator { + return f.factory.MetricFamilyGenerators() +} + +func (f *crFactory) ExpectedType() interface{} { + return f.factory.ExpectedType() +} + +func (f *crFactory) ListWatch(customResourceClient interface{}, ns string, fieldSelector string) cache.ListerWatcher { + return f.factory.ListWatch(customResourceClient, ns, fieldSelector) +} + +// GetCustomMetricNamesMapper returns a map KSM metric names to Datadog metric names for custom resources +func GetCustomMetricNamesMapper(resources []customresourcestate.Resource) (mapper map[string]string) { + mapper = make(map[string]string) + + for _, customResource := range resources { + for _, generator := range customResource.Metrics { + if generator.Each.Type == metric.Gauge || + generator.Each.Type == metric.StateSet { + mapper[customResource.GetMetricNamePrefix()+"_"+generator.Name] = "customresource." 
+ generator.Name + } + } + } + + return mapper +} + +// Those Prometheus counters are currently not used, but they are required by the KSM API +var ( + crdsAddEventsCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "kube_state_metrics_custom_resource_state_add_events_total", + Help: "Number of times that the CRD informer triggered the add event.", + }) + crdsDeleteEventsCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "kube_state_metrics_custom_resource_state_delete_events_total", + Help: "Number of times that the CRD informer triggered the remove event.", + }) + crdsCacheCountGauge = prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "kube_state_metrics_custom_resource_state_cache", + Help: "Net amount of CRDs affecting the cache currently.", + }) +) + +type customResourceDecoder struct { + data customresourcestate.Metrics +} + +// Decode decodes the custom resource state metrics configuration. +func (d customResourceDecoder) Decode(v interface{}) error { + return mapstructure.Decode(d.data, v) +} + +// GetCustomResourceFactories returns a list of custom resource factories +func GetCustomResourceFactories(resources customresourcestate.Metrics, c *apiserver.APIClient) (factories []customresource.RegistryFactory) { + discovererInstance := &discovery.CRDiscoverer{ + CRDsAddEventsCounter: crdsAddEventsCounter, + CRDsDeleteEventsCounter: crdsDeleteEventsCounter, + CRDsCacheCountGauge: crdsCacheCountGauge, + } + clientConfig, err := apiserver.GetClientConfig(time.Duration(setup.Datadog().GetInt64("kubernetes_apiserver_client_timeout"))*time.Second, 10, 20) + if err != nil { + panic(err) + } + if err := discovererInstance.StartDiscovery(context.Background(), clientConfig); err != nil { + log.Errorf("failed to start custom resource discovery: %v", err) + } + customResourceStateMetricFactoriesFunc, err := customresourcestate.FromConfig(customResourceDecoder{resources}, discovererInstance) + + if err != nil { + log.Errorf("failed to create custom resource state metrics: %v", err) + } else { + customResourceStateMetricFactories, err := customResourceStateMetricFactoriesFunc() + if err != nil { + log.Errorf("failed to create custom resource state metrics: %v", err) + } else { + factories = make([]customresource.RegistryFactory, 0, len(customResourceStateMetricFactories)) + for _, factory := range customResourceStateMetricFactories { + factories = append(factories, NewCustomResourceFactory(factory, c.DynamicCl)) + } + } + } + + return factories +} + +// GetCustomResourceClientsAndCollectors returns a map of custom resource clients and a list of collectors +func GetCustomResourceClientsAndCollectors(resources []customresourcestate.Resource, c *apiserver.APIClient) (clients map[string]interface{}, collectors []string) { + clients = make(map[string]interface{}) + collectors = make([]string, 0, len(resources)) + + for _, cr := range resources { + gvr := schema.GroupVersionResource{ + Group: cr.GroupVersionKind.Group, + Version: cr.GroupVersionKind.Version, + Resource: cr.GetResourceName(), + } + + cl := c.DynamicCl.Resource(gvr) + clients[cr.GetResourceName()] = cl + collectors = append(collectors, gvr.String()) + } + + return clients, collectors +} diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go index 31bb81f63a916..e5000dea592b4 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state.go @@ -16,6 +16,16 @@ import ( "strings" 
"time" + "github.com/samber/lo" + "gopkg.in/yaml.v2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/discovery" + "k8s.io/client-go/tools/cache" + "k8s.io/kube-state-metrics/v2/pkg/allowdenylist" + "k8s.io/kube-state-metrics/v2/pkg/customresource" + "k8s.io/kube-state-metrics/v2/pkg/customresourcestate" + "k8s.io/kube-state-metrics/v2/pkg/options" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/comp/core/tagger/kubetags" "github.com/DataDog/datadog-agent/comp/core/tagger/tags" @@ -35,15 +45,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" - - "gopkg.in/yaml.v2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/discovery" - "k8s.io/client-go/tools/cache" - "k8s.io/kube-state-metrics/v2/pkg/allowdenylist" - "k8s.io/kube-state-metrics/v2/pkg/customresource" - "k8s.io/kube-state-metrics/v2/pkg/options" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -70,6 +72,8 @@ var extendedCollectors = map[string]string{ // collectorNameReplacement contains a mapping of collector names as they would appear in the KSM config to what // their new collector name would be. For backwards compatibility. var collectorNameReplacement = map[string]string{ + "apiservices": "apiregistration.k8s.io/v1, Resource=apiservices", + "customresourcedefinitions": "apiextensions.k8s.io/v1, Resource=customresourcedefinitions", // verticalpodautoscalers were removed from the built-in KSM metrics in KSM 2.9, and the changes made to // the KSM builder in KSM 2.9 result in the detected custom resource store name being different. "verticalpodautoscalers": "autoscaling.k8s.io/v1beta2, Resource=verticalpodautoscalers", @@ -117,6 +121,25 @@ type KSMConfig struct { // - pods Collectors []string `yaml:"collectors"` + // CustomResourceStateMetrics defines the custom resource states metrics + // https://github.com/kubernetes/kube-state-metrics/blob/main/docs/metrics/extend/customresourcestate-metrics.md + // Example: Enable custom resource state metrics for CRD mycrd. + // custom_resource: + // spec: + // resources: + // - groupVersionKind: + // group: "datadoghq.com" + // kind: "DatadogAgent" + // version: "v2alpha1" + // metrics: + // - name: "custom_metric" + // help: "custom_metric" + // each: + // type: Gauge + // gauge: + // path: [status, agent, available] + CustomResource customresourcestate.Metrics `yaml:"custom_resource"` + // LabelJoins allows adding the tags to join from other KSM metrics. // Example: Joining for deployment metrics. 
Based on: // kube_deployment_labels{deployment="kube-dns",label_addonmanager_kubernetes_io_mode="Reconcile"} @@ -261,6 +284,8 @@ func (k *KSMCheck) Configure(senderManager sender.SenderManager, integrationConf return err } + maps.Copy(k.metricNamesMapper, customresources.GetCustomMetricNamesMapper(k.instance.CustomResource.Spec.Resources)) + // Retrieve cluster name k.getClusterName() @@ -483,6 +508,13 @@ func (k *KSMCheck) discoverCustomResources(c *apiserver.APIClient, collectors [] clients[f.Name()] = client } + customResourceFactories := customresources.GetCustomResourceFactories(k.instance.CustomResource, c) + customResourceClients, customResourceCollectors := customresources.GetCustomResourceClientsAndCollectors(k.instance.CustomResource.Spec.Resources, c) + + collectors = lo.Uniq(append(collectors, customResourceCollectors...)) + maps.Copy(clients, customResourceClients) + factories = append(factories, customResourceFactories...) + return customResources{ collectors: collectors, clients: clients, @@ -626,11 +658,15 @@ func (k *KSMCheck) processMetrics(sender sender.Sender, metrics map[string][]ksm } continue } + metricPrefix := ksmMetricPrefix + if strings.HasPrefix(metricFamily.Name, "kube_customresource_") { + metricPrefix = metricPrefix[:len(metricPrefix)-1] + "_" + } if ddname, found := k.metricNamesMapper[metricFamily.Name]; found { lMapperOverride := labelsMapperOverride(metricFamily.Name) for _, m := range metricFamily.ListMetrics { hostname, tagList := k.hostnameAndTags(m.Labels, labelJoiner, lMapperOverride) - sender.Gauge(ksmMetricPrefix+ddname, m.Val, hostname, tagList) + sender.Gauge(metricPrefix+ddname, m.Val, hostname, tagList) } continue } @@ -952,8 +988,8 @@ func (k *KSMCheck) sendTelemetry(s sender.Sender) { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_defaults.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_defaults.go index 3cc481dda3fa7..6c0cc604f71e8 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_defaults.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_defaults.go @@ -89,7 +89,6 @@ func defaultMetricNamesMapper() map[string]string { "kube_verticalpodautoscaler_spec_resourcepolicy_container_policies_minallowed": "vpa.spec_container_minallowed", "kube_verticalpodautoscaler_spec_resourcepolicy_container_policies_maxallowed": "vpa.spec_container_maxallowed", "kube_cronjob_spec_suspend": "cronjob.spec_suspend", - "kube_job_duration": "job.duration", "kube_ingress_path": "ingress.path", } } diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_transformers.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_transformers.go index 192826c875df8..8b8be97d79c5e 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_transformers.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_transformers.go @@ -101,6 +101,7 @@ func defaultMetricTransformers() map[string]metricTransformerFunc { "kube_cronjob_next_schedule_time": cronJobNextScheduleTransformer, "kube_cronjob_status_last_schedule_time": cronJobLastScheduleTransformer, "kube_job_complete": jobCompleteTransformer, + "kube_job_duration": jobDurationTransformer, "kube_job_failed": jobFailedTransformer, "kube_job_status_failed": jobStatusFailedTransformer, 
"kube_job_status_succeeded": jobStatusSucceededTransformer, @@ -372,6 +373,10 @@ func jobCompleteTransformer(s sender.Sender, _ string, metric ksmstore.DDMetric, jobServiceCheck(s, metric, servicecheck.ServiceCheckOK, hostname, tags) } +func jobDurationTransformer(s sender.Sender, _ string, metric ksmstore.DDMetric, hostname string, tags []string, _ time.Time) { + jobMetric(s, metric, ksmMetricPrefix+"job.duration", hostname, tags) +} + // jobFailedTransformer sends a metric and a service check based on kube_job_failed func jobFailedTransformer(s sender.Sender, _ string, metric ksmstore.DDMetric, hostname string, tags []string, _ time.Time) { for i, tag := range tags { diff --git a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_transformers_test.go b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_transformers_test.go index d9df9dc7eadf1..57893bb167b2e 100644 --- a/pkg/collector/corechecks/cluster/ksm/kubernetes_state_transformers_test.go +++ b/pkg/collector/corechecks/cluster/ksm/kubernetes_state_transformers_test.go @@ -270,7 +270,7 @@ func Test_jobCompleteTransformer(t *testing.T) { expected: &serviceCheck{ name: "kubernetes_state.job.complete", status: servicecheck.ServiceCheckOK, - tags: []string{"job_name:foo", "namespace:default"}, + tags: []string{"job_name:foo-1509998340", "namespace:default", "kube_cronjob:foo"}, }, }, { @@ -289,7 +289,7 @@ func Test_jobCompleteTransformer(t *testing.T) { expected: &serviceCheck{ name: "kubernetes_state.job.complete", status: servicecheck.ServiceCheckOK, - tags: []string{"job:foo", "namespace:default"}, + tags: []string{"job:foo-1509998340", "namespace:default", "kube_cronjob:foo"}, }, }, { @@ -315,7 +315,7 @@ func Test_jobCompleteTransformer(t *testing.T) { currentTime := time.Now() jobCompleteTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) if tt.expected != nil { - s.AssertServiceCheck(t, tt.expected.name, tt.expected.status, tt.args.hostname, tt.args.tags, "") + s.AssertServiceCheck(t, tt.expected.name, tt.expected.status, tt.args.hostname, tt.expected.tags, "") s.AssertNumberOfCalls(t, "ServiceCheck", 1) } else { s.AssertNotCalled(t, "ServiceCheck") @@ -324,6 +324,82 @@ func Test_jobCompleteTransformer(t *testing.T) { } } +func Test_jobDurationTransformer(t *testing.T) { + tests := []struct { + name string + args args + expected *metricsExpected + }{ + { + name: "nominal case, job_name tag", + args: args{ + name: "kube_job_duration", + metric: ksmstore.DDMetric{ + Val: 1, + Labels: map[string]string{ + "job_name": "foo-1509998340", + "namespace": "default", + }, + }, + tags: []string{"job_name:foo-1509998340", "namespace:default"}, + }, + expected: &metricsExpected{ + name: "kubernetes_state.job.duration", + val: 1, + tags: []string{"job_name:foo-1509998340", "namespace:default", "kube_cronjob:foo"}, + }, + }, + { + name: "nominal case, job tag", + args: args{ + name: "kube_job_duration", + metric: ksmstore.DDMetric{ + Val: 1, + Labels: map[string]string{ + "job": "foo-1509998340", + "namespace": "default", + }, + }, + tags: []string{"job:foo-1509998340", "namespace:default"}, + }, + expected: &metricsExpected{ + name: "kubernetes_state.job.duration", + val: 1, + tags: []string{"job:foo-1509998340", "namespace:default", "kube_cronjob:foo"}, + }, + }, + { + name: "inactive", + args: args{ + name: "kube_job_duration", + metric: ksmstore.DDMetric{ + Val: 0, + Labels: map[string]string{ + "job_name": "foo-1509998340", + "namespace": "default", + }, + }, + tags: 
[]string{"job_name:foo-1509998340", "namespace:default"}, + }, + expected: nil, + }, + } + for _, tt := range tests { + s := mocksender.NewMockSender("ksm") + s.SetupAcceptAll() + t.Run(tt.name, func(t *testing.T) { + currentTime := time.Now() + jobDurationTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) + if tt.expected != nil { + s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.expected.tags) + s.AssertNumberOfCalls(t, "Gauge", 1) + } else { + s.AssertNotCalled(t, "Gauge") + } + }) + } +} + func Test_jobFailedTransformer(t *testing.T) { tests := []struct { name string @@ -443,7 +519,7 @@ func Test_jobStatusSucceededTransformer(t *testing.T) { expected: &metricsExpected{ name: "kubernetes_state.job.succeeded", val: 1, - tags: []string{"job_name:foo", "namespace:default"}, + tags: []string{"job_name:foo-1509998340", "namespace:default", "kube_cronjob:foo"}, }, }, { @@ -462,7 +538,7 @@ func Test_jobStatusSucceededTransformer(t *testing.T) { expected: &metricsExpected{ name: "kubernetes_state.job.succeeded", val: 1, - tags: []string{"job:foo", "namespace:default"}, + tags: []string{"job:foo-1509998340", "namespace:default", "kube_cronjob:foo"}, }, }, { @@ -488,7 +564,7 @@ func Test_jobStatusSucceededTransformer(t *testing.T) { currentTime := time.Now() jobStatusSucceededTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) if tt.expected != nil { - s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.args.tags) + s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.expected.tags) s.AssertNumberOfCalls(t, "Gauge", 1) } else { s.AssertNotCalled(t, "Gauge") @@ -755,7 +831,7 @@ func Test_pvPhaseTransformer(t *testing.T) { currentTime := time.Now() pvPhaseTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) if tt.expected != nil { - s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.args.tags) + s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.expected.tags) s.AssertNumberOfCalls(t, "Gauge", 1) } else { s.AssertNotCalled(t, "Gauge") @@ -814,7 +890,7 @@ func Test_serviceTypeTransformer(t *testing.T) { currentTime := time.Now() serviceTypeTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) if tt.expected != nil { - s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.args.tags) + s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.expected.tags) s.AssertNumberOfCalls(t, "Gauge", 1) } else { s.AssertNotCalled(t, "Gauge") @@ -873,7 +949,7 @@ func Test_podPhaseTransformer(t *testing.T) { currentTime := time.Now() podPhaseTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) if tt.expected != nil { - s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.args.tags) + s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.expected.tags) s.AssertNumberOfCalls(t, "Gauge", 1) } else { s.AssertNotCalled(t, "Gauge") @@ -1043,7 +1119,7 @@ func Test_containerWaitingReasonTransformer(t *testing.T) { currentTime := time.Now() containerWaitingReasonTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) if tt.expected != nil { - s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.args.tags) + 
s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.expected.tags) s.AssertNumberOfCalls(t, "Gauge", 1) } else { s.AssertNotCalled(t, "Gauge") @@ -1129,7 +1205,7 @@ func Test_containerTerminatedReasonTransformer(t *testing.T) { currentTime := time.Now() containerTerminatedReasonTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) if tt.expected != nil { - s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.args.tags) + s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.expected.tags) s.AssertNumberOfCalls(t, "Gauge", 1) } else { s.AssertNotCalled(t, "Gauge") @@ -1218,7 +1294,7 @@ func Test_limitrangeTransformer(t *testing.T) { currentTime := time.Now() limitrangeTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) if tt.expected != nil { - s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.args.tags) + s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.expected.tags) s.AssertNumberOfCalls(t, "Gauge", 1) } else { s.AssertNotCalled(t, "Gauge") @@ -1291,7 +1367,7 @@ func Test_nodeUnschedulableTransformer(t *testing.T) { currentTime := time.Now() nodeUnschedulableTransformer(s, tt.args.name, tt.args.metric, tt.args.hostname, tt.args.tags, currentTime) if tt.expected != nil { - s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.args.tags) + s.AssertMetric(t, "Gauge", tt.expected.name, tt.expected.val, tt.args.hostname, tt.expected.tags) s.AssertNumberOfCalls(t, "Gauge", 1) } else { s.AssertNotCalled(t, "Gauge") diff --git a/pkg/collector/corechecks/cluster/ksm/stub.go b/pkg/collector/corechecks/cluster/ksm/stub.go index 300927b4ebb59..b802c184ba0be 100644 --- a/pkg/collector/corechecks/cluster/ksm/stub.go +++ b/pkg/collector/corechecks/cluster/ksm/stub.go @@ -10,7 +10,7 @@ package ksm import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go index 7603251ab5ed4..3ff88474680bd 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/kubernetes_apiserver.go @@ -33,7 +33,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Covers the Control Plane service check and the in memory pod metadata. 
@@ -143,8 +143,8 @@ func NewKubeASCheck(base core.CheckBase, instance *KubeASConfig, tagger tagger.C } // Factory creates a new check factory -func Factory(tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption( +func Factory(tagger tagger.Component) option.Option[func() check.Check] { + return option.New( func() check.Check { return newCheck(tagger) }, diff --git a/pkg/collector/corechecks/cluster/kubernetesapiserver/stub.go b/pkg/collector/corechecks/cluster/kubernetesapiserver/stub.go index f50f78bde14f0..152f7bfa92dd1 100644 --- a/pkg/collector/corechecks/cluster/kubernetesapiserver/stub.go +++ b/pkg/collector/corechecks/cluster/kubernetesapiserver/stub.go @@ -11,7 +11,7 @@ package kubernetesapiserver import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -20,6 +20,6 @@ const ( ) // Factory creates a new check factory -func Factory(_ tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(_ tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go index b85af3b72f338..67fd75bf8a3ba 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/collector.go @@ -34,6 +34,9 @@ type Collector interface { // Run triggers the collection process given a configuration and returns the // collection result. Returns an error if the collection failed. Run(*CollectorRunConfig) (*CollectorRunResult, error) + + // Process is used to process the list of resources and return the result. + Process(rcfg *CollectorRunConfig, list interface{}) (*CollectorRunResult, error) } // CollectorMetadata contains information about a collector. @@ -48,6 +51,8 @@ type CollectorMetadata struct { Version string IsSkipped bool SkippedReason string + LabelsAsTags map[string]string + AnnotationsAsTags map[string]string } // FullName returns a string that contains the collector name and version. diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go index d87c43156d107..e00023327b931 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/ecs/task.go @@ -60,6 +60,11 @@ func (t *TaskCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Co tasks = append(tasks, t.fetchContainers(rcfg, newTask)) } + return t.Process(rcfg, tasks) +} + +// Process is used to process the resources. 
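+// The task list is received as an opaque interface{} so that Process can also
+// be driven through the new Collector.Process interface method; the number of
+// listed resources is therefore derived from Handlers().ResourceList rather
+// than from len(list).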
+func (t *TaskCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := &processors.ECSProcessorContext{ BaseProcessorContext: processors.BaseProcessorContext{ Cfg: rcfg.Config, @@ -75,7 +80,7 @@ func (t *TaskCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Co Hostname: rcfg.HostName, } - processResult, processed := t.processor.Process(ctx, tasks) + processResult, processed := t.processor.Process(ctx, list) if processed == -1 { return nil, fmt.Errorf("unable to process resources: a panic occurred") @@ -83,7 +88,7 @@ func (t *TaskCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Co result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(t.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/inventory/inventory.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/inventory/inventory.go index 6372a6ce10f25..b4ada4c57191a 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/inventory/inventory.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/inventory/inventory.go @@ -16,6 +16,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" k8sCollectors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" ) // CollectorInventory is used to store and retrieve available collectors. @@ -26,34 +27,35 @@ type CollectorInventory struct { // NewCollectorInventory returns a new inventory containing all known // collectors. 
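+// Metadata-as-tags mappings (resource labels/annotations to tag names) are
+// resolved once here via utils.GetMetadataAsTags and handed to every
+// collector constructor below.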
func NewCollectorInventory(cfg config.Component, store workloadmeta.Component, tagger tagger.Component) *CollectorInventory { + metadataAsTags := utils.GetMetadataAsTags(cfg) return &CollectorInventory{ collectors: []collectors.CollectorVersions{ k8sCollectors.NewCRDCollectorVersions(), k8sCollectors.NewClusterCollectorVersions(), - k8sCollectors.NewClusterRoleBindingCollectorVersions(), - k8sCollectors.NewClusterRoleCollectorVersions(), - k8sCollectors.NewCronJobCollectorVersions(), - k8sCollectors.NewDaemonSetCollectorVersions(), - k8sCollectors.NewDeploymentCollectorVersions(), - k8sCollectors.NewHorizontalPodAutoscalerCollectorVersions(), - k8sCollectors.NewIngressCollectorVersions(), - k8sCollectors.NewJobCollectorVersions(), - k8sCollectors.NewLimitRangeCollectorVersions(), - k8sCollectors.NewNamespaceCollectorVersions(), - k8sCollectors.NewNetworkPolicyCollectorVersions(), - k8sCollectors.NewNodeCollectorVersions(), - k8sCollectors.NewPersistentVolumeClaimCollectorVersions(), - k8sCollectors.NewPersistentVolumeCollectorVersions(), - k8sCollectors.NewPodDisruptionBudgetCollectorVersions(), - k8sCollectors.NewReplicaSetCollectorVersions(), - k8sCollectors.NewRoleBindingCollectorVersions(), - k8sCollectors.NewRoleCollectorVersions(), - k8sCollectors.NewServiceAccountCollectorVersions(), - k8sCollectors.NewServiceCollectorVersions(), - k8sCollectors.NewStatefulSetCollectorVersions(), - k8sCollectors.NewStorageClassCollectorVersions(), - k8sCollectors.NewUnassignedPodCollectorVersions(cfg, store, tagger), - k8sCollectors.NewVerticalPodAutoscalerCollectorVersions(), + k8sCollectors.NewClusterRoleBindingCollectorVersions(metadataAsTags), + k8sCollectors.NewClusterRoleCollectorVersions(metadataAsTags), + k8sCollectors.NewCronJobCollectorVersions(metadataAsTags), + k8sCollectors.NewDaemonSetCollectorVersions(metadataAsTags), + k8sCollectors.NewDeploymentCollectorVersions(metadataAsTags), + k8sCollectors.NewHorizontalPodAutoscalerCollectorVersions(metadataAsTags), + k8sCollectors.NewIngressCollectorVersions(metadataAsTags), + k8sCollectors.NewJobCollectorVersions(metadataAsTags), + k8sCollectors.NewLimitRangeCollectorVersions(metadataAsTags), + k8sCollectors.NewNamespaceCollectorVersions(metadataAsTags), + k8sCollectors.NewNetworkPolicyCollectorVersions(metadataAsTags), + k8sCollectors.NewNodeCollectorVersions(metadataAsTags), + k8sCollectors.NewPersistentVolumeClaimCollectorVersions(metadataAsTags), + k8sCollectors.NewPersistentVolumeCollectorVersions(metadataAsTags), + k8sCollectors.NewPodDisruptionBudgetCollectorVersions(metadataAsTags), + k8sCollectors.NewReplicaSetCollectorVersions(metadataAsTags), + k8sCollectors.NewRoleBindingCollectorVersions(metadataAsTags), + k8sCollectors.NewRoleCollectorVersions(metadataAsTags), + k8sCollectors.NewServiceAccountCollectorVersions(metadataAsTags), + k8sCollectors.NewServiceCollectorVersions(metadataAsTags), + k8sCollectors.NewStatefulSetCollectorVersions(metadataAsTags), + k8sCollectors.NewStorageClassCollectorVersions(metadataAsTags), + k8sCollectors.NewUnassignedPodCollectorVersions(cfg, store, tagger, metadataAsTags), + k8sCollectors.NewVerticalPodAutoscalerCollectorVersions(metadataAsTags), }, } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cluster.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cluster.go index 916ab29d9eb73..a5162fad08b10 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cluster.go +++ 
b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cluster.go @@ -44,7 +44,7 @@ func NewClusterCollector() *ClusterCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "clusters", + Name: clusterName, NodeType: orchestrator.K8sCluster, }, processor: k8sProcessors.NewClusterProcessor(), @@ -74,6 +74,11 @@ func (c *ClusterCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *ClusterCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed, err := c.processor.Process(ctx, list) diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrole.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrole.go index 1cdc6518ddc4f..1797c13216aa5 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrole.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrole.go @@ -11,8 +11,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" - "k8s.io/apimachinery/pkg/labels" rbacv1Informers "k8s.io/client-go/informers/rbac/v1" rbacv1Listers "k8s.io/client-go/listers/rbac/v1" @@ -20,9 +20,9 @@ import ( ) // NewClusterRoleCollectorVersions builds the group of collector versions. -func NewClusterRoleCollectorVersions() collectors.CollectorVersions { +func NewClusterRoleCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewClusterRoleCollector(), + NewClusterRoleCollector(metadataAsTags), ) } @@ -36,7 +36,11 @@ type ClusterRoleCollector struct { // NewClusterRoleCollector creates a new collector for the Kubernetes // ClusterRole resource. -func NewClusterRoleCollector() *ClusterRoleCollector { +func NewClusterRoleCollector(metadataAsTags utils.MetadataAsTags) *ClusterRoleCollector { + resourceType := getResourceType(clusterRoleName, clusterRoleVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &ClusterRoleCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +48,11 @@ func NewClusterRoleCollector() *ClusterRoleCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "clusterroles", + Name: clusterRoleName, NodeType: orchestrator.K8sClusterRole, - Version: "rbac.authorization.k8s.io/v1", + Version: clusterRoleVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.ClusterRoleHandlers)), } @@ -75,6 +81,11 @@ func (c *ClusterRoleCollector) Run(rcfg *collectors.CollectorRunConfig) (*collec return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. 
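Every k8s collector in the remainder of this patch gets the same treatment: `Run` keeps the informer listing and error handling, then hands the result to a new `Process(rcfg, list)` method, and `ResourcesListed` is computed through the processor's handlers instead of `len(list)`. A condensed, self-contained sketch of that shape (all types here are stand-ins for the real collectors/processors packages; `ResourceList` is assumed to unwrap the opaque list into its items, and the real method also receives a processor context):

```go
package main

import "fmt"

// Stand-ins for the real collectors/processors types; illustration only.
type runResult struct{ listed, processed int }

type handlers interface {
	// ResourceList is assumed to unwrap the opaque list into its items.
	ResourceList(list interface{}) []interface{}
}

type sliceHandlers struct{}

func (sliceHandlers) ResourceList(list interface{}) []interface{} { return list.([]interface{}) }

type collector struct{ h handlers }

// Run lists the resources (hard-coded here) and delegates to Process.
func (c *collector) Run() (*runResult, error) {
	var list interface{} = []interface{}{"a", "b", "c"}
	return c.Process(list)
}

// Process can also be fed a list obtained elsewhere, which is the point of
// promoting it to the Collector interface; listed resources are counted
// through the handlers rather than len(list).
func (c *collector) Process(list interface{}) (*runResult, error) {
	items := c.h.ResourceList(list)
	return &runResult{listed: len(items), processed: len(items)}, nil
}

func main() {
	res, _ := (&collector{h: sliceHandlers{}}).Run()
	fmt.Printf("listed=%d processed=%d\n", res.listed, res.processed)
}
```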
+func (c *ClusterRoleCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +96,7 @@ func (c *ClusterRoleCollector) Run(rcfg *collectors.CollectorRunConfig) (*collec result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrolebinding.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrolebinding.go index 715289b771dc8..e9881e0903276 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrolebinding.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/clusterrolebinding.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewClusterRoleBindingCollectorVersions builds the group of collector versions. -func NewClusterRoleBindingCollectorVersions() collectors.CollectorVersions { +func NewClusterRoleBindingCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewClusterRoleBindingCollector(), + NewClusterRoleBindingCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type ClusterRoleBindingCollector struct { // NewClusterRoleBindingCollector creates a new collector for the Kubernetes // ClusterRoleBinding resource. -func NewClusterRoleBindingCollector() *ClusterRoleBindingCollector { +func NewClusterRoleBindingCollector(metadataAsTags utils.MetadataAsTags) *ClusterRoleBindingCollector { + resourceType := getResourceType(clusterRoleBindingName, clusterRoleBindingVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &ClusterRoleBindingCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewClusterRoleBindingCollector() *ClusterRoleBindingCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "clusterrolebindings", + Name: clusterRoleBindingName, NodeType: orchestrator.K8sClusterRoleBinding, - Version: "rbac.authorization.k8s.io/v1", + Version: clusterRoleBindingVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.ClusterRoleBindingHandlers)), } @@ -75,6 +82,11 @@ func (c *ClusterRoleBindingCollector) Run(rcfg *collectors.CollectorRunConfig) ( return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. 
+func (c *ClusterRoleBindingCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *ClusterRoleBindingCollector) Run(rcfg *collectors.CollectorRunConfig) ( result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cr.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cr.go index 107301727fee9..24eea5c8af1b2 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cr.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cr.go @@ -93,6 +93,11 @@ func (c *CRCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Coll return nil, collectors.NewListingError(fmt.Errorf("crd collector %s/%s has reached to the limit %d, skipping it", c.metadata.Version, c.metadata.Name, defaultMaximumCRDQuota)) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *CRCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -103,7 +108,7 @@ func (c *CRCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Coll result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/crd.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/crd.go index b2f5f5463ec58..3d8d2ed13c667 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/crd.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/crd.go @@ -45,9 +45,9 @@ func NewCRDCollector() *CRDCollector { IsManifestProducer: true, IsMetadataProducer: false, SupportsManifestBuffering: false, - Name: "customresourcedefinitions", + Name: crdName, NodeType: orchestrator.K8sCRD, - Version: "apiextensions.k8s.io/v1", + Version: crdVersion, }, processor: processors.NewProcessor(new(k8sProcessors.CRDHandlers)), } @@ -81,6 +81,11 @@ func (c *CRDCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Col return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. 
+func (c *CRDCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -91,7 +96,7 @@ func (c *CRDCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Col result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob.go index 5d2fbcfa980ea..c91082b01fcf2 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob.go @@ -9,12 +9,13 @@ package k8s import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" + "github.com/DataDog/datadog-agent/pkg/config/utils" ) // NewCronJobCollectorVersions builds the group of collector versions for -func NewCronJobCollectorVersions() collectors.CollectorVersions { +func NewCronJobCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewCronJobV1Collector(), - NewCronJobV1Beta1Collector(), + NewCronJobV1Collector(metadataAsTags), + NewCronJobV1Beta1Collector(metadataAsTags), ) } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1.go index 72551c168ee3d..b1d9c6486e516 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -28,7 +29,11 @@ type CronJobV1Collector struct { } // NewCronJobV1Collector creates a new collector for the Kubernetes Job resource. 
-func NewCronJobV1Collector() *CronJobV1Collector { +func NewCronJobV1Collector(metadataAsTags utils.MetadataAsTags) *CronJobV1Collector { + resourceType := getResourceType(cronJobName, cronJobVersionV1) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &CronJobV1Collector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -36,9 +41,11 @@ func NewCronJobV1Collector() *CronJobV1Collector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "cronjobs", + Name: cronJobName, NodeType: orchestrator.K8sCronJob, - Version: "batch/v1", + Version: cronJobVersionV1, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.CronJobV1Handlers)), } @@ -67,6 +74,11 @@ func (c *CronJobV1Collector) Run(rcfg *collectors.CollectorRunConfig) (*collecto return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *CronJobV1Collector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -77,7 +89,7 @@ func (c *CronJobV1Collector) Run(rcfg *collectors.CollectorRunConfig) (*collecto result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1beta1.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1beta1.go index 000fa354b5b81..314121e3b1d54 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1beta1.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/cronjob_v1beta1.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -28,7 +29,11 @@ type CronJobV1Beta1Collector struct { } // NewCronJobV1Beta1Collector creates a new collector for the Kubernetes Job resource. 
-func NewCronJobV1Beta1Collector() *CronJobV1Beta1Collector { +func NewCronJobV1Beta1Collector(metadataAsTags utils.MetadataAsTags) *CronJobV1Beta1Collector { + resourceType := getResourceType(cronJobName, cronJobVersionV1Beta1) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &CronJobV1Beta1Collector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: false, @@ -36,9 +41,11 @@ func NewCronJobV1Beta1Collector() *CronJobV1Beta1Collector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "cronjobs", + Name: cronJobName, NodeType: orchestrator.K8sCronJob, - Version: "batch/v1beta1", + Version: cronJobVersionV1Beta1, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.CronJobV1Beta1Handlers)), } @@ -67,6 +74,11 @@ func (c *CronJobV1Beta1Collector) Run(rcfg *collectors.CollectorRunConfig) (*col return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *CronJobV1Beta1Collector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -77,7 +89,7 @@ func (c *CronJobV1Beta1Collector) Run(rcfg *collectors.CollectorRunConfig) (*col result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/daemonset.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/daemonset.go index bc01177cbb564..76985d0e279e7 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/daemonset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/daemonset.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewDaemonSetCollectorVersions builds the group of collector versions. -func NewDaemonSetCollectorVersions() collectors.CollectorVersions { +func NewDaemonSetCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewDaemonSetCollector(), + NewDaemonSetCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type DaemonSetCollector struct { // NewDaemonSetCollector creates a new collector for the Kubernetes DaemonSet // resource. 
-func NewDaemonSetCollector() *DaemonSetCollector { +func NewDaemonSetCollector(metadataAsTags utils.MetadataAsTags) *DaemonSetCollector { + resourceType := getResourceType(daemonSetName, daemonSetVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &DaemonSetCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewDaemonSetCollector() *DaemonSetCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "daemonsets", + Name: daemonSetName, NodeType: orchestrator.K8sDaemonSet, - Version: "apps/v1", + Version: daemonSetVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.DaemonSetHandlers)), } @@ -75,6 +82,11 @@ func (c *DaemonSetCollector) Run(rcfg *collectors.CollectorRunConfig) (*collecto return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *DaemonSetCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *DaemonSetCollector) Run(rcfg *collectors.CollectorRunConfig) (*collecto result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/deployment.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/deployment.go index 668603d8a7e4c..fcb6067347c99 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/deployment.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/deployment.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewDeploymentCollectorVersions builds the group of collector versions. -func NewDeploymentCollectorVersions() collectors.CollectorVersions { +func NewDeploymentCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewDeploymentCollector(), + NewDeploymentCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type DeploymentCollector struct { // NewDeploymentCollector creates a new collector for the Kubernetes Deployment // resource. 
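The constructor below, like the other New*Collector constructors in this patch, resolves per-resource labels/annotations-as-tags mappings from the agent configuration and stores them on the collector metadata. A small, self-contained illustration of the lookup shape; the `"<resource>.<group>"` key format produced by `getResourceType` and the example mapping are assumptions, not values taken from this patch:

```go
package main

import "fmt"

func main() {
	// Illustrative stand-in for utils.GetMetadataAsTags(cfg).GetResourcesLabelsAsTags().
	resourcesLabelsAsTags := map[string]map[string]string{
		"deployments.apps": {"team": "kube_team"}, // hypothetical user configuration
	}

	// Assumed key format for getResourceType(deploymentName, deploymentVersion).
	resourceType := "deployments.apps"

	// A missing entry simply yields a nil map, so resources without any
	// configured mappings carry empty LabelsAsTags/AnnotationsAsTags.
	labelsAsTags := resourcesLabelsAsTags[resourceType]
	fmt.Println(labelsAsTags) // map[team:kube_team] -> CollectorMetadata.LabelsAsTags
}
```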
-func NewDeploymentCollector() *DeploymentCollector { +func NewDeploymentCollector(metadataAsTags utils.MetadataAsTags) *DeploymentCollector { + resourceType := getResourceType(deploymentName, deploymentVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &DeploymentCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewDeploymentCollector() *DeploymentCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "deployments", + Name: deploymentName, NodeType: orchestrator.K8sDeployment, - Version: "apps/v1", + Version: deploymentVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.DeploymentHandlers)), } @@ -75,6 +82,11 @@ func (c *DeploymentCollector) Run(rcfg *collectors.CollectorRunConfig) (*collect return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *DeploymentCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *DeploymentCollector) Run(rcfg *collectors.CollectorRunConfig) (*collect result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/horizontalpodautoscaler.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/horizontalpodautoscaler.go index 2f6565ca7647d..01d9471aff12d 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/horizontalpodautoscaler.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/horizontalpodautoscaler.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" v2Informers "k8s.io/client-go/informers/autoscaling/v2" @@ -21,9 +22,9 @@ import ( ) // NewHorizontalPodAutoscalerCollectorVersions builds the group of collector versions. -func NewHorizontalPodAutoscalerCollectorVersions() collectors.CollectorVersions { +func NewHorizontalPodAutoscalerCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewHorizontalPodAutoscalerCollector(), + NewHorizontalPodAutoscalerCollector(metadataAsTags), ) } @@ -37,7 +38,11 @@ type HorizontalPodAutoscalerCollector struct { // NewHorizontalPodAutoscalerCollector creates a new collector for the Kubernetes // HorizontalPodAutoscaler resource. 
-func NewHorizontalPodAutoscalerCollector() *HorizontalPodAutoscalerCollector { +func NewHorizontalPodAutoscalerCollector(metadataAsTags utils.MetadataAsTags) *HorizontalPodAutoscalerCollector { + resourceType := getResourceType(hpaName, hpaVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &HorizontalPodAutoscalerCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -45,9 +50,11 @@ func NewHorizontalPodAutoscalerCollector() *HorizontalPodAutoscalerCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "horizontalpodautoscalers", + Name: hpaName, NodeType: orchestrator.K8sHorizontalPodAutoscaler, - Version: "autoscaling/v2", + Version: hpaVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.HorizontalPodAutoscalerHandlers)), } @@ -76,6 +83,11 @@ func (c *HorizontalPodAutoscalerCollector) Run(rcfg *collectors.CollectorRunConf return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *HorizontalPodAutoscalerCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -86,7 +98,7 @@ func (c *HorizontalPodAutoscalerCollector) Run(rcfg *collectors.CollectorRunConf result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/ingress.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/ingress.go index 197dea30727ed..eb4b9d31ddb9b 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/ingress.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/ingress.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewIngressCollectorVersions builds the group of collector versions. -func NewIngressCollectorVersions() collectors.CollectorVersions { +func NewIngressCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewIngressCollector(), + NewIngressCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type IngressCollector struct { // NewIngressCollector creates a new collector for the Kubernetes Ingress // resource. 
-func NewIngressCollector() *IngressCollector { +func NewIngressCollector(metadataAsTags utils.MetadataAsTags) *IngressCollector { + resourceType := getResourceType(ingressName, ingressVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &IngressCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewIngressCollector() *IngressCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "ingresses", + Name: ingressName, NodeType: orchestrator.K8sIngress, - Version: "networking.k8s.io/v1", + Version: ingressVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.IngressHandlers)), } @@ -75,6 +82,11 @@ func (c *IngressCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *IngressCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *IngressCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/job.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/job.go index 6a66eed934877..0b1bed004274a 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/job.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/job.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewJobCollectorVersions builds the group of collector versions. -func NewJobCollectorVersions() collectors.CollectorVersions { +func NewJobCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewJobCollector(), + NewJobCollector(metadataAsTags), ) } @@ -35,7 +36,11 @@ type JobCollector struct { } // NewJobCollector creates a new collector for the Kubernetes Job resource. 
-func NewJobCollector() *JobCollector { +func NewJobCollector(metadataAsTags utils.MetadataAsTags) *JobCollector { + resourceType := getResourceType(jobName, jobVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &JobCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -43,9 +48,11 @@ func NewJobCollector() *JobCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "jobs", + Name: jobName, NodeType: orchestrator.K8sJob, - Version: "batch/v1", + Version: jobVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.JobHandlers)), } @@ -74,6 +81,11 @@ func (c *JobCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Col return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *JobCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -84,7 +96,7 @@ func (c *JobCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Col result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/limitrange.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/limitrange.go index 1104c13c3524b..9ec2da35153e9 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/limitrange.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/limitrange.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewLimitRangeCollectorVersions builds the group of collector versions. -func NewLimitRangeCollectorVersions() collectors.CollectorVersions { +func NewLimitRangeCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewLimitRangeCollector(), + NewLimitRangeCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type LimitRangeCollector struct { // NewLimitRangeCollector creates a new collector for the Kubernetes // LimitRange resource. 
-func NewLimitRangeCollector() *LimitRangeCollector { +func NewLimitRangeCollector(metadataAsTags utils.MetadataAsTags) *LimitRangeCollector { + resourceType := getResourceType(limitRangeName, limitRangeVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &LimitRangeCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewLimitRangeCollector() *LimitRangeCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "limitranges", + Name: limitRangeName, NodeType: orchestrator.K8sLimitRange, - Version: "v1", + Version: limitRangeVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.LimitRangeHandlers)), } @@ -75,6 +82,11 @@ func (c *LimitRangeCollector) Run(rcfg *collectors.CollectorRunConfig) (*collect return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *LimitRangeCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *LimitRangeCollector) Run(rcfg *collectors.CollectorRunConfig) (*collect result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/namespace.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/namespace.go index e635d10e762d6..f61a548fd9451 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/namespace.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/namespace.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewNamespaceCollectorVersions builds the group of collector versions. -func NewNamespaceCollectorVersions() collectors.CollectorVersions { +func NewNamespaceCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewNamespaceCollector(), + NewNamespaceCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type NamespaceCollector struct { // NewNamespaceCollector creates a new collector for the Kubernetes // Namespace resource. 
-func NewNamespaceCollector() *NamespaceCollector { +func NewNamespaceCollector(metadataAsTags utils.MetadataAsTags) *NamespaceCollector { + resourceType := getResourceType(namespaceName, namespaceVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &NamespaceCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewNamespaceCollector() *NamespaceCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "namespaces", + Name: namespaceName, NodeType: orchestrator.K8sNamespace, - Version: "v1", + Version: namespaceVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.NamespaceHandlers)), } @@ -75,6 +82,11 @@ func (c *NamespaceCollector) Run(rcfg *collectors.CollectorRunConfig) (*collecto return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *NamespaceCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *NamespaceCollector) Run(rcfg *collectors.CollectorRunConfig) (*collecto result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/networkpolicy.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/networkpolicy.go index bfd568fb52a2d..a7f9513f04721 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/networkpolicy.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/networkpolicy.go @@ -8,6 +8,7 @@ package k8s import ( + "github.com/DataDog/datadog-agent/pkg/config/utils" "k8s.io/apimachinery/pkg/labels" networkingv1Informers "k8s.io/client-go/informers/networking/v1" networkingv1Listers "k8s.io/client-go/listers/networking/v1" @@ -20,9 +21,9 @@ import ( ) // NewNetworkPolicyCollectorVersions builds the group of collector versions. -func NewNetworkPolicyCollectorVersions() collectors.CollectorVersions { +func NewNetworkPolicyCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewNetworkPolicyCollector(), + NewNetworkPolicyCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type NetworkPolicyCollector struct { // NewNetworkPolicyCollector creates a new collector for the Kubernetes // NetworkPolicy resource. 
-func NewNetworkPolicyCollector() *NetworkPolicyCollector { +func NewNetworkPolicyCollector(metadataAsTags utils.MetadataAsTags) *NetworkPolicyCollector { + resourceType := getResourceType(networkPolicyName, networkPolicyVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &NetworkPolicyCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewNetworkPolicyCollector() *NetworkPolicyCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "networkpolicies", + Name: networkPolicyName, NodeType: orchestrator.K8sNetworkPolicy, - Version: "networking.k8s.io/v1", + Version: networkPolicyVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.NetworkPolicyHandlers)), } @@ -75,6 +82,11 @@ func (c *NetworkPolicyCollector) Run(rcfg *collectors.CollectorRunConfig) (*coll return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *NetworkPolicyCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *NetworkPolicyCollector) Run(rcfg *collectors.CollectorRunConfig) (*coll result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/node.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/node.go index 991f52ae253fa..050b694589175 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/node.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/node.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewNodeCollectorVersions builds the group of collector versions. -func NewNodeCollectorVersions() collectors.CollectorVersions { +func NewNodeCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewNodeCollector(), + NewNodeCollector(metadataAsTags), ) } @@ -35,7 +36,11 @@ type NodeCollector struct { } // NewNodeCollector creates a new collector for the Kubernetes Node resource. 
-func NewNodeCollector() *NodeCollector { +func NewNodeCollector(metadataAsTags utils.MetadataAsTags) *NodeCollector { + resourceType := getResourceType(nodeName, nodeVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &NodeCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -43,9 +48,11 @@ func NewNodeCollector() *NodeCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "nodes", + Name: nodeName, NodeType: orchestrator.K8sNode, - Version: "v1", + Version: nodeVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.NodeHandlers)), } @@ -74,6 +81,11 @@ func (c *NodeCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Co return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *NodeCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -84,7 +96,7 @@ func (c *NodeCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Co result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolume.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolume.go index 7c78f6af2efdc..4461ba9285846 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolume.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolume.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewPersistentVolumeCollectorVersions builds the group of collector versions. -func NewPersistentVolumeCollectorVersions() collectors.CollectorVersions { +func NewPersistentVolumeCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewPersistentVolumeCollector(), + NewPersistentVolumeCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type PersistentVolumeCollector struct { // NewPersistentVolumeCollector creates a new collector for the Kubernetes // PersistentVolume resource. 
-func NewPersistentVolumeCollector() *PersistentVolumeCollector { +func NewPersistentVolumeCollector(metadataAsTags utils.MetadataAsTags) *PersistentVolumeCollector { + resourceType := getResourceType(persistentVolumeName, persistentVolumeVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &PersistentVolumeCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewPersistentVolumeCollector() *PersistentVolumeCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "persistentvolumes", + Name: persistentVolumeName, NodeType: orchestrator.K8sPersistentVolume, - Version: "v1", + Version: persistentVolumeVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.PersistentVolumeHandlers)), } @@ -75,6 +82,11 @@ func (c *PersistentVolumeCollector) Run(rcfg *collectors.CollectorRunConfig) (*c return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *PersistentVolumeCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *PersistentVolumeCollector) Run(rcfg *collectors.CollectorRunConfig) (*c result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolumeclaim.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolumeclaim.go index e5bf23dadade3..aed8cd2827237 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolumeclaim.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/persistentvolumeclaim.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewPersistentVolumeClaimCollectorVersions builds the group of collector versions. -func NewPersistentVolumeClaimCollectorVersions() collectors.CollectorVersions { +func NewPersistentVolumeClaimCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewPersistentVolumeClaimCollector(), + NewPersistentVolumeClaimCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type PersistentVolumeClaimCollector struct { // NewPersistentVolumeClaimCollector creates a new collector for the Kubernetes // PersistentVolumeClaim resource. 
-func NewPersistentVolumeClaimCollector() *PersistentVolumeClaimCollector { +func NewPersistentVolumeClaimCollector(metadataAsTags utils.MetadataAsTags) *PersistentVolumeClaimCollector { + resourceType := getResourceType(persistentVolumeClaimName, persistentVolumeClaimVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &PersistentVolumeClaimCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewPersistentVolumeClaimCollector() *PersistentVolumeClaimCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "persistentvolumeclaims", + Name: persistentVolumeClaimName, NodeType: orchestrator.K8sPersistentVolumeClaim, - Version: "v1", + Version: persistentVolumeClaimVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.PersistentVolumeClaimHandlers)), } @@ -75,6 +82,11 @@ func (c *PersistentVolumeClaimCollector) Run(rcfg *collectors.CollectorRunConfig return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *PersistentVolumeClaimCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *PersistentVolumeClaimCollector) Run(rcfg *collectors.CollectorRunConfig result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go index de25efa0b01e4..9f75b19ebf721 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/pod_unassigned.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -23,9 +24,9 @@ import ( ) // NewUnassignedPodCollectorVersions builds the group of collector versions. -func NewUnassignedPodCollectorVersions(cfg config.Component, store workloadmeta.Component, tagger tagger.Component) collectors.CollectorVersions { +func NewUnassignedPodCollectorVersions(cfg config.Component, store workloadmeta.Component, tagger tagger.Component, metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewUnassignedPodCollector(cfg, store, tagger), + NewUnassignedPodCollector(cfg, store, tagger, metadataAsTags), ) } @@ -40,7 +41,11 @@ type UnassignedPodCollector struct { // NewUnassignedPodCollector creates a new collector for the Kubernetes Pod // resource that is not assigned to any node. 
-func NewUnassignedPodCollector(cfg config.Component, store workloadmeta.Component, tagger tagger.Component) *UnassignedPodCollector { +func NewUnassignedPodCollector(cfg config.Component, store workloadmeta.Component, tagger tagger.Component, metadataAsTags utils.MetadataAsTags) *UnassignedPodCollector { + resourceType := getResourceType(podName, podVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &UnassignedPodCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -48,9 +53,11 @@ func NewUnassignedPodCollector(cfg config.Component, store workloadmeta.Componen IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "pods", + Name: podName, NodeType: orchestrator.K8sPod, - Version: "v1", + Version: podVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(k8sProcessors.NewPodHandlers(cfg, store, tagger)), } @@ -79,6 +86,11 @@ func (c *UnassignedPodCollector) Run(rcfg *collectors.CollectorRunConfig) (*coll return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *UnassignedPodCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -89,7 +101,7 @@ func (c *UnassignedPodCollector) Run(rcfg *collectors.CollectorRunConfig) (*coll result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/poddisruptionbudget.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/poddisruptionbudget.go index 8567b95ca4974..453862873cefd 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/poddisruptionbudget.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/poddisruptionbudget.go @@ -8,6 +8,7 @@ package k8s import ( + "github.com/DataDog/datadog-agent/pkg/config/utils" "k8s.io/apimachinery/pkg/labels" v1policyinformer "k8s.io/client-go/informers/policy/v1" v1policylister "k8s.io/client-go/listers/policy/v1" @@ -20,9 +21,9 @@ import ( ) // NewPodDisruptionBudgetCollectorVersions builds the group of collector versions. -func NewPodDisruptionBudgetCollectorVersions() collectors.CollectorVersions { +func NewPodDisruptionBudgetCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewPodDisruptionBudgetCollectorVersion(), + NewPodDisruptionBudgetCollectorVersion(metadataAsTags), ) } @@ -36,7 +37,11 @@ type PodDisruptionBudgetCollector struct { // NewPodDisruptionBudgetCollectorVersion creates a new collector for the Kubernetes Pod Disruption Budget // resource. 
-func NewPodDisruptionBudgetCollectorVersion() *PodDisruptionBudgetCollector { +func NewPodDisruptionBudgetCollectorVersion(metadataAsTags utils.MetadataAsTags) *PodDisruptionBudgetCollector { + resourceType := getResourceType(podDisruptionBudgetName, podDisruptionBudgetVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &PodDisruptionBudgetCollector{ informer: nil, lister: nil, @@ -46,9 +51,11 @@ func NewPodDisruptionBudgetCollectorVersion() *PodDisruptionBudgetCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "poddisruptionbudgets", + Name: podDisruptionBudgetName, NodeType: orchestrator.K8sPodDisruptionBudget, - Version: "policy/v1", + Version: podDisruptionBudgetVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.PodDisruptionBudgetHandlers)), } @@ -77,6 +84,11 @@ func (c *PodDisruptionBudgetCollector) Run(rcfg *collectors.CollectorRunConfig) return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *PodDisruptionBudgetCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -87,7 +99,7 @@ func (c *PodDisruptionBudgetCollector) Run(rcfg *collectors.CollectorRunConfig) result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/replicaset.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/replicaset.go index 7fa0e593c7564..a7c72cb4a7db9 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/replicaset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/replicaset.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewReplicaSetCollectorVersions builds the group of collector versions. -func NewReplicaSetCollectorVersions() collectors.CollectorVersions { +func NewReplicaSetCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewReplicaSetCollector(), + NewReplicaSetCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type ReplicaSetCollector struct { // NewReplicaSetCollector creates a new collector for the Kubernetes ReplicaSet // resource. 
-func NewReplicaSetCollector() *ReplicaSetCollector { +func NewReplicaSetCollector(metadataAsTags utils.MetadataAsTags) *ReplicaSetCollector { + resourceType := getResourceType(replicaSetName, replicaSetVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &ReplicaSetCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewReplicaSetCollector() *ReplicaSetCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "replicasets", + Name: replicaSetName, NodeType: orchestrator.K8sReplicaSet, - Version: "apps/v1", + Version: replicaSetVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.ReplicaSetHandlers)), } @@ -75,6 +82,11 @@ func (c *ReplicaSetCollector) Run(rcfg *collectors.CollectorRunConfig) (*collect return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *ReplicaSetCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *ReplicaSetCollector) Run(rcfg *collectors.CollectorRunConfig) (*collect result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/role.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/role.go index 5d0999f5bd59d..f6adb13461d38 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/role.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/role.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewRoleCollectorVersions builds the group of collector versions. -func NewRoleCollectorVersions() collectors.CollectorVersions { +func NewRoleCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewRoleCollector(), + NewRoleCollector(metadataAsTags), ) } @@ -35,7 +36,11 @@ type RoleCollector struct { } // NewRoleCollector creates a new collector for the Kubernetes Role resource. 
-func NewRoleCollector() *RoleCollector { +func NewRoleCollector(metadataAsTags utils.MetadataAsTags) *RoleCollector { + resourceType := getResourceType(roleName, roleVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &RoleCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -43,9 +48,11 @@ func NewRoleCollector() *RoleCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "roles", + Name: roleName, NodeType: orchestrator.K8sRole, - Version: "rbac.authorization.k8s.io/v1", + Version: roleVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.RoleHandlers)), } @@ -74,6 +81,11 @@ func (c *RoleCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Co return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *RoleCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -84,7 +96,7 @@ func (c *RoleCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors.Co result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/rolebinding.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/rolebinding.go index 6a499a0faa151..7252fccc741a8 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/rolebinding.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/rolebinding.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewRoleBindingCollectorVersions builds the group of collector versions. -func NewRoleBindingCollectorVersions() collectors.CollectorVersions { +func NewRoleBindingCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewRoleBindingCollector(), + NewRoleBindingCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type RoleBindingCollector struct { // NewRoleBindingCollector creates a new collector for the Kubernetes // RoleBinding resource. 
-func NewRoleBindingCollector() *RoleBindingCollector { +func NewRoleBindingCollector(metadataAsTags utils.MetadataAsTags) *RoleBindingCollector { + resourceType := getResourceType(roleBindingName, roleBindingVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &RoleBindingCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewRoleBindingCollector() *RoleBindingCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "rolebindings", + Name: roleBindingName, NodeType: orchestrator.K8sRoleBinding, - Version: "rbac.authorization.k8s.io/v1", + Version: roleBindingVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.RoleBindingHandlers)), } @@ -75,6 +82,11 @@ func (c *RoleBindingCollector) Run(rcfg *collectors.CollectorRunConfig) (*collec return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *RoleBindingCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *RoleBindingCollector) Run(rcfg *collectors.CollectorRunConfig) (*collec result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/service.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/service.go index 434a5243736a0..36ec87b084ab8 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/service.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/service.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewServiceCollectorVersions builds the group of collector versions. -func NewServiceCollectorVersions() collectors.CollectorVersions { +func NewServiceCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewServiceCollector(), + NewServiceCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type ServiceCollector struct { // NewServiceCollector creates a new collector for the Kubernetes Service // resource. 
-func NewServiceCollector() *ServiceCollector { +func NewServiceCollector(metadataAsTags utils.MetadataAsTags) *ServiceCollector { + resourceType := getResourceType(serviceName, serviceVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &ServiceCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewServiceCollector() *ServiceCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "services", + Name: serviceName, NodeType: orchestrator.K8sService, - Version: "v1", + Version: serviceVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.ServiceHandlers)), } @@ -75,6 +82,11 @@ func (c *ServiceCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *ServiceCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *ServiceCollector) Run(rcfg *collectors.CollectorRunConfig) (*collectors result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/serviceaccount.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/serviceaccount.go index 1bb1d5cd92cf0..af84be67bf565 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/serviceaccount.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/serviceaccount.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewServiceAccountCollectorVersions builds the group of collector versions. -func NewServiceAccountCollectorVersions() collectors.CollectorVersions { +func NewServiceAccountCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewServiceAccountCollector(), + NewServiceAccountCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type ServiceAccountCollector struct { // NewServiceAccountCollector creates a new collector for the Kubernetes // ServiceAccount resource. 
-func NewServiceAccountCollector() *ServiceAccountCollector { +func NewServiceAccountCollector(metadataAsTags utils.MetadataAsTags) *ServiceAccountCollector { + resourceType := getResourceType(serviceAccountName, serviceAccountVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &ServiceAccountCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewServiceAccountCollector() *ServiceAccountCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "serviceaccounts", + Name: serviceAccountName, NodeType: orchestrator.K8sServiceAccount, - Version: "v1", + Version: serviceAccountVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.ServiceAccountHandlers)), } @@ -75,6 +82,11 @@ func (c *ServiceAccountCollector) Run(rcfg *collectors.CollectorRunConfig) (*col return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *ServiceAccountCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *ServiceAccountCollector) Run(rcfg *collectors.CollectorRunConfig) (*col result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/statefulset.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/statefulset.go index 2c1d839882847..ab081c8f026ef 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/statefulset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/statefulset.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewStatefulSetCollectorVersions builds the group of collector versions. -func NewStatefulSetCollectorVersions() collectors.CollectorVersions { +func NewStatefulSetCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewStatefulSetCollector(), + NewStatefulSetCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type StatefulSetCollector struct { // NewStatefulSetCollector creates a new collector for the Kubernetes // StatefulSet resource. 
-func NewStatefulSetCollector() *StatefulSetCollector { +func NewStatefulSetCollector(metadataAsTags utils.MetadataAsTags) *StatefulSetCollector { + resourceType := getResourceType(statefulSetName, statefulSetVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &StatefulSetCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewStatefulSetCollector() *StatefulSetCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "statefulsets", + Name: statefulSetName, NodeType: orchestrator.K8sStatefulSet, - Version: "apps/v1", + Version: statefulSetVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.StatefulSetHandlers)), } @@ -75,6 +82,11 @@ func (c *StatefulSetCollector) Run(rcfg *collectors.CollectorRunConfig) (*collec return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *StatefulSetCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *StatefulSetCollector) Run(rcfg *collectors.CollectorRunConfig) (*collec result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/storageclass.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/storageclass.go index e789f2273b0ac..4f98536903c73 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/storageclass.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/storageclass.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" "k8s.io/apimachinery/pkg/labels" @@ -20,9 +21,9 @@ import ( ) // NewStorageClassCollectorVersions builds the group of collector versions. -func NewStorageClassCollectorVersions() collectors.CollectorVersions { +func NewStorageClassCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewStorageClassCollector(), + NewStorageClassCollector(metadataAsTags), ) } @@ -36,7 +37,11 @@ type StorageClassCollector struct { // NewStorageClassCollector creates a new collector for the Kubernetes // StorageClass resource. 
-func NewStorageClassCollector() *StorageClassCollector { +func NewStorageClassCollector(metadataAsTags utils.MetadataAsTags) *StorageClassCollector { + resourceType := getResourceType(storageClassName, storageClassVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &StorageClassCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -44,9 +49,11 @@ func NewStorageClassCollector() *StorageClassCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "storageclasses", + Name: storageClassName, NodeType: orchestrator.K8sStorageClass, - Version: "storage.k8s.io/v1", + Version: storageClassVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.StorageClassHandlers)), } @@ -75,6 +82,11 @@ func (c *StorageClassCollector) Run(rcfg *collectors.CollectorRunConfig) (*colle return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *StorageClassCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -85,7 +97,7 @@ func (c *StorageClassCollector) Run(rcfg *collectors.CollectorRunConfig) (*colle result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/types.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/types.go new file mode 100644 index 0000000000000..8d2029121da2d --- /dev/null +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/types.go @@ -0,0 +1,104 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build kubeapiserver && orchestrator + +package k8s + +import ( + "fmt" + "strings" +) + +const ( + clusterName = "clusters" + + clusterRoleName = "clusterroles" + clusterRoleVersion = "rbac.authorization.k8s.io/v1" + + clusterRoleBindingName = "clusterrolebindings" + clusterRoleBindingVersion = "rbac.authorization.k8s.io/v1" + + crdName = "customresourcedefinitions" + crdVersion = "apiextensions.k8s.io/v1" + + cronJobName = "cronjobs" + cronJobVersionV1 = "batch/v1" + cronJobVersionV1Beta1 = "batch/v1beta1" + + daemonSetName = "daemonsets" + daemonSetVersion = "apps/v1" + + deploymentName = "deployments" + deploymentVersion = "apps/v1" + + hpaName = "horizontalpodautoscalers" + hpaVersion = "autoscaling/v2" + + ingressName = "ingresses" + ingressVersion = "networking.k8s.io/v1" + + jobName = "jobs" + jobVersion = "batch/v1" + + limitRangeName = "limitranges" + limitRangeVersion = "v1" + + namespaceName = "namespaces" + namespaceVersion = "v1" + + networkPolicyName = "networkpolicies" + networkPolicyVersion = "networking.k8s.io/v1" + + nodeName = "nodes" + nodeVersion = "v1" + + persistentVolumeName = "persistentvolumes" + persistentVolumeVersion = "v1" + + persistentVolumeClaimName = "persistentvolumeclaims" + persistentVolumeClaimVersion = "v1" + + podName = "pods" + podVersion = "v1" + + podDisruptionBudgetName = "poddisruptionbudgets" + podDisruptionBudgetVersion = "policy/v1" + + replicaSetName = "replicasets" + replicaSetVersion = "apps/v1" + + roleName = "roles" + roleVersion = "rbac.authorization.k8s.io/v1" + + roleBindingName = "rolebindings" + roleBindingVersion = "rbac.authorization.k8s.io/v1" + + serviceName = "services" + serviceVersion = "v1" + + serviceAccountName = "serviceaccounts" + serviceAccountVersion = "v1" + + statefulSetName = "statefulsets" + statefulSetVersion = "apps/v1" + + storageClassName = "storageclasses" + storageClassVersion = "storage.k8s.io/v1" + + vpaName = "verticalpodautoscalers" + vpaVersion = "autoscaling.k8s.io/v1" +) + +// getResourceType returns a string in the format "name.apiGroup" if an API group is present in the version. +// Otherwise, it returns the name. +func getResourceType(name string, version string) string { + apiVersionParts := strings.Split(version, "/") + if len(apiVersionParts) == 2 { + apiGroup := apiVersionParts[0] + return fmt.Sprintf("%s.%s", name, apiGroup) + } + return name +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/verticalpodautoscaler.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/verticalpodautoscaler.go index e7008e9e04b8c..62c3b2a2e1578 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/verticalpodautoscaler.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8s/verticalpodautoscaler.go @@ -11,6 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/collectors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" k8sProcessors "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors/k8s" + "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/orchestrator" v1Informers "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/informers/externalversions/autoscaling.k8s.io/v1" @@ -21,9 +22,9 @@ import ( ) // NewVerticalPodAutoscalerCollectorVersions builds the group of collector versions. 
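Editor's aside for reviewers (not part of the patch): the keys produced by getResourceType above are exactly what the new collector constructors use to index metadataAsTags.GetResourcesLabelsAsTags() and GetResourcesAnnotationsAsTags(). The standalone Go sketch below reproduces the helper from types.go and shows the resulting keys for a few of the constants it defines, plus a hypothetical label-to-tag map keyed the same way (the concrete map value type is an assumption, not taken from this patch).

package main

import (
	"fmt"
	"strings"
)

// Reproduction of the getResourceType helper from types.go, included here only
// to illustrate the "<name>" / "<name>.<apiGroup>" key format.
func getResourceType(name string, version string) string {
	apiVersionParts := strings.Split(version, "/")
	if len(apiVersionParts) == 2 {
		apiGroup := apiVersionParts[0]
		return fmt.Sprintf("%s.%s", name, apiGroup)
	}
	return name
}

func main() {
	fmt.Println(getResourceType("jobs", "batch/v1"))                      // "jobs.batch"
	fmt.Println(getResourceType("pods", "v1"))                            // "pods"
	fmt.Println(getResourceType("roles", "rbac.authorization.k8s.io/v1")) // "roles.rbac.authorization.k8s.io"

	// Hypothetical labels-as-tags mapping keyed the same way the collectors key
	// GetResourcesLabelsAsTags(); the value type here is an illustrative assumption.
	labelsAsTags := map[string]map[string]string{
		"jobs.batch": {"app": "kube_app"},
	}
	fmt.Println(labelsAsTags[getResourceType("jobs", "batch/v1")]) // map[app:kube_app]
}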
-func NewVerticalPodAutoscalerCollectorVersions() collectors.CollectorVersions { +func NewVerticalPodAutoscalerCollectorVersions(metadataAsTags utils.MetadataAsTags) collectors.CollectorVersions { return collectors.NewCollectorVersions( - NewVerticalPodAutoscalerCollector(), + NewVerticalPodAutoscalerCollector(metadataAsTags), ) } @@ -37,7 +38,11 @@ type VerticalPodAutoscalerCollector struct { // NewVerticalPodAutoscalerCollector creates a new collector for the Kubernetes // VerticalPodAutoscaler resource. -func NewVerticalPodAutoscalerCollector() *VerticalPodAutoscalerCollector { +func NewVerticalPodAutoscalerCollector(metadataAsTags utils.MetadataAsTags) *VerticalPodAutoscalerCollector { + resourceType := getResourceType(vpaName, vpaVersion) + labelsAsTags := metadataAsTags.GetResourcesLabelsAsTags()[resourceType] + annotationsAsTags := metadataAsTags.GetResourcesAnnotationsAsTags()[resourceType] + return &VerticalPodAutoscalerCollector{ metadata: &collectors.CollectorMetadata{ IsDefaultVersion: true, @@ -45,9 +50,11 @@ func NewVerticalPodAutoscalerCollector() *VerticalPodAutoscalerCollector { IsMetadataProducer: true, IsManifestProducer: true, SupportsManifestBuffering: true, - Name: "verticalpodautoscalers", + Name: vpaName, NodeType: orchestrator.K8sVerticalPodAutoscaler, - Version: "autoscaling.k8s.io/v1", + Version: vpaVersion, + LabelsAsTags: labelsAsTags, + AnnotationsAsTags: annotationsAsTags, }, processor: processors.NewProcessor(new(k8sProcessors.VerticalPodAutoscalerHandlers)), } @@ -76,6 +83,11 @@ func (c *VerticalPodAutoscalerCollector) Run(rcfg *collectors.CollectorRunConfig return nil, collectors.NewListingError(err) } + return c.Process(rcfg, list) +} + +// Process is used to process the list of resources and return the result. +func (c *VerticalPodAutoscalerCollector) Process(rcfg *collectors.CollectorRunConfig, list interface{}) (*collectors.CollectorRunResult, error) { ctx := collectors.NewK8sProcessorContext(rcfg, c.metadata) processResult, processed := c.processor.Process(ctx, list) @@ -86,7 +98,7 @@ func (c *VerticalPodAutoscalerCollector) Run(rcfg *collectors.CollectorRunConfig result := &collectors.CollectorRunResult{ Result: processResult, - ResourcesListed: len(list), + ResourcesListed: len(c.processor.Handlers().ResourceList(ctx, list)), ResourcesProcessed: processed, } diff --git a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8scollector.go b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8scollector.go index b7c3051a39377..e47ea97ec95b3 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/collectors/k8scollector.go +++ b/pkg/collector/corechecks/cluster/orchestrator/collectors/k8scollector.go @@ -72,5 +72,7 @@ func NewK8sProcessorContext(rcfg *CollectorRunConfig, metadata *CollectorMetadat }, APIClient: rcfg.APIClient, ApiGroupVersionTag: fmt.Sprintf("kube_api_version:%s", metadata.Version), + LabelsAsTags: metadata.LabelsAsTags, + AnnotationsAsTags: metadata.AnnotationsAsTags, } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go index 8476bceed7146..be1b1a1a883ce 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go +++ b/pkg/collector/corechecks/cluster/orchestrator/orchestrator.go @@ -29,8 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/util/log" - 
"github.com/DataDog/datadog-agent/pkg/util/optional" - + "github.com/DataDog/datadog-agent/pkg/util/option" "go.uber.org/atomic" "gopkg.in/yaml.v2" "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" @@ -103,8 +102,8 @@ func newOrchestratorCheck(base core.CheckBase, instance *OrchestratorInstance, c } // Factory creates a new check factory -func Factory(wlm workloadmeta.Component, cfg configcomp.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { return newCheck(cfg, wlm, tagger) }) +func Factory(wlm workloadmeta.Component, cfg configcomp.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return newCheck(cfg, wlm, tagger) }) } func newCheck(cfg configcomp.Component, wlm workloadmeta.Component, tagger tagger.Component) check.Check { diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/clusterrole.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/clusterrole.go index f4bbbc2cf7b22..bd6e644afa7b0 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/clusterrole.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/clusterrole.go @@ -57,7 +57,7 @@ func (h *ClusterRoleHandlers) BuildMessageBody(ctx processors.ProcessorContext, //nolint:revive // TODO(CAPP) Fix revive linter func (h *ClusterRoleHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*rbacv1.ClusterRole) - return k8sTransformers.ExtractClusterRole(r) + return k8sTransformers.ExtractClusterRole(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/clusterrolebinding.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/clusterrolebinding.go index 9aaa212513fe5..c190070a7563b 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/clusterrolebinding.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/clusterrolebinding.go @@ -58,7 +58,7 @@ func (h *ClusterRoleBindingHandlers) BuildMessageBody(ctx processors.ProcessorCo //nolint:revive // TODO(CAPP) Fix revive linter func (h *ClusterRoleBindingHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*rbacv1.ClusterRoleBinding) - return k8sTransformers.ExtractClusterRoleBinding(r) + return k8sTransformers.ExtractClusterRoleBinding(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cronjob_v1.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cronjob_v1.go index 7e9108ee3d4ee..36e112f9888b6 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cronjob_v1.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cronjob_v1.go @@ -57,7 +57,7 @@ func (h *CronJobV1Handlers) BuildMessageBody(ctx processors.ProcessorContext, re //nolint:revive // TODO(CAPP) Fix revive linter func (h *CronJobV1Handlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*batchv1.CronJob) - return k8sTransformers.ExtractCronJobV1(r) + return k8sTransformers.ExtractCronJobV1(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git 
a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cronjob_v1beta1.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cronjob_v1beta1.go index 2d17b60004518..db10ba31c283d 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cronjob_v1beta1.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/cronjob_v1beta1.go @@ -58,7 +58,7 @@ func (h *CronJobV1Beta1Handlers) BuildMessageBody(ctx processors.ProcessorContex //nolint:revive // TODO(CAPP) Fix revive linter func (h *CronJobV1Beta1Handlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*batchv1beta1.CronJob) - return k8sTransformers.ExtractCronJobV1Beta1(r) + return k8sTransformers.ExtractCronJobV1Beta1(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/daemonset.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/daemonset.go index b6ec8ade9089b..e9cf6af26f6ab 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/daemonset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/daemonset.go @@ -58,7 +58,7 @@ func (h *DaemonSetHandlers) BuildMessageBody(ctx processors.ProcessorContext, re //nolint:revive // TODO(CAPP) Fix revive linter func (h *DaemonSetHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*appsv1.DaemonSet) - return k8sTransformers.ExtractDaemonSet(r) + return k8sTransformers.ExtractDaemonSet(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/deployment.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/deployment.go index aed5e1a42f4cd..a97f01e764b20 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/deployment.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/deployment.go @@ -58,7 +58,7 @@ func (h *DeploymentHandlers) BuildMessageBody(ctx processors.ProcessorContext, r //nolint:revive // TODO(CAPP) Fix revive linter func (h *DeploymentHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*appsv1.Deployment) - return k8sTransformers.ExtractDeployment(r) + return k8sTransformers.ExtractDeployment(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/horizontalpodautoscaler.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/horizontalpodautoscaler.go index cf74dd8cdb155..3f74007d17490 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/horizontalpodautoscaler.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/horizontalpodautoscaler.go @@ -57,7 +57,7 @@ func (h *HorizontalPodAutoscalerHandlers) BuildMessageBody(ctx processors.Proces //nolint:revive // TODO(CAPP) Fix revive linter func (h *HorizontalPodAutoscalerHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (horizontalPodAutoscalerModel interface{}) { r := resource.(*v2.HorizontalPodAutoscaler) - return k8sTransformers.ExtractHorizontalPodAutoscaler(r) + return k8sTransformers.ExtractHorizontalPodAutoscaler(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff 
--git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/ingress.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/ingress.go index 13e65ea2917ef..7c9c990191a69 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/ingress.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/ingress.go @@ -58,7 +58,7 @@ func (h *IngressHandlers) BuildMessageBody(ctx processors.ProcessorContext, reso //nolint:revive // TODO(CAPP) Fix revive linter func (h *IngressHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*netv1.Ingress) - return k8sTransformers.ExtractIngress(r) + return k8sTransformers.ExtractIngress(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/job.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/job.go index 64328ef34518b..05920c71690d5 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/job.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/job.go @@ -58,7 +58,7 @@ func (h *JobHandlers) BuildMessageBody(ctx processors.ProcessorContext, resource //nolint:revive // TODO(CAPP) Fix revive linter func (h *JobHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*batchv1.Job) - return k8sTransformers.ExtractJob(r) + return k8sTransformers.ExtractJob(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/limitrange.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/limitrange.go index 9effd8f943287..45127ea2cb9a1 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/limitrange.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/limitrange.go @@ -57,7 +57,7 @@ func (h *LimitRangeHandlers) BuildMessageBody(ctx processors.ProcessorContext, r //nolint:revive func (h *LimitRangeHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (LimitRangeModel interface{}) { r := resource.(*corev1.LimitRange) - return k8sTransformers.ExtractLimitRange(r) + return k8sTransformers.ExtractLimitRange(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/namespace.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/namespace.go index 9921fe7a706e5..a4e0bd85d2bfd 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/namespace.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/namespace.go @@ -59,7 +59,7 @@ func (h *NamespaceHandlers) BuildMessageBody(ctx processors.ProcessorContext, re //nolint:revive // TODO(CAPP) Fix revive linter func (h *NamespaceHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (namespaceModel interface{}) { r := resource.(*corev1.Namespace) - return k8sTransformers.ExtractNamespace(r) + return k8sTransformers.ExtractNamespace(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/networkpolicy.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/networkpolicy.go index dd16c8031e36a..27417e39e14ea 100644 --- 
a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/networkpolicy.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/networkpolicy.go @@ -57,7 +57,7 @@ func (h *NetworkPolicyHandlers) BuildMessageBody(ctx processors.ProcessorContext //nolint:revive // TODO(CAPP) Fix revive linter func (h *NetworkPolicyHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*netv1.NetworkPolicy) - return k8sTransformers.ExtractNetworkPolicy(r) + return k8sTransformers.ExtractNetworkPolicy(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/node.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/node.go index e3348f896265b..3d9ec43407564 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/node.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/node.go @@ -58,7 +58,7 @@ func (h *NodeHandlers) BuildMessageBody(ctx processors.ProcessorContext, resourc //nolint:revive // TODO(CAPP) Fix revive linter func (h *NodeHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*corev1.Node) - return k8sTransformers.ExtractNode(r) + return k8sTransformers.ExtractNode(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/persistentvolume.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/persistentvolume.go index 991ceb7c2dc7a..fde55ca2e7181 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/persistentvolume.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/persistentvolume.go @@ -58,7 +58,7 @@ func (h *PersistentVolumeHandlers) BuildMessageBody(ctx processors.ProcessorCont //nolint:revive // TODO(CAPP) Fix revive linter func (h *PersistentVolumeHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*corev1.PersistentVolume) - return k8sTransformers.ExtractPersistentVolume(r) + return k8sTransformers.ExtractPersistentVolume(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/persistentvolumeclaim.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/persistentvolumeclaim.go index b6e9aeee58d0a..fd47e3751ec12 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/persistentvolumeclaim.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/persistentvolumeclaim.go @@ -58,7 +58,7 @@ func (h *PersistentVolumeClaimHandlers) BuildMessageBody(ctx processors.Processo //nolint:revive // TODO(CAPP) Fix revive linter func (h *PersistentVolumeClaimHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*corev1.PersistentVolumeClaim) - return k8sTransformers.ExtractPersistentVolumeClaim(r) + return k8sTransformers.ExtractPersistentVolumeClaim(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go index db54519fbc90b..a6b9c77f318eb 100644 --- 
a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/pod.go @@ -132,7 +132,7 @@ func (h *PodHandlers) BuildMessageBody(ctx processors.ProcessorContext, resource //nolint:revive // TODO(CAPP) Fix revive linter func (h *PodHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*corev1.Pod) - return k8sTransformers.ExtractPod(r) + return k8sTransformers.ExtractPod(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/poddisruptionbudget.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/poddisruptionbudget.go index 3dc6fdb837358..f3b2d65c6fa44 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/poddisruptionbudget.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/poddisruptionbudget.go @@ -49,9 +49,9 @@ func (h *PodDisruptionBudgetHandlers) BuildMessageBody(ctx processors.ProcessorC } // ExtractResource is a handler called to extract the resource model out of a raw resource. -func (h *PodDisruptionBudgetHandlers) ExtractResource(_ processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { +func (h *PodDisruptionBudgetHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*policyv1.PodDisruptionBudget) - return k8sTransformers.ExtractPodDisruptionBudget(r) + return k8sTransformers.ExtractPodDisruptionBudget(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/replicaset.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/replicaset.go index feeb0ee9e626f..475be9b42c0ef 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/replicaset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/replicaset.go @@ -58,7 +58,7 @@ func (h *ReplicaSetHandlers) BuildMessageBody(ctx processors.ProcessorContext, r //nolint:revive // TODO(CAPP) Fix revive linter func (h *ReplicaSetHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*appsv1.ReplicaSet) - return k8sTransformers.ExtractReplicaSet(r) + return k8sTransformers.ExtractReplicaSet(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/role.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/role.go index 6fc61bd55ec24..1f67313346f7f 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/role.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/role.go @@ -58,7 +58,7 @@ func (h *RoleHandlers) BuildMessageBody(ctx processors.ProcessorContext, resourc //nolint:revive // TODO(CAPP) Fix revive linter func (h *RoleHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*rbacv1.Role) - return k8sTransformers.ExtractRole(r) + return k8sTransformers.ExtractRole(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/rolebinding.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/rolebinding.go index 
07ffa2c7bc8f5..1ee555239ead2 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/rolebinding.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/rolebinding.go @@ -57,7 +57,7 @@ func (h *RoleBindingHandlers) BuildMessageBody(ctx processors.ProcessorContext, //nolint:revive // TODO(CAPP) Fix revive linter func (h *RoleBindingHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*rbacv1.RoleBinding) - return k8sTransformers.ExtractRoleBinding(r) + return k8sTransformers.ExtractRoleBinding(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/service.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/service.go index 25eeecf69a561..11dd7de339aed 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/service.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/service.go @@ -58,7 +58,7 @@ func (h *ServiceHandlers) BuildMessageBody(ctx processors.ProcessorContext, reso //nolint:revive // TODO(CAPP) Fix revive linter func (h *ServiceHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*corev1.Service) - return k8sTransformers.ExtractService(r) + return k8sTransformers.ExtractService(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/serviceaccount.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/serviceaccount.go index e25e3d3b0e743..8e87974ba0b4a 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/serviceaccount.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/serviceaccount.go @@ -58,7 +58,7 @@ func (h *ServiceAccountHandlers) BuildMessageBody(ctx processors.ProcessorContex //nolint:revive // TODO(CAPP) Fix revive linter func (h *ServiceAccountHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*corev1.ServiceAccount) - return k8sTransformers.ExtractServiceAccount(r) + return k8sTransformers.ExtractServiceAccount(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/statefulset.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/statefulset.go index c6fa1b0589eab..683ff29b59699 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/statefulset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/statefulset.go @@ -57,7 +57,7 @@ func (h *StatefulSetHandlers) BuildMessageBody(ctx processors.ProcessorContext, //nolint:revive // TODO(CAPP) Fix revive linter func (h *StatefulSetHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (resourceModel interface{}) { r := resource.(*appsv1.StatefulSet) - return k8sTransformers.ExtractStatefulSet(r) + return k8sTransformers.ExtractStatefulSet(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/storageclass.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/storageclass.go index d72d99fad6435..b6dac3dcbb25b 100644 --- 
a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/storageclass.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/storageclass.go @@ -57,7 +57,7 @@ func (h *StorageClassHandlers) BuildMessageBody(ctx processors.ProcessorContext, //nolint:revive func (h *StorageClassHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (StorageClassModel interface{}) { r := resource.(*storagev1.StorageClass) - return k8sTransformers.ExtractStorageClass(r) + return k8sTransformers.ExtractStorageClass(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/verticalpodautoscaler.go b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/verticalpodautoscaler.go index 1f6a9643538f6..29752c871bcac 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/verticalpodautoscaler.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/k8s/verticalpodautoscaler.go @@ -57,7 +57,7 @@ func (h *VerticalPodAutoscalerHandlers) BuildMessageBody(ctx processors.Processo //nolint:revive // TODO(CAPP) Fix revive linter func (h *VerticalPodAutoscalerHandlers) ExtractResource(ctx processors.ProcessorContext, resource interface{}) (verticalPodAutoscalerModel interface{}) { r := resource.(*v1.VerticalPodAutoscaler) - return k8sTransformers.ExtractVerticalPodAutoscaler(r) + return k8sTransformers.ExtractVerticalPodAutoscaler(ctx, r) } // ResourceList is a handler called to convert a list passed as a generic diff --git a/pkg/collector/corechecks/cluster/orchestrator/processors/processor.go b/pkg/collector/corechecks/cluster/orchestrator/processors/processor.go index 65e5619fef47a..85ec518cc0518 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/processors/processor.go +++ b/pkg/collector/corechecks/cluster/orchestrator/processors/processor.go @@ -75,6 +75,9 @@ type K8sProcessorContext struct { //nolint:revive // TODO(CAPP) Fix revive linter ApiGroupVersionTag string SystemInfo *model.SystemInfo + ResourceType string + LabelsAsTags map[string]string + AnnotationsAsTags map[string]string } // ECSProcessorContext holds ECS resource processing attributes diff --git a/pkg/collector/corechecks/cluster/orchestrator/stub.go b/pkg/collector/corechecks/cluster/orchestrator/stub.go index 15385f8d4b272..fc80e55ff0b1d 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/stub.go +++ b/pkg/collector/corechecks/cluster/orchestrator/stub.go @@ -13,7 +13,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -22,6 +22,6 @@ const ( ) // Factory creates a new check factory -func Factory(workloadmeta.Component, configcomp.Component, tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(workloadmeta.Component, configcomp.Component, tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrole.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrole.go index 56402e8122664..7127c46f129c0 100644 --- 
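The three fields added to `K8sProcessorContext` above (`ResourceType`, `LabelsAsTags`, `AnnotationsAsTags`) are what the transformer changes below consume. A minimal illustration of how a caller populates them, mirroring the updated tests; the field names come from this patch, while the concrete values (including `"deployments"`) are made up for the example.

```go
// Illustrative only: the mapping values are examples, not taken from the patch.
pctx := &processors.K8sProcessorContext{
	ResourceType: "deployments",
	// label key on the resource -> tag name to emit
	LabelsAsTags: map[string]string{"app": "application"},
	// annotation key on the resource -> tag name to emit
	AnnotationsAsTags: map[string]string{"annotation": "annotation_key"},
}
_ = pctx // passed as the ProcessorContext argument of the Extract* functions below
```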
a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrole.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrole.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" rbacv1 "k8s.io/api/rbac/v1" @@ -16,7 +17,7 @@ import ( // ExtractClusterRole returns the protobuf model corresponding to a // Kubernetes ClusterRole resource. -func ExtractClusterRole(cr *rbacv1.ClusterRole) *model.ClusterRole { +func ExtractClusterRole(ctx processors.ProcessorContext, cr *rbacv1.ClusterRole) *model.ClusterRole { clusterRole := &model.ClusterRole{ Metadata: extractMetadata(&cr.ObjectMeta), Rules: extractPolicyRules(cr.Rules), @@ -27,7 +28,9 @@ func ExtractClusterRole(cr *rbacv1.ClusterRole) *model.ClusterRole { } } + pctx := ctx.(*processors.K8sProcessorContext) clusterRole.Tags = append(clusterRole.Tags, transformers.RetrieveUnifiedServiceTags(cr.ObjectMeta.Labels)...) + clusterRole.Tags = append(clusterRole.Tags, transformers.RetrieveMetadataTags(cr.ObjectMeta.Labels, cr.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return clusterRole } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrole_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrole_test.go index 31405a7ce1cc1..fe4a62b93a6fa 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrole_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrole_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" rbacv1 "k8s.io/api/rbac/v1" @@ -23,8 +25,10 @@ func TestExtractClusterRole(t *testing.T) { creationTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) tests := map[string]struct { - input rbacv1.ClusterRole - expected model.ClusterRole + input rbacv1.ClusterRole + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.ClusterRole }{ "standard": { input: rbacv1.ClusterRole{ @@ -73,6 +77,12 @@ func TestExtractClusterRole(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.ClusterRole{ AggregationRules: []*model.LabelSelectorRequirement{ { @@ -112,12 +122,23 @@ func TestExtractClusterRole(t *testing.T) { Verbs: []string{"create"}, }, }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractClusterRole(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractClusterRole(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrolebinding.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrolebinding.go index 5902de6b0dfdc..34081aaf87c2b 100644 --- 
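The implementation of `transformers.RetrieveMetadataTags` is not part of this patch, but the test expectations above pin down its behavior: a label `app: my-app` with the mapping `app -> application` yields the tag `application:my-app`, an annotation `annotation: my-annotation` with the mapping `annotation -> annotation_key` yields `annotation_key:my-annotation`, and keys without a mapping produce nothing. The sketch below is consistent with those expectations only; the real function lives in the transformers package and may handle more cases.

```go
// Sketch of RetrieveMetadataTags inferred from the test expectations in this
// patch; not the actual implementation.
func retrieveMetadataTags(labels, annotations, labelsAsTags, annotationsAsTags map[string]string) []string {
	tags := make([]string, 0, len(labelsAsTags)+len(annotationsAsTags))
	for key, tagName := range labelsAsTags {
		if value, ok := labels[key]; ok {
			tags = append(tags, tagName+":"+value)
		}
	}
	for key, tagName := range annotationsAsTags {
		if value, ok := annotations[key]; ok {
			tags = append(tags, tagName+":"+value)
		}
	}
	return tags
}
```

With this shape, the ClusterRole test case above ends up with exactly the two extra entries in its `Tags` expectation, appended after the unified service tags.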
a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrolebinding.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrolebinding.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" rbacv1 "k8s.io/api/rbac/v1" @@ -16,14 +17,16 @@ import ( // ExtractClusterRoleBinding returns the protobuf model corresponding to a // Kubernetes ClusterRoleBinding resource. -func ExtractClusterRoleBinding(crb *rbacv1.ClusterRoleBinding) *model.ClusterRoleBinding { +func ExtractClusterRoleBinding(ctx processors.ProcessorContext, crb *rbacv1.ClusterRoleBinding) *model.ClusterRoleBinding { c := &model.ClusterRoleBinding{ Metadata: extractMetadata(&crb.ObjectMeta), RoleRef: extractRoleRef(&crb.RoleRef), Subjects: extractSubjects(crb.Subjects), } + pctx := ctx.(*processors.K8sProcessorContext) c.Tags = append(c.Tags, transformers.RetrieveUnifiedServiceTags(crb.ObjectMeta.Labels)...) + c.Tags = append(c.Tags, transformers.RetrieveMetadataTags(crb.ObjectMeta.Labels, crb.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return c } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrolebinding_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrolebinding_test.go index 0ffaf44d751cc..2be8ce5f9e8ed 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrolebinding_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/clusterrolebinding_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" rbacv1 "k8s.io/api/rbac/v1" @@ -23,8 +25,10 @@ func TestExtractClusterRoleBinding(t *testing.T) { creationTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) tests := map[string]struct { - input rbacv1.ClusterRoleBinding - expected model.ClusterRoleBinding + input rbacv1.ClusterRoleBinding + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.ClusterRoleBinding }{ "standard": { input: rbacv1.ClusterRoleBinding{ @@ -54,6 +58,12 @@ func TestExtractClusterRoleBinding(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.ClusterRoleBinding{ Metadata: &model.Metadata{ Annotations: []string{"annotation:my-annotation"}, @@ -76,12 +86,23 @@ func TestExtractClusterRoleBinding(t *testing.T) { Name: "firstname.lastname@company.com", }, }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractClusterRoleBinding(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractClusterRoleBinding(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1.go 
b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1.go index 82e09d8c3754e..b90db424b885b 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" batchv1 "k8s.io/api/batch/v1" @@ -16,7 +17,7 @@ import ( // ExtractCronJobV1 returns the protobuf model corresponding to a Kubernetes // CronJob resource. -func ExtractCronJobV1(cj *batchv1.CronJob) *model.CronJob { +func ExtractCronJobV1(ctx processors.ProcessorContext, cj *batchv1.CronJob) *model.CronJob { cronJob := model.CronJob{ Metadata: extractMetadata(&cj.ObjectMeta), Spec: &model.CronJobSpec{ @@ -58,7 +59,10 @@ func ExtractCronJobV1(cj *batchv1.CronJob) *model.CronJob { } cronJob.Spec.ResourceRequirements = ExtractPodTemplateResourceRequirements(cj.Spec.JobTemplate.Spec.Template) + + pctx := ctx.(*processors.K8sProcessorContext) cronJob.Tags = append(cronJob.Tags, transformers.RetrieveUnifiedServiceTags(cj.ObjectMeta.Labels)...) + cronJob.Tags = append(cronJob.Tags, transformers.RetrieveMetadataTags(cj.ObjectMeta.Labels, cj.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return &cronJob } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1_test.go index 2d174f7592a82..22b30ed31243e 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/stretchr/testify/assert" @@ -27,8 +29,10 @@ func TestExtractCronJobV1(t *testing.T) { lastSuccessfulTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) tests := map[string]struct { - input batchv1.CronJob - expected model.CronJob + input batchv1.CronJob + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.CronJob }{ "full cron job (active)": { input: batchv1.CronJob{ @@ -68,6 +72,12 @@ func TestExtractCronJobV1(t *testing.T) { LastSuccessfulTime: &lastSuccessfulTime, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.CronJob{ Metadata: &model.Metadata{ Annotations: []string{"annotation:my-annotation"}, @@ -100,12 +110,23 @@ func TestExtractCronJobV1(t *testing.T) { LastScheduleTime: lastScheduleTime.Unix(), LastSuccessfulTime: lastSuccessfulTime.Unix(), }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractCronJobV1(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractCronJobV1(pctx, &tc.input) + sort.Strings(actual.Tags) + 
sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1beta1.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1beta1.go index 66818141aff07..3018f6477360a 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1beta1.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1beta1.go @@ -9,14 +9,14 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" - batchv1beta1 "k8s.io/api/batch/v1beta1" ) // ExtractCronJobV1Beta1 returns the protobuf model corresponding to a Kubernetes // CronJob resource. -func ExtractCronJobV1Beta1(cj *batchv1beta1.CronJob) *model.CronJob { +func ExtractCronJobV1Beta1(ctx processors.ProcessorContext, cj *batchv1beta1.CronJob) *model.CronJob { cronJob := model.CronJob{ Metadata: extractMetadata(&cj.ObjectMeta), Spec: &model.CronJobSpec{ @@ -55,7 +55,10 @@ func ExtractCronJobV1Beta1(cj *batchv1beta1.CronJob) *model.CronJob { } cronJob.Spec.ResourceRequirements = ExtractPodTemplateResourceRequirements(cj.Spec.JobTemplate.Spec.Template) + + pctx := ctx.(*processors.K8sProcessorContext) cronJob.Tags = append(cronJob.Tags, transformers.RetrieveUnifiedServiceTags(cj.ObjectMeta.Labels)...) + cronJob.Tags = append(cronJob.Tags, transformers.RetrieveMetadataTags(cj.ObjectMeta.Labels, cj.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return &cronJob } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1beta1_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1beta1_test.go index 2cd556ad3dc77..82e3fe78c90dd 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1beta1_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/cronjob_v1beta1_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/stretchr/testify/assert" @@ -27,8 +29,10 @@ func TestExtractCronJobV1Beta1(t *testing.T) { lastScheduleTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) tests := map[string]struct { - input batchv1beta1.CronJob - expected model.CronJob + input batchv1beta1.CronJob + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.CronJob }{ "full cron job (active)": { input: batchv1beta1.CronJob{ @@ -67,6 +71,12 @@ func TestExtractCronJobV1Beta1(t *testing.T) { LastScheduleTime: &lastScheduleTime, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.CronJob{ Metadata: &model.Metadata{ Annotations: []string{"annotation:my-annotation"}, @@ -98,6 +108,10 @@ func TestExtractCronJobV1Beta1(t *testing.T) { }, LastScheduleTime: lastScheduleTime.Unix(), }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, "cronjob with resources": { @@ -119,7 +133,14 @@ func TestExtractCronJobV1Beta1(t *testing.T) { } for name, tc := range tests { t.Run(name, 
func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractCronJobV1Beta1(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractCronJobV1Beta1(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset.go index 7d9164db536dd..ba649fb17935a 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset.go @@ -9,14 +9,14 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" - appsv1 "k8s.io/api/apps/v1" - + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" + appsv1 "k8s.io/api/apps/v1" ) // ExtractDaemonSet returns the protobuf model corresponding to a Kubernetes // DaemonSet resource. -func ExtractDaemonSet(ds *appsv1.DaemonSet) *model.DaemonSet { +func ExtractDaemonSet(ctx processors.ProcessorContext, ds *appsv1.DaemonSet) *model.DaemonSet { daemonSet := model.DaemonSet{ Metadata: extractMetadata(&ds.ObjectMeta), Spec: &model.DaemonSetSpec{ @@ -55,7 +55,10 @@ func ExtractDaemonSet(ds *appsv1.DaemonSet) *model.DaemonSet { } daemonSet.Spec.ResourceRequirements = ExtractPodTemplateResourceRequirements(ds.Spec.Template) + + pctx := ctx.(*processors.K8sProcessorContext) daemonSet.Tags = append(daemonSet.Tags, transformers.RetrieveUnifiedServiceTags(ds.ObjectMeta.Labels)...) + daemonSet.Tags = append(daemonSet.Tags, transformers.RetrieveMetadataTags(ds.ObjectMeta.Labels, ds.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) 
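One detail of the pattern repeated across these transformer hunks: the direct assertion `pctx := ctx.(*processors.K8sProcessorContext)` panics if anything other than a Kubernetes processor context is passed in. That is presumably acceptable because these extractors are only reached from the Kubernetes collectors, but a comma-ok variant would degrade gracefully. A sketch of that alternative, written against the DaemonSet hunk above and not part of the patch:

```go
// Comma-ok variant of the context assertion (sketch only; the patch uses the
// direct form, which panics on an unexpected context type).
pctx, ok := ctx.(*processors.K8sProcessorContext)
if ok {
	daemonSet.Tags = append(daemonSet.Tags, transformers.RetrieveMetadataTags(
		ds.ObjectMeta.Labels, ds.ObjectMeta.Annotations,
		pctx.LabelsAsTags, pctx.AnnotationsAsTags)...)
}
```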
return &daemonSet } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset_test.go index 5aaf8a256e1c2..7d63e5c5c64d7 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/daemonset_test.go @@ -8,6 +8,7 @@ package k8s import ( + "sort" "time" "github.com/stretchr/testify/assert" @@ -17,6 +18,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "testing" ) @@ -27,8 +29,10 @@ func TestExtractDaemonset(t *testing.T) { timestamp := metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)) // 1389744000 tests := map[string]struct { - input v1.DaemonSet - expected model.DaemonSet + input v1.DaemonSet + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.DaemonSet }{ "empty ds": {input: v1.DaemonSet{}, expected: model.DaemonSet{Metadata: &model.Metadata{}, Spec: &model.DaemonSetSpec{}, Status: &model.DaemonSetStatus{}}}, "ds with resources": { @@ -63,8 +67,10 @@ func TestExtractDaemonset(t *testing.T) { "partial ds": { input: v1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "daemonset", - Namespace: "namespace", + Name: "daemonset", + Namespace: "namespace", + Labels: map[string]string{"app": "my-app"}, + Annotations: map[string]string{"annotation": "my-annotation"}, }, Spec: v1.DaemonSetSpec{ UpdateStrategy: v1.DaemonSetUpdateStrategy{ @@ -87,10 +93,19 @@ func TestExtractDaemonset(t *testing.T) { CurrentNumberScheduled: 1, NumberReady: 1, }, - }, expected: model.DaemonSet{ + }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, + expected: model.DaemonSet{ Metadata: &model.Metadata{ - Name: "daemonset", - Namespace: "namespace", + Name: "daemonset", + Namespace: "namespace", + Labels: []string{"app:my-app"}, + Annotations: []string{"annotation:my-annotation"}, }, Conditions: []*model.DaemonSetCondition{ { @@ -101,7 +116,11 @@ func TestExtractDaemonset(t *testing.T) { Message: "test message", }, }, - Tags: []string{"kube_condition_test:false"}, + Tags: []string{ + "kube_condition_test:false", + "application:my-app", + "annotation_key:my-annotation", + }, Spec: &model.DaemonSetSpec{ DeploymentStrategy: "RollingUpdate", MaxUnavailable: "1%", @@ -115,7 +134,14 @@ func TestExtractDaemonset(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractDaemonSet(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractDaemonSet(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment.go index 30b9d154ed1b3..bc9f45a379a6a 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" @@ -18,7 +19,7 @@ import ( // ExtractDeployment returns the protobuf model corresponding to a Kubernetes // Deployment resource. -func ExtractDeployment(d *appsv1.Deployment) *model.Deployment { +func ExtractDeployment(ctx processors.ProcessorContext, d *appsv1.Deployment) *model.Deployment { deploy := model.Deployment{ Metadata: extractMetadata(&d.ObjectMeta), } @@ -56,7 +57,10 @@ func ExtractDeployment(d *appsv1.Deployment) *model.Deployment { } deploy.ResourceRequirements = ExtractPodTemplateResourceRequirements(d.Spec.Template) + + pctx := ctx.(*processors.K8sProcessorContext) deploy.Tags = append(deploy.Tags, transformers.RetrieveUnifiedServiceTags(d.ObjectMeta.Labels)...) + deploy.Tags = append(deploy.Tags, transformers.RetrieveMetadataTags(d.ObjectMeta.Labels, d.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return &deploy } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment_test.go index 2a96c6e29d639..d5dc893103b5d 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/deployment_test.go @@ -9,11 +9,12 @@ package k8s import ( "fmt" + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" - + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/stretchr/testify/assert" @@ -30,8 +31,10 @@ func TestExtractDeployment(t *testing.T) { testIntOrStrNumber := intstr.FromInt(1) testInt32 := int32(2) tests := map[string]struct { - input appsv1.Deployment - expected model.Deployment + input appsv1.Deployment + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.Deployment }{ "full deploy": { input: appsv1.Deployment{ @@ -87,7 +90,14 @@ func TestExtractDeployment(t *testing.T) { }, }, }, - }, expected: model.Deployment{ + }, + labelsAsTags: map[string]string{ + "label": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, + expected: model.Deployment{ Metadata: &model.Metadata{ Name: "deploy", Namespace: "namespace", @@ -132,6 +142,8 @@ func TestExtractDeployment(t *testing.T) { Tags: []string{ "kube_condition_available:false", "kube_condition_progressing:false", + "application:foo", + "annotation_key:bar", }, }, }, @@ -213,7 +225,14 @@ func TestExtractDeployment(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractDeployment(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractDeployment(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/horizontalpodautoscaler.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/horizontalpodautoscaler.go index 353850e5ee46c..877ffd2c0851d 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/horizontalpodautoscaler.go +++ 
b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/horizontalpodautoscaler.go @@ -8,6 +8,7 @@ package k8s import ( + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" v2 "k8s.io/api/autoscaling/v2" model "github.com/DataDog/agent-payload/v5/process" @@ -17,7 +18,7 @@ import ( // ExtractHorizontalPodAutoscaler returns the protobuf model corresponding to a Kubernetes Horizontal Pod Autoscaler resource. // https://github.com/kubernetes/api/blob/v0.23.15/autoscaling/v2/types.go#L33 -func ExtractHorizontalPodAutoscaler(v *v2.HorizontalPodAutoscaler) *model.HorizontalPodAutoscaler { +func ExtractHorizontalPodAutoscaler(ctx processors.ProcessorContext, v *v2.HorizontalPodAutoscaler) *model.HorizontalPodAutoscaler { if v == nil { return &model.HorizontalPodAutoscaler{} } @@ -34,7 +35,9 @@ func ExtractHorizontalPodAutoscaler(v *v2.HorizontalPodAutoscaler) *model.Horizo m.Tags = append(m.Tags, conditionTags...) } + pctx := ctx.(*processors.K8sProcessorContext) m.Tags = append(m.Tags, transformers.RetrieveUnifiedServiceTags(v.ObjectMeta.Labels)...) + m.Tags = append(m.Tags, transformers.RetrieveMetadataTags(v.ObjectMeta.Labels, v.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return m } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/horizontalpodautoscaler_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/horizontalpodautoscaler_test.go index 9ed9bfb707c1b..635326438d706 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/horizontalpodautoscaler_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/horizontalpodautoscaler_test.go @@ -8,6 +8,7 @@ package k8s import ( + "sort" "testing" "time" @@ -18,6 +19,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" ) func TestExtractHorizontalPodAutoscaler(t *testing.T) { @@ -34,8 +36,10 @@ func TestExtractHorizontalPodAutoscaler(t *testing.T) { *averageUtilization = 60 tests := map[string]struct { - input v2.HorizontalPodAutoscaler - expected model.HorizontalPodAutoscaler + input v2.HorizontalPodAutoscaler + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.HorizontalPodAutoscaler }{ "standard": { input: v2.HorizontalPodAutoscaler{ @@ -258,6 +262,12 @@ func TestExtractHorizontalPodAutoscaler(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.HorizontalPodAutoscaler{ Metadata: &model.Metadata{ Name: "HPATest", @@ -475,6 +485,8 @@ func TestExtractHorizontalPodAutoscaler(t *testing.T) { }, }, Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", "kube_condition_abletoscale:true", "kube_condition_scalingactive:true", "kube_condition_scalinglimited:false", @@ -897,7 +909,14 @@ func TestExtractHorizontalPodAutoscaler(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractHorizontalPodAutoscaler(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractHorizontalPodAutoscaler(pctx, &tc.input) + sort.Strings(tc.expected.Tags) + sort.Strings(actual.Tags) + assert.Equal(t, &tc.expected, actual) }) } 
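The updated tests sort `actual.Tags` and `tc.expected.Tags` before comparing because the new tags are produced by iterating over the `LabelsAsTags` and `AnnotationsAsTags` maps, and Go map iteration order is not deterministic. A self-contained sketch of an order-insensitive alternative using testify's `ElementsMatch`, with tag values copied from the test cases above:

```go
package k8s

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Order-insensitive alternative to the sort-then-assert.Equal pattern used in
// this patch.
func TestTagsIgnoringOrder(t *testing.T) {
	actual := []string{"annotation_key:my-annotation", "application:my-app"}
	expected := []string{"application:my-app", "annotation_key:my-annotation"}
	assert.ElementsMatch(t, expected, actual)
}
```

In the patch the whole model struct is compared in a single `assert.Equal`, so sorting the `Tags` slices in place is the simpler route; `ElementsMatch` would only cover the `Tags` field on its own.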
} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/ingress.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/ingress.go index 30d41a54dc642..a99bea3e2a7a6 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/ingress.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/ingress.go @@ -8,6 +8,7 @@ package k8s import ( + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" netv1 "k8s.io/api/networking/v1" @@ -16,7 +17,7 @@ import ( // ExtractIngress returns the protobuf model corresponding to a Kubernetes // Ingress resource. -func ExtractIngress(in *netv1.Ingress) *model.Ingress { +func ExtractIngress(ctx processors.ProcessorContext, in *netv1.Ingress) *model.Ingress { ingress := model.Ingress{ Metadata: extractMetadata(&in.ObjectMeta), Spec: &model.IngressSpec{}, @@ -43,7 +44,9 @@ func ExtractIngress(in *netv1.Ingress) *model.Ingress { ingress.Status = extractIngressStatus(in.Status) } + pctx := ctx.(*processors.K8sProcessorContext) ingress.Tags = append(ingress.Tags, transformers.RetrieveUnifiedServiceTags(in.ObjectMeta.Labels)...) + ingress.Tags = append(ingress.Tags, transformers.RetrieveMetadataTags(in.ObjectMeta.Labels, in.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return &ingress } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/ingress_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/ingress_test.go index 6841efe59fca8..41de57919fc4e 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/ingress_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/ingress_test.go @@ -8,9 +8,11 @@ package k8s import ( + "sort" "testing" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/stretchr/testify/assert" @@ -23,8 +25,10 @@ func TestExtractIngress(t *testing.T) { pathType := netv1.PathTypeImplementationSpecific tests := map[string]struct { - input netv1.Ingress - expected model.Ingress + input netv1.Ingress + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.Ingress }{ "empty": {input: netv1.Ingress{}, expected: model.Ingress{Metadata: &model.Metadata{}, Spec: &model.IngressSpec{}, Status: &model.IngressStatus{}}}, "with spec and status": { @@ -32,7 +36,8 @@ func TestExtractIngress(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "ingress", Namespace: "namespace", - Annotations: map[string]string{"key": "val"}, + Annotations: map[string]string{"annotation": "my-annotation"}, + Labels: map[string]string{"app": "my-app"}, }, Spec: netv1.IngressSpec{ Rules: []netv1.IngressRule{ @@ -80,11 +85,18 @@ func TestExtractIngress(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.Ingress{ Metadata: &model.Metadata{ Name: "ingress", Namespace: "namespace", - Annotations: []string{"key:val"}, + Annotations: []string{"annotation:my-annotation"}, + Labels: []string{"app:my-app"}, }, Spec: &model.IngressSpec{ DefaultBackend: &model.IngressBackend{ @@ -122,12 +134,23 @@ func TestExtractIngress(t *testing.T) { {Hostname: 
"foo.us-east-1.elb.amazonaws.com"}, }, }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractIngress(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractIngress(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job.go index 3227b52a5c839..e4c34e0abeafb 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job.go @@ -9,14 +9,14 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" - batchv1 "k8s.io/api/batch/v1" - + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" + batchv1 "k8s.io/api/batch/v1" ) // ExtractJob returns the protobuf model corresponding to a Kubernetes Job // resource. -func ExtractJob(j *batchv1.Job) *model.Job { +func ExtractJob(ctx processors.ProcessorContext, j *batchv1.Job) *model.Job { job := model.Job{ Metadata: extractMetadata(&j.ObjectMeta), Spec: &model.JobSpec{}, @@ -61,7 +61,10 @@ func ExtractJob(j *batchv1.Job) *model.Job { } job.Spec.ResourceRequirements = ExtractPodTemplateResourceRequirements(j.Spec.Template) + + pctx := ctx.(*processors.K8sProcessorContext) job.Tags = append(job.Tags, transformers.RetrieveUnifiedServiceTags(j.ObjectMeta.Labels)...) + job.Tags = append(job.Tags, transformers.RetrieveMetadataTags(j.ObjectMeta.Labels, j.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) 
return &job } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job_test.go index 57d7777bc23ef..d051d239a08ce 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/job_test.go @@ -8,11 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" - + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/stretchr/testify/assert" @@ -29,8 +30,10 @@ func TestExtractJob(t *testing.T) { lastTransitionTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 35, 0, 0, time.UTC)) tests := map[string]struct { - input batchv1.Job - expected model.Job + input batchv1.Job + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.Job }{ "job started by cronjob (in progress)": { input: batchv1.Job{ @@ -39,9 +42,12 @@ func TestExtractJob(t *testing.T) { "annotation": "my-annotation", }, CreationTimestamp: creationTime, - Labels: map[string]string{"controller-uid": "43739057-c6d7-4a5e-ab63-d0c8844e5272"}, - Name: "job", - Namespace: "project", + Labels: map[string]string{ + "controller-uid": "43739057-c6d7-4a5e-ab63-d0c8844e5272", + "app": "my-app", + }, + Name: "job", + Namespace: "project", OwnerReferences: []metav1.OwnerReference{ { APIVersion: "batch/v1beta1", @@ -69,11 +75,17 @@ func TestExtractJob(t *testing.T) { StartTime: &startTime, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.Job{ Metadata: &model.Metadata{ Annotations: []string{"annotation:my-annotation"}, CreationTimestamp: creationTime.Unix(), - Labels: []string{"controller-uid:43739057-c6d7-4a5e-ab63-d0c8844e5272"}, + Labels: []string{"controller-uid:43739057-c6d7-4a5e-ab63-d0c8844e5272", "app:my-app"}, Name: "job", Namespace: "project", OwnerReferences: []*model.OwnerReference{ @@ -102,6 +114,10 @@ func TestExtractJob(t *testing.T) { Active: 1, StartTime: startTime.Unix(), }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, "job started by cronjob (completed)": { @@ -308,7 +324,16 @@ func TestExtractJob(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractJob(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractJob(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + sort.Strings(actual.Metadata.Labels) + sort.Strings(tc.expected.Metadata.Labels) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/limitrange.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/limitrange.go index 86951274127a0..ba787ea65a20a 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/limitrange.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/limitrange.go @@ -8,6 +8,8 @@ package k8s import ( + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" corev1 "k8s.io/api/core/v1" 
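Tracing the "job started by cronjob (in progress)" case above by hand shows what happens to labels that have no configured mapping: `controller-uid` stays a plain metadata label and contributes no tag, while only the mapped `app` label and `annotation` annotation are promoted. A small runnable walk-through with values copied from that test (the mapping loop mirrors the inferred behavior sketched earlier and is not the real transformer code):

```go
package main

import "fmt"

func main() {
	labels := map[string]string{
		"controller-uid": "43739057-c6d7-4a5e-ab63-d0c8844e5272",
		"app":            "my-app",
	}
	annotations := map[string]string{"annotation": "my-annotation"}

	labelsAsTags := map[string]string{"app": "application"}                // label key -> tag name
	annotationsAsTags := map[string]string{"annotation": "annotation_key"} // annotation key -> tag name

	var tags []string
	for key, tagName := range labelsAsTags {
		if v, ok := labels[key]; ok {
			tags = append(tags, tagName+":"+v)
		}
	}
	for key, tagName := range annotationsAsTags {
		if v, ok := annotations[key]; ok {
			tags = append(tags, tagName+":"+v)
		}
	}

	// controller-uid has no mapping, so it contributes no tag.
	fmt.Println(tags) // [application:my-app annotation_key:my-annotation]
}
```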
"k8s.io/apimachinery/pkg/api/resource" @@ -15,7 +17,7 @@ import ( ) // ExtractLimitRange returns the protobuf model corresponding to a Kubernetes LimitRange resource. -func ExtractLimitRange(lr *corev1.LimitRange) *model.LimitRange { +func ExtractLimitRange(ctx processors.ProcessorContext, lr *corev1.LimitRange) *model.LimitRange { msg := &model.LimitRange{ Metadata: extractMetadata(&lr.ObjectMeta), Spec: &model.LimitRangeSpec{}, @@ -34,6 +36,9 @@ func ExtractLimitRange(lr *corev1.LimitRange) *model.LimitRange { msg.Spec.Limits = append(msg.Spec.Limits, limit) } + pctx := ctx.(*processors.K8sProcessorContext) + msg.Tags = append(msg.Tags, transformers.RetrieveMetadataTags(lr.ObjectMeta.Labels, lr.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) + return msg } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/limitrange_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/limitrange_test.go index 091a4eeba21c9..deff3d9bae956 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/limitrange_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/limitrange_test.go @@ -8,6 +8,7 @@ package k8s import ( + "sort" "testing" "github.com/stretchr/testify/assert" @@ -16,12 +17,15 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" ) func TestExtractResourceLimit(t *testing.T) { input := corev1.LimitRange{ ObjectMeta: metav1.ObjectMeta{ - Name: "limit-range", + Name: "limit-range", + Labels: map[string]string{"app": "my-app"}, + Annotations: map[string]string{"annotation": "my-annotation"}, }, Spec: corev1.LimitRangeSpec{ Limits: []corev1.LimitRangeItem{ @@ -55,7 +59,9 @@ func TestExtractResourceLimit(t *testing.T) { expected := &model.LimitRange{ LimitTypes: []string{"Container"}, Metadata: &model.Metadata{ - Name: "limit-range", + Name: "limit-range", + Labels: []string{"app:my-app"}, + Annotations: []string{"annotation:my-annotation"}, }, Spec: &model.LimitRangeSpec{ Limits: []*model.LimitRangeItem{ @@ -84,8 +90,17 @@ func TestExtractResourceLimit(t *testing.T) { }, }, }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, } - - out := ExtractLimitRange(&input) - assert.Equal(t, expected, out) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: map[string]string{"app": "application"}, + AnnotationsAsTags: map[string]string{"annotation": "annotation_key"}, + } + actual := ExtractLimitRange(pctx, &input) + sort.Strings(actual.Tags) + sort.Strings(expected.Tags) + assert.Equal(t, expected, actual) } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace.go index 44e5dc4eef397..49bb18f96d7b1 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace.go @@ -9,13 +9,14 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" corev1 "k8s.io/api/core/v1" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" ) // ExtractNamespace returns the protobuf model corresponding to a Kubernetes Namespace resource. 
-func ExtractNamespace(ns *corev1.Namespace) *model.Namespace { +func ExtractNamespace(ctx processors.ProcessorContext, ns *corev1.Namespace) *model.Namespace { n := &model.Namespace{ Metadata: extractMetadata(&ns.ObjectMeta), // status value based on https://github.com/kubernetes/kubernetes/blob/1e12d92a5179dbfeb455c79dbf9120c8536e5f9c/pkg/printers/internalversion/printers.go#L1350 @@ -28,7 +29,9 @@ func ExtractNamespace(ns *corev1.Namespace) *model.Namespace { n.Tags = append(n.Tags, conditionTags...) } + pctx := ctx.(*processors.K8sProcessorContext) n.Tags = append(n.Tags, transformers.RetrieveUnifiedServiceTags(ns.ObjectMeta.Labels)...) + n.Tags = append(n.Tags, transformers.RetrieveMetadataTags(ns.ObjectMeta.Labels, ns.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return n } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace_test.go index 0b024acc57c8f..1869ccc3435fc 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/namespace_test.go @@ -8,12 +8,14 @@ package k8s import ( + "sort" "testing" "time" corev1 "k8s.io/api/core/v1" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,8 +26,10 @@ func TestExtractNamespace(t *testing.T) { creationTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) tests := map[string]struct { - input corev1.Namespace - expected model.Namespace + input corev1.Namespace + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.Namespace }{ "standard": { input: corev1.Namespace{ @@ -64,6 +68,12 @@ func TestExtractNamespace(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.Namespace{ Metadata: &model.Metadata{ Annotations: []string{"annotation:my-annotation"}, @@ -98,6 +108,8 @@ func TestExtractNamespace(t *testing.T) { "kube_condition_namespacefinalizersremaining:false", "kube_condition_namespacedeletioncontentfailure:true", "kube_condition_namespacedeletiondiscoveryfailure:true", + "application:my-app", + "annotation_key:my-annotation", }, }, }, @@ -115,7 +127,14 @@ func TestExtractNamespace(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractNamespace(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractNamespace(pctx, &tc.input) + sort.Strings(tc.expected.Tags) + sort.Strings(actual.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/networkpolicy.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/networkpolicy.go index f21b09a19397f..2812a901c3e43 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/networkpolicy.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/networkpolicy.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" @@ -16,13 +17,15 @@ import ( ) // ExtractNetworkPolicy returns the protobuf model corresponding to a Kubernetes -func ExtractNetworkPolicy(n *networkingv1.NetworkPolicy) *model.NetworkPolicy { +func ExtractNetworkPolicy(ctx processors.ProcessorContext, n *networkingv1.NetworkPolicy) *model.NetworkPolicy { networkPolicy := model.NetworkPolicy{ Metadata: extractMetadata(&n.ObjectMeta), Spec: extractNetworkPolicySpec(&n.Spec), } + pctx := ctx.(*processors.K8sProcessorContext) networkPolicy.Tags = append(networkPolicy.Tags, transformers.RetrieveUnifiedServiceTags(n.ObjectMeta.Labels)...) + networkPolicy.Tags = append(networkPolicy.Tags, transformers.RetrieveMetadataTags(n.ObjectMeta.Labels, n.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return &networkPolicy } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/networkpolicy_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/networkpolicy_test.go index 5895e29d6dda7..bd71ddb7e03d4 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/networkpolicy_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/networkpolicy_test.go @@ -8,9 +8,11 @@ package k8s import ( + "sort" "testing" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" @@ -22,8 +24,10 @@ import ( func TestExtractNetworkPolicy(t *testing.T) { protocol := v1.Protocol("TCP") tests := map[string]struct { - input networkingv1.NetworkPolicy - expected *model.NetworkPolicy + input networkingv1.NetworkPolicy + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected *model.NetworkPolicy }{ "standard": { input: networkingv1.NetworkPolicy{ @@ -31,6 +35,9 @@ func TestExtractNetworkPolicy(t *testing.T) { Annotations: map[string]string{ "annotation": "my-annotation", }, + Labels: map[string]string{ + "app": "my-app", + }, }, Spec: networkingv1.NetworkPolicySpec{ Ingress: []networkingv1.NetworkPolicyIngressRule{ @@ -52,9 +59,16 @@ func TestExtractNetworkPolicy(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: &model.NetworkPolicy{ Metadata: &model.Metadata{ Annotations: []string{"annotation:my-annotation"}, + Labels: []string{"app:my-app"}, }, Spec: &model.NetworkPolicySpec{ Ingress: []*model.NetworkPolicyIngressRule{ @@ -75,6 +89,10 @@ func TestExtractNetworkPolicy(t *testing.T) { }, }, }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, "nil-safety": { @@ -90,7 +108,14 @@ func TestExtractNetworkPolicy(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, tc.expected, ExtractNetworkPolicy(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractNetworkPolicy(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node.go 
b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node.go index 2e9fafd648068..08da9ab0d35a3 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node.go @@ -9,6 +9,7 @@ package k8s import ( "fmt" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "strings" model "github.com/DataDog/agent-payload/v5/process" @@ -23,7 +24,7 @@ import ( // ExtractNode returns the protobuf model corresponding to a Kubernetes Node // resource. -func ExtractNode(n *corev1.Node) *model.Node { +func ExtractNode(ctx processors.ProcessorContext, n *corev1.Node) *model.Node { msg := &model.Node{ Metadata: extractMetadata(&n.ObjectMeta), PodCIDR: n.Spec.PodCIDR, @@ -88,7 +89,9 @@ func ExtractNode(n *corev1.Node) *model.Node { addAdditionalNodeTags(msg) + pctx := ctx.(*processors.K8sProcessorContext) msg.Tags = append(msg.Tags, transformers.RetrieveUnifiedServiceTags(n.ObjectMeta.Labels)...) + msg.Tags = append(msg.Tags, transformers.RetrieveMetadataTags(n.ObjectMeta.Labels, n.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return msg } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node_test.go index 118b0f5d4e5f1..2932c9d4b0064 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/node_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" @@ -23,8 +25,10 @@ import ( func TestExtractNode(t *testing.T) { timestamp := metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)) // 1389744000 tests := map[string]struct { - input corev1.Node - expected model.Node + input corev1.Node + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.Node }{ "full node": { input: corev1.Node{ @@ -34,9 +38,10 @@ func TestExtractNode(t *testing.T) { CreationTimestamp: timestamp, Labels: map[string]string{ "kubernetes.io/role": "data", + "app": "my-app", }, Annotations: map[string]string{ - "annotation": "bar", + "annotation": "my-annotation", }, ResourceVersion: "1234", }, @@ -86,13 +91,20 @@ func TestExtractNode(t *testing.T) { Message: "ready", }}, }, - }, expected: model.Node{ + }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, + expected: model.Node{ Metadata: &model.Metadata{ Name: "node", Uid: "e42e5adc-0749-11e8-a2b8-000c29dea4f6", CreationTimestamp: 1389744000, - Labels: []string{"kubernetes.io/role:data"}, - Annotations: []string{"annotation:bar"}, + Labels: []string{"kubernetes.io/role:data", "app:my-app"}, + Annotations: []string{"annotation:my-annotation"}, ResourceVersion: "1234", }, Status: &model.NodeStatus{ @@ -128,7 +140,13 @@ func TestExtractNode(t *testing.T) { }, PodCIDR: "1234-5678-90", Unschedulable: true, - Tags: []string{"node_status:ready", "node_schedulable:false", "kube_node_role:data"}, + Tags: []string{ + "node_status:ready", + "node_schedulable:false", + "kube_node_role:data", + "application:my-app", + "annotation_key:my-annotation", + }, Taints: 
[]*model.Taint{{ Key: "taint2NoTimeStamp", Value: "val1", @@ -209,7 +227,16 @@ func TestExtractNode(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractNode(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractNode(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + sort.Strings(actual.Metadata.Labels) + sort.Strings(tc.expected.Metadata.Labels) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume.go index d7b9a059942e1..bbd24a6e43a3e 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume.go @@ -8,6 +8,7 @@ package k8s import ( + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "strings" corev1 "k8s.io/api/core/v1" @@ -18,7 +19,7 @@ import ( // ExtractPersistentVolume returns the protobuf model corresponding to a Kubernetes // PersistentVolume resource. -func ExtractPersistentVolume(pv *corev1.PersistentVolume) *model.PersistentVolume { +func ExtractPersistentVolume(ctx processors.ProcessorContext, pv *corev1.PersistentVolume) *model.PersistentVolume { message := &model.PersistentVolume{ Metadata: extractMetadata(&pv.ObjectMeta), Spec: &model.PersistentVolumeSpec{ @@ -82,7 +83,9 @@ func ExtractPersistentVolume(pv *corev1.PersistentVolume) *model.PersistentVolum addAdditionalPersistentVolumeTags(message) + pctx := ctx.(*processors.K8sProcessorContext) message.Tags = append(message.Tags, transformers.RetrieveUnifiedServiceTags(pv.ObjectMeta.Labels)...) + message.Tags = append(message.Tags, transformers.RetrieveMetadataTags(pv.ObjectMeta.Labels, pv.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) 
return message } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume_test.go index 07cf4591941bf..8e39aa68c2821 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolume_test.go @@ -8,11 +8,13 @@ package k8s import ( + "sort" "strings" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" @@ -167,11 +169,18 @@ func TestExtractPersistentVolume(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: map[string]string{"app": "application"}, + AnnotationsAsTags: map[string]string{"annotation": "annotation_key"}, + } tc.basicInputPV.Spec.PersistentVolumeSource = tc.inputSource tc.basicExpectedPV.Spec.PersistentVolumeType = tc.expectedType tc.basicExpectedPV.Spec.PersistentVolumeSource = tc.expectedSource tc.basicExpectedPV.Tags = append(tc.basicExpectedPV.Tags, "pv_type:"+strings.ToLower(tc.expectedType)) - assert.Equal(t, &tc.basicExpectedPV, ExtractPersistentVolume(&tc.basicInputPV)) + actual := ExtractPersistentVolume(pctx, &tc.basicInputPV) + sort.Strings(actual.Tags) + sort.Strings(tc.basicExpectedPV.Tags) + assert.Equal(t, &tc.basicExpectedPV, actual) }) } } @@ -307,7 +316,11 @@ func newExpectedPV() model.PersistentVolume { Message: "test", Reason: "test", }, - Tags: []string{"pv_phase:pending"}, + Tags: []string{ + "pv_phase:pending", + "application:my-app", + "annotation_key:my-annotation", + }, } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolumeclaim.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolumeclaim.go index aa172ff30d9e0..bcc3a330e8738 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolumeclaim.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolumeclaim.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" corev1 "k8s.io/api/core/v1" @@ -16,7 +17,7 @@ import ( // ExtractPersistentVolumeClaim returns the protobuf model corresponding to a // Kubernetes PersistentVolumeClaim resource. -func ExtractPersistentVolumeClaim(pvc *corev1.PersistentVolumeClaim) *model.PersistentVolumeClaim { +func ExtractPersistentVolumeClaim(ctx processors.ProcessorContext, pvc *corev1.PersistentVolumeClaim) *model.PersistentVolumeClaim { message := &model.PersistentVolumeClaim{ Metadata: extractMetadata(&pvc.ObjectMeta), Spec: &model.PersistentVolumeClaimSpec{ @@ -30,7 +31,10 @@ func ExtractPersistentVolumeClaim(pvc *corev1.PersistentVolumeClaim) *model.Pers } extractSpec(pvc, message) extractStatus(pvc, message) + + pctx := ctx.(*processors.K8sProcessorContext) message.Tags = append(message.Tags, transformers.RetrieveUnifiedServiceTags(pvc.ObjectMeta.Labels)...) 
+ message.Tags = append(message.Tags, transformers.RetrieveMetadataTags(pvc.ObjectMeta.Labels, pvc.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return message } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolumeclaim_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolumeclaim_test.go index 600159f833f52..d710a5374aa5c 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolumeclaim_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/persistentvolumeclaim_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/stretchr/testify/assert" @@ -27,8 +29,10 @@ func TestExtractPersistentVolumeClaim(t *testing.T) { parsedResource := resource.MustParse("2Gi") tests := map[string]struct { - input corev1.PersistentVolumeClaim - expected model.PersistentVolumeClaim + input corev1.PersistentVolumeClaim + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.PersistentVolumeClaim }{ "full pvc": { input: corev1.PersistentVolumeClaim{ @@ -77,6 +81,12 @@ func TestExtractPersistentVolumeClaim(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.PersistentVolumeClaim{ Metadata: &model.Metadata{ Annotations: []string{"annotation:my-annotation"}, @@ -114,12 +124,23 @@ func TestExtractPersistentVolumeClaim(t *testing.T) { Reason: "OfflineResize", }}, }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractPersistentVolumeClaim(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractPersistentVolumeClaim(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go index d7713226d4497..8f9eb74df5377 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod.go @@ -9,6 +9,8 @@ package k8s import ( "fmt" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" "hash/fnv" "sort" "strconv" @@ -30,7 +32,7 @@ const ( // ExtractPod returns the protobuf model corresponding to a Kubernetes Pod // resource. 
-func ExtractPod(p *corev1.Pod) *model.Pod { +func ExtractPod(ctx processors.ProcessorContext, p *corev1.Pod) *model.Pod { podModel := model.Pod{ Metadata: extractMetadata(&p.ObjectMeta), } @@ -74,9 +76,75 @@ func ExtractPod(p *corev1.Pod) *model.Pod { } } + if p.Spec.Affinity != nil && p.Spec.Affinity.NodeAffinity != nil { + podModel.NodeAffinity = &model.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: convertNodeSelector(p.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution), + PreferredDuringSchedulingIgnoredDuringExecution: convertPreferredSchedulingTerm(p.Spec.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution), + } + } + + pctx := ctx.(*processors.K8sProcessorContext) + podModel.Tags = append(podModel.Tags, transformers.RetrieveMetadataTags(p.ObjectMeta.Labels, p.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) + return &podModel } +func convertNodeSelector(ns *corev1.NodeSelector) *model.NodeSelector { + if ns == nil { + return nil + } + return &model.NodeSelector{ + NodeSelectorTerms: convertNodeSelectorTerms(ns.NodeSelectorTerms), + } +} + +func convertPreferredSchedulingTerm(terms []corev1.PreferredSchedulingTerm) []*model.PreferredSchedulingTerm { + if len(terms) == 0 { + return nil + } + var preferredTerms []*model.PreferredSchedulingTerm + for _, term := range terms { + preferredTerms = append(preferredTerms, &model.PreferredSchedulingTerm{ + Preference: convertNodeSelectorTerm(term.Preference), + Weight: term.Weight, + }) + } + return preferredTerms +} + +func convertNodeSelectorTerms(terms []corev1.NodeSelectorTerm) []*model.NodeSelectorTerm { + if len(terms) == 0 { + return nil + } + var nodeSelectorTerms []*model.NodeSelectorTerm + for _, term := range terms { + nodeSelectorTerms = append(nodeSelectorTerms, convertNodeSelectorTerm(term)) + } + return nodeSelectorTerms +} + +func convertNodeSelectorTerm(term corev1.NodeSelectorTerm) *model.NodeSelectorTerm { + return &model.NodeSelectorTerm{ + MatchExpressions: convertNodeSelectorRequirements(term.MatchExpressions), + MatchFields: convertNodeSelectorRequirements(term.MatchFields), + } +} + +func convertNodeSelectorRequirements(requirements []corev1.NodeSelectorRequirement) []*model.LabelSelectorRequirement { + if len(requirements) == 0 { + return nil + } + var nodeSelectorRequirements []*model.LabelSelectorRequirement + for _, req := range requirements { + nodeSelectorRequirements = append(nodeSelectorRequirements, &model.LabelSelectorRequirement{ + Key: req.Key, + Operator: string(req.Operator), + Values: req.Values, + }) + } + return nodeSelectorRequirements +} + // ExtractPodTemplateResourceRequirements extracts resource requirements of containers and initContainers into model.ResourceRequirements func ExtractPodTemplateResourceRequirements(template corev1.PodTemplateSpec) []*model.ResourceRequirements { return extractPodResourceRequirements(template.Spec.Containers, template.Spec.InitContainers) diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod_test.go index 29ffbf77af410..3de2e71ff1fdd 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/pod_test.go @@ -9,10 +9,13 @@ package k8s import ( "fmt" + "reflect" + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" @@ -72,8 +75,10 @@ func TestExtractPod(t *testing.T) { parseRequests := resource.MustParse("250M") parseLimits := resource.MustParse("550M") tests := map[string]struct { - input v1.Pod - expected model.Pod + input v1.Pod + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.Pod }{ "full pod with containers without resourceRequirements": { input: v1.Pod{ @@ -142,10 +147,10 @@ func TestExtractPod(t *testing.T) { Namespace: "namespace", CreationTimestamp: timestamp, Labels: map[string]string{ - "label": "foo", + "app": "my-app", }, Annotations: map[string]string{ - "annotation": "bar", + "annotation": "my-annotation", }, OwnerReferences: []metav1.OwnerReference{ { @@ -165,14 +170,21 @@ func TestExtractPod(t *testing.T) { }, PriorityClassName: "high-priority", }, - }, expected: model.Pod{ + }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, + expected: model.Pod{ Metadata: &model.Metadata{ Name: "pod", Namespace: "namespace", Uid: "e42e5adc-0749-11e8-a2b8-000c29dea4f6", CreationTimestamp: 1389744000, - Labels: []string{"label:foo"}, - Annotations: []string{"annotation:bar"}, + Labels: []string{"app:my-app"}, + Annotations: []string{"annotation:my-annotation"}, OwnerReferences: []*model.OwnerReference{ { Name: "test-controller", @@ -230,7 +242,12 @@ func TestExtractPod(t *testing.T) { LastTransitionTime: timestamp.Unix(), }, }, - Tags: []string{"kube_condition_ready:true", "kube_condition_podscheduled:true"}, + Tags: []string{ + "kube_condition_ready:true", + "kube_condition_podscheduled:true", + "application:my-app", + "annotation_key:my-annotation", + }, ResourceRequirements: []*model.ResourceRequirements{ { Limits: map[string]int64{}, @@ -523,7 +540,14 @@ func TestExtractPod(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractPod(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractPod(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } @@ -966,3 +990,238 @@ func TestMapToTags(t *testing.T) { assert.ElementsMatch(t, []string{"foo:bar", "node-role.kubernetes.io/nodeless"}, tags) assert.Len(t, tags, 2) } + +func TestConvertNodeSelector(t *testing.T) { + tests := []struct { + name string + input *v1.NodeSelector + want *model.NodeSelector + }{ + { + name: "nil input", + input: nil, + want: nil, + }, + { + name: "empty NodeSelector", + input: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{}, + }, + want: &model.NodeSelector{NodeSelectorTerms: nil}, + }, + { + name: "with MatchExpressions and MatchFields", + input: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + {Key: "key1", Operator: v1.NodeSelectorOpIn, Values: []string{"v1", "v2"}}, + }, + MatchFields: []v1.NodeSelectorRequirement{ + {Key: "field1", Operator: v1.NodeSelectorOpNotIn, Values: []string{"v3"}}, + }, + }, + }, + }, + want: &model.NodeSelector{ + NodeSelectorTerms: []*model.NodeSelectorTerm{ + { + MatchExpressions: []*model.LabelSelectorRequirement{ + {Key: "key1", Operator: "In", Values: []string{"v1", "v2"}}, + }, + MatchFields: 
[]*model.LabelSelectorRequirement{ + {Key: "field1", Operator: "NotIn", Values: []string{"v3"}}, + }, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertNodeSelector(tt.input) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("convertNodeSelector() = %#v, want %#v", got, tt.want) + } + }) + } +} + +func TestConvertPreferredSchedulingTerm(t *testing.T) { + tests := []struct { + name string + input []v1.PreferredSchedulingTerm + want []*model.PreferredSchedulingTerm + }{ + { + name: "empty terms", + input: []v1.PreferredSchedulingTerm{}, + want: nil, + }, + { + name: "single preferred scheduling term", + input: []v1.PreferredSchedulingTerm{ + { + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + {Key: "k", Operator: v1.NodeSelectorOpExists}, + }, + }, + Weight: 10, + }, + }, + want: []*model.PreferredSchedulingTerm{ + { + Preference: &model.NodeSelectorTerm{ + MatchExpressions: []*model.LabelSelectorRequirement{ + {Key: "k", Operator: "Exists", Values: nil}, + }, + MatchFields: nil, + }, + Weight: 10, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertPreferredSchedulingTerm(tt.input) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("convertPreferredSchedulingTerm() = %#v, want %#v", got, tt.want) + } + }) + } +} + +func TestConvertNodeSelectorTerms(t *testing.T) { + tests := []struct { + name string + input []v1.NodeSelectorTerm + want []*model.NodeSelectorTerm + }{ + { + name: "empty terms", + input: []v1.NodeSelectorTerm{}, + want: nil, + }, + { + name: "multiple NodeSelectorTerms", + input: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + {Key: "k1", Operator: v1.NodeSelectorOpIn, Values: []string{"v1"}}, + }, + }, + { + MatchExpressions: []v1.NodeSelectorRequirement{ + {Key: "k2", Operator: v1.NodeSelectorOpNotIn, Values: []string{"v2"}}, + }, + }, + }, + want: []*model.NodeSelectorTerm{ + { + MatchExpressions: []*model.LabelSelectorRequirement{ + {Key: "k1", Operator: "In", Values: []string{"v1"}}, + }, + MatchFields: nil, + }, + { + MatchExpressions: []*model.LabelSelectorRequirement{ + {Key: "k2", Operator: "NotIn", Values: []string{"v2"}}, + }, + MatchFields: nil, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertNodeSelectorTerms(tt.input) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("convertNodeSelectorTerms() = %#v, want %#v", got, tt.want) + } + }) + } +} + +func TestConvertNodeSelectorTerm(t *testing.T) { + tests := []struct { + name string + input v1.NodeSelectorTerm + want *model.NodeSelectorTerm + }{ + { + name: "empty term", + input: v1.NodeSelectorTerm{}, + want: &model.NodeSelectorTerm{ + MatchExpressions: nil, + MatchFields: nil, + }, + }, + { + name: "with match expressions and fields", + input: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + {Key: "k1", Operator: v1.NodeSelectorOpExists}, + }, + MatchFields: []v1.NodeSelectorRequirement{ + {Key: "f1", Operator: v1.NodeSelectorOpDoesNotExist}, + }, + }, + want: &model.NodeSelectorTerm{ + MatchExpressions: []*model.LabelSelectorRequirement{ + {Key: "k1", Operator: "Exists"}, + }, + MatchFields: []*model.LabelSelectorRequirement{ + {Key: "f1", Operator: "DoesNotExist"}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertNodeSelectorTerm(tt.input) + if !reflect.DeepEqual(got, tt.want) { + 
t.Errorf("convertNodeSelectorTerm() = %#v, want %#v", got, tt.want) + } + }) + } +} + +func TestConvertNodeSelectorRequirements(t *testing.T) { + tests := []struct { + name string + input []v1.NodeSelectorRequirement + want []*model.LabelSelectorRequirement + }{ + { + name: "no requirements", + input: []v1.NodeSelectorRequirement{}, + want: nil, + }, + { + name: "with multiple requirements", + input: []v1.NodeSelectorRequirement{ + {Key: "k1", Operator: v1.NodeSelectorOpIn, Values: []string{"v1", "v2"}}, + {Key: "k2", Operator: v1.NodeSelectorOpNotIn, Values: []string{"v3"}}, + }, + want: []*model.LabelSelectorRequirement{ + {Key: "k1", Operator: "In", Values: []string{"v1", "v2"}}, + {Key: "k2", Operator: "NotIn", Values: []string{"v3"}}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertNodeSelectorRequirements(tt.input) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("convertNodeSelectorRequirements() = %#v, want %#v", got, tt.want) + } + }) + } +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget.go index 3838c30ef0f7f..e4d8def87f3f9 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget.go @@ -9,15 +9,15 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" - policyv1 "k8s.io/api/policy/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) // ExtractPodDisruptionBudget returns the protobuf model corresponding to a Kubernetes -func ExtractPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) *model.PodDisruptionBudget { +func ExtractPodDisruptionBudget(ctx processors.ProcessorContext, pdb *policyv1.PodDisruptionBudget) *model.PodDisruptionBudget { if pdb == nil { return nil } @@ -26,7 +26,9 @@ func ExtractPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) *model.PodDis Spec: extractPodDisruptionBudgetSpec(&pdb.Spec), Status: extractPodDisruptionBudgetStatus(&pdb.Status), } + pctx := ctx.(*processors.K8sProcessorContext) result.Tags = append(result.Tags, transformers.RetrieveUnifiedServiceTags(pdb.ObjectMeta.Labels)...) + result.Tags = append(result.Tags, transformers.RetrieveMetadataTags(pdb.ObjectMeta.Labels, pdb.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) 
return &result } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget_test.go index ba32a4b1bd36b..00593e77a96ef 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/poddisruptionbudget_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/assert" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" policyv1 "k8s.io/api/policy/v1" @@ -174,8 +175,10 @@ func TestExtractPodDisruptionBudget(t *testing.T) { t1 := t0.Add(time.Minute) for name, tc := range map[string]struct { - in *policyv1.PodDisruptionBudget - expect *model.PodDisruptionBudget + in *policyv1.PodDisruptionBudget + labelsAsTags map[string]string + annotationsAsTags map[string]string + expect *model.PodDisruptionBudget }{ "nil": { in: nil, @@ -210,6 +213,10 @@ func TestExtractPodDisruptionBudget(t *testing.T) { Labels: map[string]string{ kubernetes.VersionTagLabelKey: "ultimate", kubernetes.ServiceTagLabelKey: "honorable", + "app": "my-app", + }, + Annotations: map[string]string{ + "annotation": "my-annotation", }, }, Spec: policyv1.PodDisruptionBudgetSpec{ @@ -239,6 +246,12 @@ func TestExtractPodDisruptionBudget(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expect: &model.PodDisruptionBudget{ Metadata: &model.Metadata{ Name: "gwern", @@ -250,7 +263,9 @@ func TestExtractPodDisruptionBudget(t *testing.T) { Labels: []string{ fmt.Sprintf("%s:ultimate", kubernetes.VersionTagLabelKey), fmt.Sprintf("%s:honorable", kubernetes.ServiceTagLabelKey), + "app:my-app", }, + Annotations: []string{"annotation:my-annotation"}, }, Spec: &model.PodDisruptionBudgetSpec{ MinAvailable: &model.IntOrString{ @@ -289,12 +304,18 @@ func TestExtractPodDisruptionBudget(t *testing.T) { Tags: []string{ "version:ultimate", "service:honorable", + "application:my-app", + "annotation_key:my-annotation", }, }, }, } { t.Run(name, func(t *testing.T) { - got := ExtractPodDisruptionBudget(tc.in) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + got := ExtractPodDisruptionBudget(pctx, tc.in) if tc.expect == nil { assert.Nil(t, got) } else { diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset.go index 66219033bcbc1..eefde04dab03f 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" appsv1 "k8s.io/api/apps/v1" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" @@ -16,7 +17,7 @@ import ( // ExtractReplicaSet returns the protobuf model corresponding to a Kubernetes // ReplicaSet resource. 
-func ExtractReplicaSet(rs *appsv1.ReplicaSet) *model.ReplicaSet { +func ExtractReplicaSet(ctx processors.ProcessorContext, rs *appsv1.ReplicaSet) *model.ReplicaSet { replicaSet := model.ReplicaSet{ Metadata: extractMetadata(&rs.ObjectMeta), } @@ -42,7 +43,10 @@ func ExtractReplicaSet(rs *appsv1.ReplicaSet) *model.ReplicaSet { } replicaSet.ResourceRequirements = ExtractPodTemplateResourceRequirements(rs.Spec.Template) + + pctx := ctx.(*processors.K8sProcessorContext) replicaSet.Tags = append(replicaSet.Tags, transformers.RetrieveUnifiedServiceTags(rs.ObjectMeta.Labels)...) + replicaSet.Tags = append(replicaSet.Tags, transformers.RetrieveMetadataTags(rs.ObjectMeta.Labels, rs.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return &replicaSet } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset_test.go index c5ab710aeb3b8..907caccb7ad90 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/replicaset_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" @@ -24,8 +26,10 @@ func TestExtractReplicaSet(t *testing.T) { timestamp := metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)) // 1389744000 testInt32 := int32(2) tests := map[string]struct { - input appsv1.ReplicaSet - expected model.ReplicaSet + input appsv1.ReplicaSet + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.ReplicaSet }{ "full rs": { input: appsv1.ReplicaSet{ @@ -72,7 +76,14 @@ func TestExtractReplicaSet(t *testing.T) { }, }, }, - }, expected: model.ReplicaSet{ + }, + labelsAsTags: map[string]string{ + "label": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, + expected: model.ReplicaSet{ Metadata: &model.Metadata{ Name: "replicaset", Namespace: "namespace", @@ -91,7 +102,11 @@ func TestExtractReplicaSet(t *testing.T) { Message: "test message", }, }, - Tags: []string{"kube_condition_replicafailure:false"}, + Tags: []string{ + "kube_condition_replicafailure:false", + "application:foo", + "annotation_key:bar", + }, Selectors: []*model.LabelSelectorRequirement{ { Key: "app", @@ -144,7 +159,14 @@ func TestExtractReplicaSet(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractReplicaSet(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractReplicaSet(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/role.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/role.go index 3f8d32dd85376..4ea33e4906c93 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/role.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/role.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" rbacv1 "k8s.io/api/rbac/v1" @@ -16,13 +17,14 @@ import ( // ExtractRole returns the protobuf model corresponding to a Kubernetes Role // resource. -func ExtractRole(r *rbacv1.Role) *model.Role { +func ExtractRole(ctx processors.ProcessorContext, r *rbacv1.Role) *model.Role { msg := &model.Role{ Metadata: extractMetadata(&r.ObjectMeta), Rules: extractPolicyRules(r.Rules), } + pctx := ctx.(*processors.K8sProcessorContext) msg.Tags = append(msg.Tags, transformers.RetrieveUnifiedServiceTags(r.ObjectMeta.Labels)...) - + msg.Tags = append(msg.Tags, transformers.RetrieveMetadataTags(r.ObjectMeta.Labels, r.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return msg } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/role_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/role_test.go index 36ee55a9843a9..9277e851a026a 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/role_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/role_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" rbacv1 "k8s.io/api/rbac/v1" @@ -23,8 +25,10 @@ func TestExtractRole(t *testing.T) { creationTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) tests := map[string]struct { - input rbacv1.Role - expected model.Role + input rbacv1.Role + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.Role }{ "standard": { input: rbacv1.Role{ @@ -59,6 +63,12 @@ func TestExtractRole(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.Role{ Metadata: &model.Metadata{ Annotations: []string{"annotation:my-annotation"}, @@ -86,12 +96,23 @@ func TestExtractRole(t *testing.T) { Verbs: []string{"create"}, }, }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractRole(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractRole(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/rolebinding.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/rolebinding.go index b6f50d941a3a6..14c8eac529868 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/rolebinding.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/rolebinding.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" rbacv1 "k8s.io/api/rbac/v1" @@ -16,14 +17,16 @@ import ( // ExtractRoleBinding returns the protobuf model corresponding to a 
Kubernetes // RoleBinding resource. -func ExtractRoleBinding(rb *rbacv1.RoleBinding) *model.RoleBinding { +func ExtractRoleBinding(ctx processors.ProcessorContext, rb *rbacv1.RoleBinding) *model.RoleBinding { msg := &model.RoleBinding{ Metadata: extractMetadata(&rb.ObjectMeta), RoleRef: extractRoleRef(&rb.RoleRef), Subjects: extractSubjects(rb.Subjects), } + pctx := ctx.(*processors.K8sProcessorContext) msg.Tags = append(msg.Tags, transformers.RetrieveUnifiedServiceTags(rb.ObjectMeta.Labels)...) + msg.Tags = append(msg.Tags, transformers.RetrieveMetadataTags(rb.ObjectMeta.Labels, rb.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return msg } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/rolebinding_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/rolebinding_test.go index 7ea856b651ece..f6c3370af301e 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/rolebinding_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/rolebinding_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" rbacv1 "k8s.io/api/rbac/v1" @@ -23,8 +25,10 @@ func TestExtractRoleBinding(t *testing.T) { creationTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) tests := map[string]struct { - input rbacv1.RoleBinding - expected model.RoleBinding + input rbacv1.RoleBinding + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.RoleBinding }{ "standard": { input: rbacv1.RoleBinding{ @@ -54,6 +58,12 @@ func TestExtractRoleBinding(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.RoleBinding{ Metadata: &model.Metadata{ Annotations: []string{"annotation:my-annotation"}, @@ -76,12 +86,23 @@ func TestExtractRoleBinding(t *testing.T) { Name: "firstname.lastname@company.com", }, }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractRoleBinding(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractRoleBinding(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/service.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/service.go index 23f346c5897a2..8444aca6bcac1 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/service.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/service.go @@ -8,6 +8,7 @@ package k8s import ( + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "strings" model "github.com/DataDog/agent-payload/v5/process" @@ -18,7 +19,7 @@ import ( // ExtractService returns the protobuf model corresponding to a Kubernetes // Service resource. 
-func ExtractService(s *corev1.Service) *model.Service { +func ExtractService(ctx processors.ProcessorContext, s *corev1.Service) *model.Service { message := &model.Service{ Metadata: extractMetadata(&s.ObjectMeta), Spec: &model.ServiceSpec{ @@ -79,7 +80,9 @@ func ExtractService(s *corev1.Service) *model.Service { }) } + pctx := ctx.(*processors.K8sProcessorContext) message.Tags = append(message.Tags, transformers.RetrieveUnifiedServiceTags(s.ObjectMeta.Labels)...) + message.Tags = append(message.Tags, transformers.RetrieveMetadataTags(s.ObjectMeta.Labels, s.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return message } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/service_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/service_test.go index ad8bc2b23487f..5daf4a33ee3e0 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/service_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/service_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" corev1 "k8s.io/api/core/v1" @@ -21,8 +23,10 @@ import ( func TestExtractService(t *testing.T) { tests := map[string]struct { - input corev1.Service - expected model.Service + input corev1.Service + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.Service }{ "ClusterIP": { input: corev1.Service{ @@ -56,6 +60,12 @@ func TestExtractService(t *testing.T) { }, Status: corev1.ServiceStatus{}, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "prefix/name": "annotation_key", + }, expected: model.Service{ Metadata: &model.Metadata{ Annotations: []string{"prefix/name:annotation-value"}, @@ -88,6 +98,10 @@ func TestExtractService(t *testing.T) { Type: "ClusterIP", }, Status: &model.ServiceStatus{}, + Tags: []string{ + "application:app-1", + "annotation_key:annotation-value", + }, }, }, "ExternalName": { @@ -292,6 +306,13 @@ func TestExtractService(t *testing.T) { }, } for _, test := range tests { - assert.Equal(t, &test.expected, ExtractService(&test.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: test.labelsAsTags, + AnnotationsAsTags: test.annotationsAsTags, + } + actual := ExtractService(pctx, &test.input) + sort.Strings(actual.Tags) + sort.Strings(test.expected.Tags) + assert.Equal(t, &test.expected, actual) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/serviceaccount.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/serviceaccount.go index 9a03d49f435cd..8b5d24d764367 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/serviceaccount.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/serviceaccount.go @@ -9,6 +9,7 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" corev1 "k8s.io/api/core/v1" @@ -17,7 +18,7 @@ import ( // ExtractServiceAccount returns the protobuf model corresponding to a // Kubernetes ServiceAccount resource. 
-func ExtractServiceAccount(sa *corev1.ServiceAccount) *model.ServiceAccount { +func ExtractServiceAccount(ctx processors.ProcessorContext, sa *corev1.ServiceAccount) *model.ServiceAccount { serviceAccount := &model.ServiceAccount{ Metadata: extractMetadata(&sa.ObjectMeta), } @@ -43,7 +44,9 @@ func ExtractServiceAccount(sa *corev1.ServiceAccount) *model.ServiceAccount { }) } + pctx := ctx.(*processors.K8sProcessorContext) serviceAccount.Tags = append(serviceAccount.Tags, transformers.RetrieveUnifiedServiceTags(sa.ObjectMeta.Labels)...) + serviceAccount.Tags = append(serviceAccount.Tags, transformers.RetrieveMetadataTags(sa.ObjectMeta.Labels, sa.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return serviceAccount } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/serviceaccount_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/serviceaccount_test.go index 9204ea20a3a3e..1f50ec0005545 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/serviceaccount_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/serviceaccount_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/stretchr/testify/assert" @@ -24,8 +26,10 @@ func TestExtractServiceAccount(t *testing.T) { creationTime := metav1.NewTime(time.Date(2021, time.April, 16, 14, 30, 0, 0, time.UTC)) tests := map[string]struct { - input corev1.ServiceAccount - expected model.ServiceAccount + input corev1.ServiceAccount + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.ServiceAccount }{ "standard": { input: corev1.ServiceAccount{ @@ -54,6 +58,12 @@ func TestExtractServiceAccount(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.ServiceAccount{ AutomountServiceAccountToken: true, ImagePullSecrets: []*model.TypedLocalObjectReference{ @@ -75,12 +85,23 @@ func TestExtractServiceAccount(t *testing.T) { Name: "default-token-uudge", }, }, + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, }, }, } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractServiceAccount(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractServiceAccount(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset.go index 13b05a5b6a24d..afa555506a5a5 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset.go @@ -9,15 +9,14 @@ package k8s import ( model "github.com/DataDog/agent-payload/v5/process" - + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" - v1 "k8s.io/api/apps/v1" ) // ExtractStatefulSet returns the 
protobuf model corresponding to a // Kubernetes StatefulSet resource. -func ExtractStatefulSet(sts *v1.StatefulSet) *model.StatefulSet { +func ExtractStatefulSet(ctx processors.ProcessorContext, sts *v1.StatefulSet) *model.StatefulSet { statefulSet := model.StatefulSet{ Metadata: extractMetadata(&sts.ObjectMeta), Spec: &model.StatefulSetSpec{ @@ -54,7 +53,10 @@ func ExtractStatefulSet(sts *v1.StatefulSet) *model.StatefulSet { } statefulSet.Spec.ResourceRequirements = ExtractPodTemplateResourceRequirements(sts.Spec.Template) + + pctx := ctx.(*processors.K8sProcessorContext) statefulSet.Tags = append(statefulSet.Tags, transformers.RetrieveUnifiedServiceTags(sts.ObjectMeta.Labels)...) + statefulSet.Tags = append(statefulSet.Tags, transformers.RetrieveMetadataTags(sts.ObjectMeta.Labels, sts.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) return &statefulSet } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset_test.go index bb24525b9c22e..cbcf2924951f0 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/statefulset_test.go @@ -8,10 +8,12 @@ package k8s import ( + "sort" "testing" "time" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" @@ -24,8 +26,10 @@ func TestExtractStatefulSet(t *testing.T) { timestamp := metav1.NewTime(time.Date(2014, time.January, 15, 0, 0, 0, 0, time.UTC)) // 1389744000 testInt32 := int32(2) tests := map[string]struct { - input appsv1.StatefulSet - expected model.StatefulSet + input appsv1.StatefulSet + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.StatefulSet }{ "full sts": { input: appsv1.StatefulSet{ @@ -72,7 +76,14 @@ func TestExtractStatefulSet(t *testing.T) { Replicas: 2, UpdatedReplicas: 2, }, - }, expected: model.StatefulSet{ + }, + labelsAsTags: map[string]string{ + "label": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, + expected: model.StatefulSet{ Metadata: &model.Metadata{ Name: "sts", Namespace: "namespace", @@ -91,7 +102,11 @@ func TestExtractStatefulSet(t *testing.T) { Message: "123", }, }, - Tags: []string{"kube_condition_test:false"}, + Tags: []string{ + "kube_condition_test:false", + "application:foo", + "annotation_key:bar", + }, Spec: &model.StatefulSetSpec{ DesiredReplicas: 2, UpdateStrategy: "RollingUpdate", @@ -144,7 +159,14 @@ func TestExtractStatefulSet(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractStatefulSet(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractStatefulSet(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/storageclass.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/storageclass.go index 479330f512db6..941ac73aba19a 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/storageclass.go +++ 
b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/storageclass.go @@ -8,6 +8,8 @@ package k8s import ( + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/transformers" corev1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -15,7 +17,7 @@ import ( ) // ExtractStorageClass returns the protobuf model corresponding to a Kubernetes StorageClass resource. -func ExtractStorageClass(sc *storagev1.StorageClass) *model.StorageClass { +func ExtractStorageClass(ctx processors.ProcessorContext, sc *storagev1.StorageClass) *model.StorageClass { msg := &model.StorageClass{ AllowedTopologies: extractStorageClassTopologies(sc.AllowedTopologies), Metadata: extractMetadata(&sc.ObjectMeta), @@ -39,6 +41,9 @@ func ExtractStorageClass(sc *storagev1.StorageClass) *model.StorageClass { msg.VolumeBindingMode = string(*sc.VolumeBindingMode) } + pctx := ctx.(*processors.K8sProcessorContext) + msg.Tags = append(msg.Tags, transformers.RetrieveMetadataTags(sc.ObjectMeta.Labels, sc.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) + return msg } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/storageclass_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/storageclass_test.go index af542eb820ba6..79de2ed872f1b 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/storageclass_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/storageclass_test.go @@ -8,6 +8,7 @@ package k8s import ( + "sort" "testing" "time" @@ -18,6 +19,7 @@ import ( "github.com/stretchr/testify/assert" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -80,9 +82,17 @@ func TestExtractStorageClass(t *testing.T) { Provisioner: "provisioner", ReclaimPolicy: string(corev1.PersistentVolumeReclaimDelete), VolumeBindingMode: string(storagev1.VolumeBindingImmediate), + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, } - actual := ExtractStorageClass(sc) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: map[string]string{"app": "application"}, + AnnotationsAsTags: map[string]string{"annotation": "annotation_key"}, + } + actual := ExtractStorageClass(pctx, sc) assert.Equal(t, expected, actual) }) t.Run("standard", func(t *testing.T) { @@ -162,9 +172,19 @@ func TestExtractStorageClass(t *testing.T) { Provisioner: "provisioner", ReclaimPolicy: string(corev1.PersistentVolumeReclaimRetain), VolumeBindingMode: string(storagev1.VolumeBindingWaitForFirstConsumer), + Tags: []string{ + "application:my-app", + "annotation_key:my-annotation", + }, } - actual := ExtractStorageClass(sc) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: map[string]string{"app": "application"}, + AnnotationsAsTags: map[string]string{"annotation": "annotation_key"}, + } + actual := ExtractStorageClass(pctx, sc) + sort.Strings(actual.Tags) + sort.Strings(expected.Tags) assert.Equal(t, expected, actual) }) } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler.go index 6d1ce327aa06c..22aa5bacfcaa6 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler.go +++ 
b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler.go @@ -8,6 +8,7 @@ package k8s import ( + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" corev1 "k8s.io/api/core/v1" v1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" @@ -17,7 +18,7 @@ import ( ) // ExtractVerticalPodAutoscaler returns the protobuf model corresponding to a Kubernetes Vertical Pod Autoscaler resource. -func ExtractVerticalPodAutoscaler(v *v1.VerticalPodAutoscaler) *model.VerticalPodAutoscaler { +func ExtractVerticalPodAutoscaler(ctx processors.ProcessorContext, v *v1.VerticalPodAutoscaler) *model.VerticalPodAutoscaler { if v == nil { return &model.VerticalPodAutoscaler{} } @@ -35,7 +36,10 @@ func ExtractVerticalPodAutoscaler(v *v1.VerticalPodAutoscaler) *model.VerticalPo m.Tags = append(m.Tags, conditionTags...) } + pctx := ctx.(*processors.K8sProcessorContext) m.Tags = append(m.Tags, transformers.RetrieveUnifiedServiceTags(v.ObjectMeta.Labels)...) + m.Tags = append(m.Tags, transformers.RetrieveMetadataTags(v.ObjectMeta.Labels, v.ObjectMeta.Annotations, pctx.LabelsAsTags, pctx.AnnotationsAsTags)...) + return m } diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler_test.go index 6054c58092b67..7892adea05104 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler_test.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/k8s/verticalpodautoscaler_test.go @@ -8,6 +8,7 @@ package k8s import ( + "sort" "testing" "time" @@ -19,6 +20,7 @@ import ( v1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1" model "github.com/DataDog/agent-payload/v5/process" + "github.com/DataDog/datadog-agent/pkg/collector/corechecks/cluster/orchestrator/processors" ) func TestExtractVerticalPodAutoscaler(t *testing.T) { @@ -28,8 +30,10 @@ func TestExtractVerticalPodAutoscaler(t *testing.T) { controlledValues := v1.ContainerControlledValuesRequestsAndLimits tests := map[string]struct { - input v1.VerticalPodAutoscaler - expected model.VerticalPodAutoscaler + input v1.VerticalPodAutoscaler + labelsAsTags map[string]string + annotationsAsTags map[string]string + expected model.VerticalPodAutoscaler }{ "standard": { input: v1.VerticalPodAutoscaler{ @@ -116,6 +120,12 @@ func TestExtractVerticalPodAutoscaler(t *testing.T) { }, }, }, + labelsAsTags: map[string]string{ + "app": "application", + }, + annotationsAsTags: map[string]string{ + "annotation": "annotation_key", + }, expected: model.VerticalPodAutoscaler{ Metadata: &model.Metadata{ Name: "VPATest", @@ -211,6 +221,8 @@ func TestExtractVerticalPodAutoscaler(t *testing.T) { Tags: []string{ "kube_condition_recommendationprovided:true", "kube_condition_nopodsmatched:true", + "application:my-app", + "annotation_key:my-annotation", }, }, }, @@ -348,7 +360,14 @@ func TestExtractVerticalPodAutoscaler(t *testing.T) { } for name, tc := range tests { t.Run(name, func(t *testing.T) { - assert.Equal(t, &tc.expected, ExtractVerticalPodAutoscaler(&tc.input)) + pctx := &processors.K8sProcessorContext{ + LabelsAsTags: tc.labelsAsTags, + AnnotationsAsTags: tc.annotationsAsTags, + } + actual := ExtractVerticalPodAutoscaler(pctx, &tc.input) + sort.Strings(actual.Tags) + sort.Strings(tc.expected.Tags) + assert.Equal(t, &tc.expected, actual) }) } } diff --git 
a/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/resourcetags.go similarity index 75% rename from pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go rename to pkg/collector/corechecks/cluster/orchestrator/transformers/resourcetags.go index fe0974a49fac2..7d45639ff70d3 100644 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/ust.go +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/resourcetags.go @@ -10,7 +10,6 @@ package transformers import ( "fmt" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" ) @@ -49,3 +48,27 @@ func RetrieveUnifiedServiceTags(labels map[string]string) []string { } return tags } + +// RetrieveMetadataTags returns the tags derived from a resource's labels and annotations using the labelsAsTags and annotationsAsTags mappings. +func RetrieveMetadataTags( + labels map[string]string, + annotations map[string]string, + labelsAsTags map[string]string, + annotationsAsTags map[string]string, +) []string { + tags := []string{} + + for name, value := range labels { + if tagKey, ok := labelsAsTags[name]; ok { + tags = append(tags, fmt.Sprintf("%s:%s", tagKey, value)) + } + } + + for name, value := range annotations { + if tagKey, ok := annotationsAsTags[name]; ok { + tags = append(tags, fmt.Sprintf("%s:%s", tagKey, value)) + } + } + + return tags +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/resourcetags_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/resourcetags_test.go new file mode 100644 index 0000000000000..d397a03eb1b8f --- /dev/null +++ b/pkg/collector/corechecks/cluster/orchestrator/transformers/resourcetags_test.go @@ -0,0 +1,121 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc.
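For reference, a minimal standalone sketch of how the RetrieveMetadataTags helper added above behaves. The local retrieveMetadataTags function re-implements the same logic for illustration, and the label, annotation, and mapping values are illustrative only, mirroring the fixtures used in the tests that follow.

package main

import "fmt"

// retrieveMetadataTags mirrors the logic of transformers.RetrieveMetadataTags above:
// every label or annotation whose key appears in the corresponding *AsTags mapping
// is emitted as "<mapped key>:<value>".
func retrieveMetadataTags(labels, annotations, labelsAsTags, annotationsAsTags map[string]string) []string {
    tags := []string{}
    for name, value := range labels {
        if tagKey, ok := labelsAsTags[name]; ok {
            tags = append(tags, fmt.Sprintf("%s:%s", tagKey, value))
        }
    }
    for name, value := range annotations {
        if tagKey, ok := annotationsAsTags[name]; ok {
            tags = append(tags, fmt.Sprintf("%s:%s", tagKey, value))
        }
    }
    return tags
}

func main() {
    tags := retrieveMetadataTags(
        map[string]string{"app": "my-app", "team": "my-team"}, // labels
        map[string]string{"annotation": "my-annotation"},      // annotations
        map[string]string{"app": "application"},               // labelsAsTags
        map[string]string{"annotation": "annotation_key"},     // annotationsAsTags
    )
    fmt.Println(tags) // e.g. [application:my-app annotation_key:my-annotation] (map iteration order is not guaranteed)
}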
+ +//go:build kubeapiserver && orchestrator + +package transformers + +import ( + "reflect" + "testing" + + configmock "github.com/DataDog/datadog-agent/pkg/config/mock" + "github.com/DataDog/datadog-agent/pkg/util/kubernetes" + + "github.com/stretchr/testify/assert" +) + +func TestRetrieveUST(t *testing.T) { + cfg := configmock.New(t) + cfg.SetWithoutSource("env", "staging") + cfg.SetWithoutSource(tagKeyService, "not-applied") + cfg.SetWithoutSource(tagKeyVersion, "not-applied") + + tests := []struct { + name string + labels map[string]string + want []string + }{ + { + name: "label contains ust, labels ust takes precedence", + labels: map[string]string{kubernetes.EnvTagLabelKey: "prod", kubernetes.VersionTagLabelKey: "123", kubernetes.ServiceTagLabelKey: "app"}, + want: []string{"env:prod", "version:123", "service:app"}, + }, + { + name: "label does not contain env, takes from config", + labels: map[string]string{}, + want: []string{"env:staging"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := RetrieveUnifiedServiceTags(tt.labels); !reflect.DeepEqual(got, tt.want) { + t.Errorf("RetrieveUnifiedServiceTags() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestRetrieveMetadataTags(t *testing.T) { + tests := []struct { + name string + labels map[string]string + annotations map[string]string + labelsAsTags map[string]string + annotationsAsTags map[string]string + want []string + }{ + { + name: "labels and annotations have matching tags", + labels: map[string]string{ + "app": "my-app", + "team": "my-team", + }, + annotations: map[string]string{ + "annotation-key": "annotation-value", + }, + labelsAsTags: map[string]string{ + "app": "application", + "team": "team-name", + }, + annotationsAsTags: map[string]string{ + "annotation-key": "annotation_key", + }, + want: []string{"application:my-app", "team-name:my-team", "annotation_key:annotation-value"}, + }, + { + name: "no matching labels or annotations", + labels: map[string]string{ + "random": "value", + }, + annotations: map[string]string{ + "another-random": "value", + }, + labelsAsTags: map[string]string{"app": "application"}, + annotationsAsTags: map[string]string{"annotation-key": "annotation_key"}, + want: []string{}, + }, + { + name: "only annotations match", + labels: map[string]string{ + "random": "value", + }, + annotations: map[string]string{ + "annotation-key": "annotation-value", + }, + labelsAsTags: map[string]string{"app": "application"}, + annotationsAsTags: map[string]string{"annotation-key": "annotation_key"}, + want: []string{"annotation_key:annotation-value"}, + }, + { + name: "only labels match", + labels: map[string]string{ + "app": "my-app", + }, + annotations: map[string]string{ + "random-annotation": "value", + }, + labelsAsTags: map[string]string{"app": "application"}, + annotationsAsTags: map[string]string{"annotation-key": "annotation_key"}, + want: []string{"application:my-app"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := RetrieveMetadataTags(tt.labels, tt.annotations, tt.labelsAsTags, tt.annotationsAsTags) + assert.ElementsMatch(t, tt.want, got) + }) + } +} diff --git a/pkg/collector/corechecks/cluster/orchestrator/transformers/ust_test.go b/pkg/collector/corechecks/cluster/orchestrator/transformers/ust_test.go deleted file mode 100644 index 530eab9ce178a..0000000000000 --- a/pkg/collector/corechecks/cluster/orchestrator/transformers/ust_test.go +++ /dev/null @@ -1,47 +0,0 @@ -// Unless explicitly stated otherwise all files in 
this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build kubeapiserver && orchestrator - -package transformers - -import ( - "reflect" - "testing" - - configmock "github.com/DataDog/datadog-agent/pkg/config/mock" - "github.com/DataDog/datadog-agent/pkg/util/kubernetes" -) - -func TestRetrieveUST(t *testing.T) { - cfg := configmock.New(t) - cfg.SetWithoutSource("env", "staging") - cfg.SetWithoutSource(tagKeyService, "not-applied") - cfg.SetWithoutSource(tagKeyVersion, "not-applied") - - tests := []struct { - name string - labels map[string]string - want []string - }{ - { - name: "label contains ust, labels ust takes precedence", - labels: map[string]string{kubernetes.EnvTagLabelKey: "prod", kubernetes.VersionTagLabelKey: "123", kubernetes.ServiceTagLabelKey: "app"}, - want: []string{"env:prod", "version:123", "service:app"}, - }, - { - name: "label does not contain env, takes from config", - labels: map[string]string{}, - want: []string{"env:staging"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := RetrieveUnifiedServiceTags(tt.labels); !reflect.DeepEqual(got, tt.want) { - t.Errorf("RetrieveUnifiedServiceTags() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/pkg/collector/corechecks/containerimage/check.go b/pkg/collector/corechecks/containerimage/check.go index 1656d568e3632..aca194f46445b 100644 --- a/pkg/collector/corechecks/containerimage/check.go +++ b/pkg/collector/corechecks/containerimage/check.go @@ -20,7 +20,7 @@ import ( core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -95,8 +95,8 @@ type Check struct { } // Factory returns a new check factory -func Factory(store workloadmeta.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(store workloadmeta.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return core.NewLongRunningCheckWrapper(&Check{ CheckBase: core.NewCheckBase(CheckName), workloadmetaStore: store, diff --git a/pkg/collector/corechecks/containerlifecycle/check.go b/pkg/collector/corechecks/containerlifecycle/check.go index b7a20adaa4c90..b712744289751 100644 --- a/pkg/collector/corechecks/containerlifecycle/check.go +++ b/pkg/collector/corechecks/containerlifecycle/check.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/env" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -173,8 +173,8 @@ func (c *Check) Cancel() { close(c.stopCh) } func (c *Check) Interval() time.Duration { return 0 } // Factory returns a new check factory -func Factory(store workloadmeta.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(store workloadmeta.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return core.NewLongRunningCheckWrapper(&Check{ CheckBase: core.NewCheckBase(CheckName), 
workloadmetaStore: store, diff --git a/pkg/collector/corechecks/containers/containerd/check.go b/pkg/collector/corechecks/containers/containerd/check.go index ab901f4a371a1..8cc4e67c6af13 100644 --- a/pkg/collector/corechecks/containers/containerd/check.go +++ b/pkg/collector/corechecks/containers/containerd/check.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/prometheus" ) @@ -39,6 +39,12 @@ const ( CheckName = "containerd" pullImageGrpcMethod = "PullImage" cacheValidity = 2 * time.Second + + imageSizeQueryInterval = 10 * time.Minute + imageCreateEvent = "/images/create" + imageUpdateEvent = "/images/update" + imageDeleteEvent = "/images/delete" + imageWildcardEvent = "/images/*" ) var matchAllCap = regexp.MustCompile("([a-z0-9])([A-Z])") @@ -64,8 +70,8 @@ type ContainerdConfig struct { } // Factory is used to create register the check and initialize it. -func Factory(store workloadmeta.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(store workloadmeta.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return &ContainerdCheck{ CheckBase: corechecks.NewCheckBase(CheckName), instance: &ContainerdConfig{}, @@ -102,10 +108,19 @@ func (c *ContainerdCheck) Configure(senderManager sender.SenderManager, _ uint64 } c.httpClient = http.Client{Timeout: time.Duration(1) * time.Second} - c.processor = generic.NewProcessor(metrics.GetProvider(optional.NewOption(c.store)), generic.NewMetadataContainerAccessor(c.store), metricsAdapter{}, getProcessorFilter(c.containerFilter, c.store), c.tagger) + c.processor = generic.NewProcessor(metrics.GetProvider(option.New(c.store)), generic.NewMetadataContainerAccessor(c.store), metricsAdapter{}, getProcessorFilter(c.containerFilter, c.store), c.tagger) c.processor.RegisterExtension("containerd-custom-metrics", &containerdCustomMetricsExtension{}) c.subscriber = createEventSubscriber("ContainerdCheck", c.client, cutil.FiltersWithNamespaces(c.instance.ContainerdFilters)) + c.subscriber.isCacheConfigValid = c.isEventConfigValid() + if err := c.initializeImageCache(); err != nil { + log.Warnf("Failed to initialize image size cache: %v", err) + } + if !c.subscriber.isCacheConfigValid { + log.Debugf("Image event collection not configured. 
Starting periodic cache updates.") + go c.periodicImageSizeQuery() + } + return nil } @@ -153,8 +168,8 @@ func (c *ContainerdCheck) runContainerdCustom(sender sender.Sender) error { } for _, namespace := range namespaces { - if err := c.collectImageSizes(sender, c.client, namespace); err != nil { - log.Infof("Failed to scrape containerd openmetrics endpoint, err: %s", err) + if err := c.collectImageSizes(sender, namespace); err != nil { + log.Infof("Namespace skipped: %s", err) } } @@ -215,25 +230,15 @@ func (c *ContainerdCheck) scrapeOpenmetricsEndpoint(sender sender.Sender) error return nil } -func (c *ContainerdCheck) collectImageSizes(sender sender.Sender, cl cutil.ContainerdItf, namespace string) error { - // Report images size - images, err := cl.ListImages(namespace) - if err != nil { - return err +func (c *ContainerdCheck) collectImageSizes(sender sender.Sender, namespace string) error { + imageSizes := c.subscriber.GetImageSizes() + cachedImages, ok := imageSizes[namespace] + if !ok { + return fmt.Errorf("no cached images found for namespace: %s", namespace) } - for _, image := range images { - var size int64 - - if err := cl.CallWithClientContext(namespace, func(c context.Context) error { - size, err = image.Size(c) - return err - }); err != nil { - log.Debugf("Unable to get image size for image: %s, err: %s", image.Name(), err) - continue - } - - sender.Gauge("containerd.image.size", float64(size), "", getImageTags(image.Name())) + for imageName, size := range cachedImages { + sender.Gauge("containerd.image.size", float64(size), "", getImageTags(imageName)) } return nil @@ -252,3 +257,77 @@ func (c *ContainerdCheck) collectEvents(sender sender.Sender) { // Process events c.computeEvents(events, sender, c.containerFilter) } + +func (c *ContainerdCheck) initializeImageCache() error { + namespaces, err := cutil.NamespacesToWatch(context.TODO(), c.client) + if err != nil { + return fmt.Errorf("failed to list namespaces: %w", err) + } + + newCache := make(map[string]map[string]int64) + + for _, namespace := range namespaces { + images, err := c.client.ListImages(namespace) + if err != nil { + log.Warnf("Failed to list images for namespace %s: %v", namespace, err) + continue + } + + newCache[namespace] = make(map[string]int64) + + for _, image := range images { + size, err := c.subscriber.getImageSize(namespace, image.Name()) + if err != nil { + log.Debugf("Failed to get size for image %s in namespace %s: %v", image.Name(), namespace, err) + continue + } + + newCache[namespace][image.Name()] = size + } + } + + c.subscriber.imageSizeCacheLock.Lock() + c.subscriber.imageSizeCache = newCache + c.subscriber.imageSizeCacheLock.Unlock() + + return nil +} + +func (c *ContainerdCheck) isEventConfigValid() bool { + if !c.instance.CollectEvents { + return false + } + + hasImageEvents := map[string]bool{ + imageCreateEvent: false, + imageUpdateEvent: false, + imageDeleteEvent: false, + } + + for _, filter := range c.instance.ContainerdFilters { + strippedFilter := strings.Trim(strings.TrimPrefix(filter, "topic=="), `"`) + if strippedFilter == imageWildcardEvent { + return true + } + if _, ok := hasImageEvents[strippedFilter]; ok { + hasImageEvents[strippedFilter] = true + } + } + for _, included := range hasImageEvents { + if !included { + return false + } + } + return true +} + +func (c *ContainerdCheck) periodicImageSizeQuery() { + ticker := time.NewTicker(imageSizeQueryInterval) + defer ticker.Stop() + + for range ticker.C { + if err := c.initializeImageCache(); err != nil { + 
log.Warnf("Failed to refresh image size cache: %v", err) + } + } +} diff --git a/pkg/collector/corechecks/containers/containerd/events.go b/pkg/collector/corechecks/containers/containerd/events.go index 92b87e5df7f87..b815eab9261f2 100644 --- a/pkg/collector/corechecks/containers/containerd/events.go +++ b/pkg/collector/corechecks/containers/containerd/events.go @@ -111,6 +111,10 @@ type subscriber struct { CollectionTimestamp int64 running bool client ctrUtil.ContainerdItf + + isCacheConfigValid bool + imageSizeCache map[string]map[string]int64 // namespace -> image -> size + imageSizeCacheLock sync.RWMutex } func createEventSubscriber(name string, client ctrUtil.ContainerdItf, f []string) *subscriber { @@ -119,6 +123,7 @@ func createEventSubscriber(name string, client ctrUtil.ContainerdItf, f []string CollectionTimestamp: time.Now().Unix(), Filters: f, client: client, + imageSizeCache: make(map[string]map[string]int64), } } @@ -276,6 +281,7 @@ func (s *subscriber) run(ctx context.Context) error { event.Extra = updated.Labels event.Message = fmt.Sprintf("Image %s updated", updated.Name) s.addEvents(event) + s.handleImageUpdate(message.Namespace, updated.Name) case "/images/create": created := &events.ImageCreate{} err := proto.Unmarshal(message.Event.GetValue(), created) @@ -288,6 +294,7 @@ func (s *subscriber) run(ctx context.Context) error { event.Message = fmt.Sprintf("Image %s created", created.Name) event.Extra = created.Labels s.addEvents(event) + s.handleImageCreate(message.Namespace, created.Name) case "/images/delete": deleted := &events.ImageDelete{} err := proto.Unmarshal(message.Event.GetValue(), deleted) @@ -298,6 +305,7 @@ func (s *subscriber) run(ctx context.Context) error { event := processMessage(deleted.Name, message) event.Message = fmt.Sprintf("Image %s created", deleted.Name) s.addEvents(event) + s.handleImageDelete(message.Namespace, deleted.Name) case "/tasks/create": created := &events.TaskCreate{} err := proto.Unmarshal(message.Event.GetValue(), created) @@ -488,3 +496,84 @@ func pauseContainersIDs(client ctrUtil.ContainerdItf) (setPauseContainers, error return pauseContainers, nil } + +func (s *subscriber) GetImageSizes() map[string]map[string]int64 { + s.imageSizeCacheLock.RLock() + defer s.imageSizeCacheLock.RUnlock() + + // Create a snapshot of the cache + snapshot := make(map[string]map[string]int64) + for namespace, images := range s.imageSizeCache { + snapshot[namespace] = make(map[string]int64) + for imageName, size := range images { + snapshot[namespace][imageName] = size + } + } + + return snapshot +} + +func (s *subscriber) handleImageCreate(namespace, imageName string) { + if !s.isCacheConfigValid { + return + } + size, err := s.getImageSize(namespace, imageName) + if err != nil { + log.Debugf("Failed to fetch size for new image %s in namespace %s: %v", imageName, namespace, err) + return + } + s.imageSizeCacheLock.Lock() + defer s.imageSizeCacheLock.Unlock() + + if _, exists := s.imageSizeCache[namespace]; !exists { + s.imageSizeCache[namespace] = make(map[string]int64) + } + s.imageSizeCache[namespace][imageName] = size +} + +func (s *subscriber) handleImageDelete(namespace, imageName string) { + if !s.isCacheConfigValid { + return + } + s.imageSizeCacheLock.Lock() + defer s.imageSizeCacheLock.Unlock() + + if images, exists := s.imageSizeCache[namespace]; exists { + delete(images, imageName) + + if len(images) == 0 { + delete(s.imageSizeCache, namespace) + } + } +} + +func (s *subscriber) handleImageUpdate(namespace, imageName string) { + if 
!s.isCacheConfigValid { + return + } + size, err := s.getImageSize(namespace, imageName) + if err != nil { + log.Debugf("Failed to fetch size for updated image %s in namespace %s: %v", imageName, namespace, err) + return + } + s.imageSizeCacheLock.Lock() + defer s.imageSizeCacheLock.Unlock() + + if _, exists := s.imageSizeCache[namespace]; !exists { + s.imageSizeCache[namespace] = make(map[string]int64) + } + s.imageSizeCache[namespace][imageName] = size +} + +func (s *subscriber) getImageSize(namespace, imageName string) (int64, error) { + var size int64 + err := s.client.CallWithClientContext(namespace, func(ctx context.Context) error { + image, err := s.client.Image(namespace, imageName) + if err != nil { + return err + } + size, err = image.Size(ctx) + return err + }) + return size, err +} diff --git a/pkg/collector/corechecks/containers/containerd/stub.go b/pkg/collector/corechecks/containers/containerd/stub.go index f0ea5996d6349..bda7724a7f611 100644 --- a/pkg/collector/corechecks/containers/containerd/stub.go +++ b/pkg/collector/corechecks/containers/containerd/stub.go @@ -12,7 +12,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -21,6 +21,6 @@ const ( ) // Factory is used to create register the check and initialize it. -func Factory(workloadmeta.Component, tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(workloadmeta.Component, tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/containers/cri/check.go b/pkg/collector/corechecks/containers/cri/check.go index 3e4b6f545fe66..485b1ae085595 100644 --- a/pkg/collector/corechecks/containers/cri/check.go +++ b/pkg/collector/corechecks/containers/cri/check.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -49,8 +49,8 @@ type CRICheck struct { } // Factory is exported for integration testing -func Factory(store workloadmeta.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(store workloadmeta.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return &CRICheck{ CheckBase: core.NewCheckBase(CheckName), instance: &CRIConfig{}, @@ -84,7 +84,7 @@ func (c *CRICheck) Configure(senderManager sender.SenderManager, _ uint64, confi log.Warnf("Can't get container include/exclude filter, no filtering will be applied: %v", err) } - c.processor = generic.NewProcessor(metrics.GetProvider(optional.NewOption(c.store)), generic.NewMetadataContainerAccessor(c.store), metricsAdapter{}, getProcessorFilter(containerFilter, c.store), c.tagger) + c.processor = generic.NewProcessor(metrics.GetProvider(option.New(c.store)), generic.NewMetadataContainerAccessor(c.store), metricsAdapter{}, getProcessorFilter(containerFilter, c.store), c.tagger) if c.instance.CollectDisk { c.processor.RegisterExtension("cri-custom-metrics", 
&criCustomMetricsExtension{criGetter: func() (cri.CRIClient, error) { return cri.GetUtil() diff --git a/pkg/collector/corechecks/containers/cri/stub.go b/pkg/collector/corechecks/containers/cri/stub.go index 9e09d380112fd..0a0d7dc074499 100644 --- a/pkg/collector/corechecks/containers/cri/stub.go +++ b/pkg/collector/corechecks/containers/cri/stub.go @@ -12,7 +12,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -21,6 +21,6 @@ const ( ) // Factory creates a new check instance -func Factory(workloadmeta.Component, tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(workloadmeta.Component, tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/containers/docker/check.go b/pkg/collector/corechecks/containers/docker/check.go index 3d4ea3f0567eb..1ddd68499ad57 100644 --- a/pkg/collector/corechecks/containers/docker/check.go +++ b/pkg/collector/corechecks/containers/docker/check.go @@ -35,7 +35,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -73,8 +73,8 @@ type DockerCheck struct { } // Factory returns a new docker corecheck factory -func Factory(store workloadmeta.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(store workloadmeta.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return &DockerCheck{ CheckBase: core.NewCheckBase(CheckName), instance: &DockerConfig{}, @@ -133,7 +133,7 @@ func (d *DockerCheck) Configure(senderManager sender.SenderManager, _ uint64, co log.Warnf("Can't get container include/exclude filter, no filtering will be applied: %v", err) } - d.processor = generic.NewProcessor(metrics.GetProvider(optional.NewOption(d.store)), generic.NewMetadataContainerAccessor(d.store), metricsAdapter{}, getProcessorFilter(d.containerFilter, d.store), d.tagger) + d.processor = generic.NewProcessor(metrics.GetProvider(option.New(d.store)), generic.NewMetadataContainerAccessor(d.store), metricsAdapter{}, getProcessorFilter(d.containerFilter, d.store), d.tagger) d.processor.RegisterExtension("docker-custom-metrics", &dockerCustomMetricsExtension{}) d.configureNetworkProcessor(&d.processor) d.setOkExitCodes() diff --git a/pkg/collector/corechecks/containers/docker/stub.go b/pkg/collector/corechecks/containers/docker/stub.go index 899aef9653812..11c40d9a21caa 100644 --- a/pkg/collector/corechecks/containers/docker/stub.go +++ b/pkg/collector/corechecks/containers/docker/stub.go @@ -12,7 +12,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -20,6 +20,6 @@ const ( CheckName = "docker" ) -func 
Factory(workloadmeta.Component, tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(workloadmeta.Component, tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/containers/generic/check.go b/pkg/collector/corechecks/containers/generic/check.go index a0686f1052a1c..496d1183e0c0f 100644 --- a/pkg/collector/corechecks/containers/generic/check.go +++ b/pkg/collector/corechecks/containers/generic/check.go @@ -19,7 +19,7 @@ import ( core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -46,8 +46,8 @@ type ContainerCheck struct { } // Factory returns a new check factory -func Factory(store workloadmeta.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(store workloadmeta.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return &ContainerCheck{ CheckBase: core.NewCheckBase(CheckName), instance: &ContainerConfig{}, @@ -68,7 +68,7 @@ func (c *ContainerCheck) Configure(senderManager sender.SenderManager, _ uint64, if err != nil { return err } - c.processor = NewProcessor(metrics.GetProvider(optional.NewOption(c.store)), NewMetadataContainerAccessor(c.store), GenericMetricsAdapter{}, LegacyContainerFilter{OldFilter: filter, Store: c.store}, c.tagger) + c.processor = NewProcessor(metrics.GetProvider(option.New(c.store)), NewMetadataContainerAccessor(c.store), GenericMetricsAdapter{}, LegacyContainerFilter{OldFilter: filter, Store: c.store}, c.tagger) return c.instance.Parse(config) } diff --git a/pkg/collector/corechecks/containers/kubelet/kubelet.go b/pkg/collector/corechecks/containers/kubelet/kubelet.go index 86d7386ecdba7..c913666bc2bd3 100644 --- a/pkg/collector/corechecks/containers/kubelet/kubelet.go +++ b/pkg/collector/corechecks/containers/kubelet/kubelet.go @@ -27,7 +27,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -102,8 +102,8 @@ func initProviders(filter *containers.Filter, config *common.KubeletConfig, podU } // Factory returns a new KubeletCheck factory -func Factory(store workloadmeta.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(store workloadmeta.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return NewKubeletCheck(core.NewCheckBase(CheckName), &common.KubeletConfig{}, store, tagger) }) } diff --git a/pkg/collector/corechecks/containers/kubelet/stub.go b/pkg/collector/corechecks/containers/kubelet/stub.go index e07885b903134..5fe4c31b38c57 100644 --- a/pkg/collector/corechecks/containers/kubelet/stub.go +++ b/pkg/collector/corechecks/containers/kubelet/stub.go @@ -12,7 +12,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" 
"github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -20,6 +20,6 @@ const ( CheckName = "kubelet" ) -func Factory(workloadmeta.Component, tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(workloadmeta.Component, tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/ebpf/ebpf.go b/pkg/collector/corechecks/ebpf/ebpf.go index 1638f60cef9d4..243df1d6452d1 100644 --- a/pkg/collector/corechecks/ebpf/ebpf.go +++ b/pkg/collector/corechecks/ebpf/ebpf.go @@ -24,7 +24,7 @@ import ( ebpfcheck "github.com/DataDog/datadog-agent/pkg/collector/corechecks/ebpf/probe/ebpfcheck/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -44,8 +44,8 @@ type EBPFCheck struct { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go index 8f4261d3f4213..feff584e5c10c 100644 --- a/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go +++ b/pkg/collector/corechecks/ebpf/oomkill/oom_kill.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/util/cgroups" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -50,8 +50,8 @@ type OOMKillCheck struct { } // Factory creates a new check factory -func Factory(tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return newCheck(tagger) }) } diff --git a/pkg/collector/corechecks/ebpf/oomkill/stub.go b/pkg/collector/corechecks/ebpf/oomkill/stub.go index 279c7d9fd304e..c13bffebed5d9 100644 --- a/pkg/collector/corechecks/ebpf/oomkill/stub.go +++ b/pkg/collector/corechecks/ebpf/oomkill/stub.go @@ -11,7 +11,7 @@ package oomkill import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -20,6 +20,6 @@ const ( ) // Factory creates a new check factory -func Factory(tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(tagger tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/ebpf/stub.go b/pkg/collector/corechecks/ebpf/stub.go index 178d250e5e91f..fe5c28128bd2b 100644 --- a/pkg/collector/corechecks/ebpf/stub.go +++ b/pkg/collector/corechecks/ebpf/stub.go @@ -10,7 +10,7 @@ package ebpf import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + 
"github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/ebpf/tcpqueuelength/stub.go b/pkg/collector/corechecks/ebpf/tcpqueuelength/stub.go index 03dc73e4bc15a..06db17e665964 100644 --- a/pkg/collector/corechecks/ebpf/tcpqueuelength/stub.go +++ b/pkg/collector/corechecks/ebpf/tcpqueuelength/stub.go @@ -11,7 +11,7 @@ package tcpqueuelength import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -20,6 +20,6 @@ const ( ) // Factory creates a new check factory -func Factory(tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go index 9644dd511882a..e7d2317321d30 100644 --- a/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go +++ b/pkg/collector/corechecks/ebpf/tcpqueuelength/tcp_queue_length.go @@ -25,7 +25,7 @@ import ( pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/cgroups" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -47,8 +47,8 @@ type TCPQueueLengthCheck struct { } // Factory creates a new check factory -func Factory(tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return newCheck(tagger) }) } diff --git a/pkg/collector/corechecks/embed/apm/apm.go b/pkg/collector/corechecks/embed/apm/apm.go index b724e77720b63..4e8a78f0d16c1 100644 --- a/pkg/collector/corechecks/embed/apm/apm.go +++ b/pkg/collector/corechecks/embed/apm/apm.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -248,8 +248,8 @@ func (c *APMCheck) GetDiagnoses() ([]diagnosis.Diagnosis, error) { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/embed/apm/stub.go b/pkg/collector/corechecks/embed/apm/stub.go index 5ed387c9a2d8f..41d889597f879 100644 --- a/pkg/collector/corechecks/embed/apm/stub.go +++ b/pkg/collector/corechecks/embed/apm/stub.go @@ -10,7 +10,7 @@ package apm import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ 
const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/embed/process/process_agent.go b/pkg/collector/corechecks/embed/process/process_agent.go index 0c4f971467a9e..32d00ccdef6dd 100644 --- a/pkg/collector/corechecks/embed/process/process_agent.go +++ b/pkg/collector/corechecks/embed/process/process_agent.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" "github.com/DataDog/datadog-agent/pkg/util/executable" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -242,8 +242,8 @@ func (c *ProcessAgentCheck) GetDiagnoses() ([]diagnosis.Diagnosis, error) { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/embed/process/stub.go b/pkg/collector/corechecks/embed/process/stub.go index 2279f0ea3c370..f22b676fb8829 100644 --- a/pkg/collector/corechecks/embed/process/stub.go +++ b/pkg/collector/corechecks/embed/process/stub.go @@ -10,7 +10,7 @@ package process import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/gpu/gpu.go b/pkg/collector/corechecks/gpu/gpu.go index 3320e9cdf8c28..aa794d88d8a86 100644 --- a/pkg/collector/corechecks/gpu/gpu.go +++ b/pkg/collector/corechecks/gpu/gpu.go @@ -21,6 +21,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types" + "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" @@ -28,7 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/gpu/nvidia" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -47,21 +48,42 @@ type Check struct { collectors []nvidia.Collector // collectors for NVML metrics nvmlLib nvml.Interface // NVML library interface tagger tagger.Component // Tagger instance to add tags to outgoing metrics + telemetry *checkTelemetry // Telemetry component to emit internal telemetry +} + +type checkTelemetry struct { + nvmlMetricsSent telemetry.Counter + collectorErrors telemetry.Counter + sysprobeChecks telemetry.Counter + activeMetrics telemetry.Gauge + sysprobeMetricsSent telemetry.Counter } // Factory creates a new check factory -func Factory(tagger tagger.Component) 
optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { - return newCheck(tagger) +func Factory(tagger tagger.Component, telemetry telemetry.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { + return newCheck(tagger, telemetry) }) } -func newCheck(tagger tagger.Component) check.Check { +func newCheck(tagger tagger.Component, telemetry telemetry.Component) check.Check { return &Check{ CheckBase: core.NewCheckBase(CheckName), config: &CheckConfig{}, activeMetrics: make(map[model.StatsKey]bool), tagger: tagger, + telemetry: newCheckTelemetry(telemetry), + } +} + +func newCheckTelemetry(tm telemetry.Component) *checkTelemetry { + subsystem := CheckName + return &checkTelemetry{ + nvmlMetricsSent: tm.NewCounter(subsystem, "nvml_metrics_sent", []string{"collector"}, "Number of NVML metrics sent"), + collectorErrors: tm.NewCounter(subsystem, "collector_errors", []string{"collector"}, "Number of errors from NVML collectors"), + sysprobeChecks: tm.NewCounter(subsystem, "sysprobe_checks", []string{"status"}, "Number of sysprobe checks, by status"), + activeMetrics: tm.NewGauge(subsystem, "active_metrics", nil, "Number of active metrics"), + sysprobeMetricsSent: tm.NewCounter(subsystem, "sysprobe_metrics_sent", nil, "Number of metrics sent based on system probe data"), } } @@ -85,7 +107,7 @@ func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, config, } var err error - c.collectors, err = nvidia.BuildCollectors(c.nvmlLib) + c.collectors, err = nvidia.BuildCollectors(&nvidia.CollectorDependencies{NVML: c.nvmlLib, Tagger: c.tagger}) if err != nil { return fmt.Errorf("failed to build NVML collectors: %w", err) } @@ -126,8 +148,10 @@ func (c *Check) Run() error { func (c *Check) emitSysprobeMetrics(snd sender.Sender) error { stats, err := sysprobeclient.GetCheck[model.GPUStats](c.sysProbeClient, sysconfig.GPUMonitoringModule) if err != nil { + c.telemetry.sysprobeChecks.Add(1, "error") return fmt.Errorf("cannot get data from system-probe: %w", err) } + c.telemetry.sysprobeChecks.Add(1, "success") // Set all metrics to inactive, so we can remove the ones that we don't see // and send the final metrics @@ -146,6 +170,8 @@ func (c *Check) emitSysprobeMetrics(snd sender.Sender) error { c.activeMetrics[key] = true } + c.telemetry.sysprobeMetricsSent.Add(float64(3 * len(stats.Metrics))) + // Remove the PIDs that we didn't see in this check for key, active := range c.activeMetrics { if !active { @@ -158,25 +184,36 @@ func (c *Check) emitSysprobeMetrics(snd sender.Sender) error { } } + c.telemetry.activeMetrics.Set(float64(len(c.activeMetrics))) + return nil } func (c *Check) getTagsForKey(key model.StatsKey) []string { - entityID := taggertypes.NewEntityID(taggertypes.ContainerID, key.ContainerID) - tags, err := c.tagger.Tag(entityID, c.tagger.ChecksCardinality()) + // PID is always added + tags := []string{ + // Per-PID metrics are subject to change due to high cardinality + fmt.Sprintf("pid:%d", key.PID), + } + + // Container ID tag will be added or not depending on the tagger configuration + containerEntityID := taggertypes.NewEntityID(taggertypes.ContainerID, key.ContainerID) + containerTags, err := c.tagger.Tag(containerEntityID, c.tagger.ChecksCardinality()) if err != nil { log.Errorf("Error collecting container tags for process %d: %s", key.PID, err) + } else { + tags = append(tags, containerTags...) 
} - // Container ID tag will be added or not depending on the tagger configuration - // PID and GPU UUID are always added as they're not relying on the tagger yet - keyTags := []string{ - // Per-PID metrics are subject to change due to high cardinality - fmt.Sprintf("pid:%d", key.PID), - fmt.Sprintf("gpu_uuid:%s", key.DeviceUUID), + gpuEntityID := taggertypes.NewEntityID(taggertypes.GPU, key.DeviceUUID) + gpuTags, err := c.tagger.Tag(gpuEntityID, c.tagger.ChecksCardinality()) + if err != nil { + log.Errorf("Error collecting GPU tags for process %d: %s", key.PID, err) + } else { + tags = append(tags, gpuTags...) } - return append(tags, keyTags...) + return tags } func (c *Check) emitNvmlMetrics(snd sender.Sender) error { @@ -186,6 +223,7 @@ func (c *Check) emitNvmlMetrics(snd sender.Sender) error { log.Debugf("Collecting metrics from NVML collector: %s", collector.Name()) metrics, collectErr := collector.Collect() if collectErr != nil { + c.telemetry.collectorErrors.Add(1, collector.Name()) err = multierror.Append(err, fmt.Errorf("collector %s failed. %w", collector.Name(), collectErr)) } @@ -193,6 +231,8 @@ func (c *Check) emitNvmlMetrics(snd sender.Sender) error { metricName := gpuMetricsNs + metric.Name snd.Gauge(metricName, metric.Value, "", metric.Tags) } + + c.telemetry.nvmlMetricsSent.Add(float64(len(metrics)), collector.Name()) } return err diff --git a/pkg/collector/corechecks/gpu/gpu_stub.go b/pkg/collector/corechecks/gpu/gpu_stub.go index 4815e0851b106..238603e3a2fe8 100644 --- a/pkg/collector/corechecks/gpu/gpu_stub.go +++ b/pkg/collector/corechecks/gpu/gpu_stub.go @@ -9,11 +9,12 @@ package gpu import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" + "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // Factory creates a new check factory -func Factory(_ tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(_ tagger.Component, _ telemetry.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/gpu/nvidia/collector.go b/pkg/collector/corechecks/gpu/nvidia/collector.go index e801336dadb7e..a00e9241f9802 100644 --- a/pkg/collector/corechecks/gpu/nvidia/collector.go +++ b/pkg/collector/corechecks/gpu/nvidia/collector.go @@ -18,15 +18,11 @@ import ( "github.com/NVIDIA/go-nvml/pkg/nvml" + tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" + taggertypes "github.com/DataDog/datadog-agent/comp/core/tagger/types" "github.com/DataDog/datadog-agent/pkg/util/log" ) -const ( - tagVendor = "gpu_vendor:nvidia" - tagNameModel = "gpu_model" - tagNameUUID = "gpu_uuid" -) - // Collector defines a collector that gets metric from a specific NVML subsystem and device type Collector interface { // Collect collects metrics from the given NVML device. This method should not fill the tags @@ -63,29 +59,42 @@ var allSubsystems = map[string]subsystemBuilder{ clocksCollectorName: newClocksCollector, } +// CollectorDependencies holds the dependencies needed to create a set of collectors. +type CollectorDependencies struct { + // Tagger is the tagger component used to tag the metrics. + Tagger tagger.Component + + // NVML is the NVML library interface used to interact with the NVIDIA devices. 
+ NVML nvml.Interface +} + // BuildCollectors returns a set of collectors that can be used to collect metrics from NVML. -func BuildCollectors(lib nvml.Interface) ([]Collector, error) { - return buildCollectors(lib, allSubsystems) +func BuildCollectors(deps *CollectorDependencies) ([]Collector, error) { + return buildCollectors(deps, allSubsystems) } -func buildCollectors(lib nvml.Interface, subsystems map[string]subsystemBuilder) ([]Collector, error) { +func buildCollectors(deps *CollectorDependencies, subsystems map[string]subsystemBuilder) ([]Collector, error) { var collectors []Collector - devCount, ret := lib.DeviceGetCount() + devCount, ret := deps.NVML.DeviceGetCount() if ret != nvml.SUCCESS { return nil, fmt.Errorf("failed to get device count: %s", nvml.ErrorString(ret)) } for i := 0; i < devCount; i++ { - dev, ret := lib.DeviceGetHandleByIndex(i) + dev, ret := deps.NVML.DeviceGetHandleByIndex(i) if ret != nvml.SUCCESS { return nil, fmt.Errorf("failed to get device handle for index %d: %s", i, nvml.ErrorString(ret)) } - tags := getTagsFromDevice(dev) + tags, err := getTagsFromDevice(dev, deps.Tagger) + if err != nil { + log.Warnf("failed to get tags for device %s: %s", dev, err) + continue + } for name, builder := range subsystems { - subsystem, err := builder(lib, dev, tags) + subsystem, err := builder(deps.NVML, dev, tags) if errors.Is(err, errUnsupportedDevice) { log.Warnf("device %s does not support collector %s", dev, name) continue @@ -102,22 +111,17 @@ func buildCollectors(lib nvml.Interface, subsystems map[string]subsystemBuilder) } // getTagsFromDevice returns the tags associated with the given NVML device. -func getTagsFromDevice(dev nvml.Device) []string { - tags := []string{tagVendor} - +func getTagsFromDevice(dev nvml.Device, tagger tagger.Component) ([]string, error) { uuid, ret := dev.GetUUID() - if ret == nvml.SUCCESS { - tags = append(tags, fmt.Sprintf("%s:%s", tagNameUUID, uuid)) - } else { - log.Warnf("failed to get device UUID: %s", nvml.ErrorString(ret)) + if ret != nvml.SUCCESS { + return nil, fmt.Errorf("failed to get device UUID: %s", nvml.ErrorString(ret)) } - name, ret := dev.GetName() - if ret == nvml.SUCCESS { - tags = append(tags, fmt.Sprintf("%s:%s", tagNameModel, name)) - } else { - log.Warnf("failed to get device name: %s", nvml.ErrorString(ret)) + entityID := taggertypes.NewEntityID(taggertypes.GPU, uuid) + tags, err := tagger.Tag(entityID, tagger.ChecksCardinality()) + if err != nil { + log.Warnf("Error collecting GPU tags for GPU UUID %s: %s", uuid, err) } - return tags + return tags, nil } diff --git a/pkg/collector/corechecks/gpu/nvidia/collector_test.go b/pkg/collector/corechecks/gpu/nvidia/collector_test.go index 67dbfd0f6f92f..5557f3f8d5f67 100644 --- a/pkg/collector/corechecks/gpu/nvidia/collector_test.go +++ b/pkg/collector/corechecks/gpu/nvidia/collector_test.go @@ -14,6 +14,8 @@ import ( "github.com/NVIDIA/go-nvml/pkg/nvml" nvmlmock "github.com/NVIDIA/go-nvml/pkg/nvml/mock" "github.com/stretchr/testify/require" + + taggerMock "github.com/DataDog/datadog-agent/comp/core/tagger/mock" ) func getBasicNvmlDeviceMock() nvml.Device { @@ -54,22 +56,10 @@ func TestCollectorsStillInitIfOneFails(t *testing.T) { return nil, errors.New("failure") } - collectors, err := buildCollectors(getBasicNvmlMock(), map[string]subsystemBuilder{"ok": factory, "fail": factory}) + nvmlMock := getBasicNvmlMock() + fakeTagger := taggerMock.SetupFakeTagger(t) + deps := &CollectorDependencies{NVML: nvmlMock, Tagger: fakeTagger} + collectors, err := buildCollectors(deps, 
map[string]subsystemBuilder{"ok": factory, "fail": factory}) require.NotNil(t, collectors) require.NoError(t, err) } - -func TestGetTagsFromDeviceGetsTagsEvenIfOneFails(t *testing.T) { - device := &nvmlmock.Device{ - GetUUIDFunc: func() (string, nvml.Return) { - return "GPU-123", nvml.SUCCESS - }, - GetNameFunc: func() (string, nvml.Return) { - return "", nvml.ERROR_GPU_IS_LOST - }, - } - - result := getTagsFromDevice(device) - expected := []string{tagVendor, tagNameUUID + ":GPU-123"} - require.ElementsMatch(t, expected, result) -} diff --git a/pkg/collector/corechecks/loader.go b/pkg/collector/corechecks/loader.go index b5aa6de4e8cc9..45398f3208787 100644 --- a/pkg/collector/corechecks/loader.go +++ b/pkg/collector/corechecks/loader.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/loaders" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // CheckFactory factory function type to instantiate checks @@ -26,7 +26,7 @@ type CheckFactory func() check.Check var catalog = make(map[string]CheckFactory) // RegisterCheck adds a check to the catalog -func RegisterCheck(name string, checkFactory optional.Option[func() check.Check]) { +func RegisterCheck(name string, checkFactory option.Option[func() check.Check]) { if v, ok := checkFactory.Get(); ok { catalog[name] = v } @@ -83,7 +83,7 @@ func (gl *GoCheckLoader) String() string { } func init() { - factory := func(sender.SenderManager, optional.Option[integrations.Component], tagger.Component) (check.Loader, error) { + factory := func(sender.SenderManager, option.Option[integrations.Component], tagger.Component) (check.Loader, error) { return NewGoCheckLoader() } diff --git a/pkg/collector/corechecks/loader_test.go b/pkg/collector/corechecks/loader_test.go index 97efe00e1c794..28e6b5343b664 100644 --- a/pkg/collector/corechecks/loader_test.go +++ b/pkg/collector/corechecks/loader_test.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/check/stub" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // FIXTURE @@ -40,8 +40,8 @@ func TestNewGoCheckLoader(t *testing.T) { } } -func testCheckNew() optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func testCheckNew() option.Option[func() check.Check] { + return option.New(func() check.Check { return &TestCheck{} }) } diff --git a/pkg/collector/corechecks/net/network/network.go b/pkg/collector/corechecks/net/network/network.go index d4a1e5cd6ebe7..d49ba549be7cb 100644 --- a/pkg/collector/corechecks/net/network/network.go +++ b/pkg/collector/corechecks/net/network/network.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -308,8 +308,8 @@ func (c *NetworkCheck) Configure(senderManager sender.SenderManager, _ uint64, r } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return 
option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/net/network/stub.go b/pkg/collector/corechecks/net/network/stub.go index 2f9b81170f3dc..bff033509075e 100644 --- a/pkg/collector/corechecks/net/network/stub.go +++ b/pkg/collector/corechecks/net/network/stub.go @@ -10,7 +10,7 @@ package network import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/net/ntp/ntp.go b/pkg/collector/corechecks/net/ntp/ntp.go index c3f91deebf552..4f1ee4605d103 100644 --- a/pkg/collector/corechecks/net/ntp/ntp.go +++ b/pkg/collector/corechecks/net/ntp/ntp.go @@ -26,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/cloudproviders" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -242,8 +242,8 @@ func (c *NTPCheck) queryOffset() (float64, error) { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/network-devices/cisco-sdwan/sdwan.go b/pkg/collector/corechecks/network-devices/cisco-sdwan/sdwan.go index b56f5190c4817..d1ab6b9ea5835 100644 --- a/pkg/collector/corechecks/network-devices/cisco-sdwan/sdwan.go +++ b/pkg/collector/corechecks/network-devices/cisco-sdwan/sdwan.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/network-devices/cisco-sdwan/report" "github.com/DataDog/datadog-agent/pkg/snmp/utils" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -295,8 +295,8 @@ func boolPointer(b bool) *bool { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/networkpath/networkpath.go b/pkg/collector/corechecks/networkpath/networkpath.go index 28bef69f2bbca..6e010fa6eda90 100644 --- a/pkg/collector/corechecks/networkpath/networkpath.go +++ b/pkg/collector/corechecks/networkpath/networkpath.go @@ -23,11 +23,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/networkpath/metricsender" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/DataDog/datadog-agent/pkg/networkpath/telemetry" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute" "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" - - "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // CheckName defines the name of the @@ -140,8 +139,8 @@ func (c *Check) Configure(senderManager 
sender.SenderManager, integrationConfigD } // Factory creates a new check factory -func Factory(telemetry telemetryComp.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(telemetry telemetryComp.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return &Check{ CheckBase: core.NewCheckBase(CheckName), telemetryComp: telemetry, diff --git a/pkg/collector/corechecks/nvidia/jetson/jetson.go b/pkg/collector/corechecks/nvidia/jetson/jetson.go index 01ee8bbcdba71..8f39465db56c6 100644 --- a/pkg/collector/corechecks/nvidia/jetson/jetson.go +++ b/pkg/collector/corechecks/nvidia/jetson/jetson.go @@ -23,7 +23,7 @@ import ( core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -201,8 +201,8 @@ func (c *JetsonCheck) Configure(senderManager sender.SenderManager, _ uint64, da } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/nvidia/jetson/stub.go b/pkg/collector/corechecks/nvidia/jetson/stub.go index 18f9a1eca80b8..d356331df2ba3 100644 --- a/pkg/collector/corechecks/nvidia/jetson/stub.go +++ b/pkg/collector/corechecks/nvidia/jetson/stub.go @@ -9,7 +9,7 @@ package nvidia import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -18,6 +18,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/oracle/activity.go b/pkg/collector/corechecks/oracle/activity.go index b6e96bb6749fd..22c0efea8ec27 100644 --- a/pkg/collector/corechecks/oracle/activity.go +++ b/pkg/collector/corechecks/oracle/activity.go @@ -16,7 +16,6 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/oracle/common" - "github.com/DataDog/datadog-agent/pkg/obfuscate" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -157,8 +156,7 @@ AND status = 'ACTIVE'`) return fmt.Errorf("failed to collect session sampling activity: %w \n%s", err, activityQuery) } - o := obfuscate.NewObfuscator(obfuscate.Config{SQL: c.config.ObfuscatorOptions}) - defer o.Stop() + o := c.LazyInitObfuscator() var payloadSent bool var lastNow string for _, sample := range sessionSamples { diff --git a/pkg/collector/corechecks/oracle/obfuscate.go b/pkg/collector/corechecks/oracle/obfuscate.go new file mode 100644 index 0000000000000..e3a4181648680 --- /dev/null +++ b/pkg/collector/corechecks/oracle/obfuscate.go @@ -0,0 +1,41 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build oracle + +package oracle + +import ( + "sync" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/config/structure" + "github.com/DataDog/datadog-agent/pkg/obfuscate" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +var ( + obfuscatorLock sync.Mutex +) + +// LazyInitObfuscator inits an obfuscator a single time the first time it is called +func (c *Check) LazyInitObfuscator() *obfuscate.Obfuscator { + // Ensure thread safe initialization + obfuscatorLock.Lock() + defer obfuscatorLock.Unlock() + + if c.obfuscator == nil { + var obfuscaterConfig obfuscate.Config + if err := structure.UnmarshalKey(pkgconfigsetup.Datadog(), "apm_config.obfuscation", &obfuscaterConfig); err != nil { + log.Errorf("Failed to unmarshal apm_config.obfuscation: %s", err.Error()) + obfuscaterConfig = obfuscate.Config{} + } + obfuscaterConfig.SQL = c.config.ObfuscatorOptions + + c.obfuscator = obfuscate.NewObfuscator(obfuscaterConfig) + } + + return c.obfuscator +} diff --git a/pkg/collector/corechecks/oracle/oracle.go b/pkg/collector/corechecks/oracle/oracle.go index 224555ba85bd6..fe67b5f3d0076 100644 --- a/pkg/collector/corechecks/oracle/oracle.go +++ b/pkg/collector/corechecks/oracle/oracle.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/obfuscate" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" "github.com/benbjohnson/clock" @@ -114,6 +114,7 @@ type Check struct { legacyIntegrationCompatibilityMode bool clock clock.Clock lastSampleID uint64 + obfuscator *obfuscate.Obfuscator } type vDatabase struct { @@ -423,8 +424,8 @@ func (c *Check) Configure(senderManager sender.SenderManager, integrationConfigD } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/oracle/statements.go b/pkg/collector/corechecks/oracle/statements.go index 333087f3a6c6a..18fb8dd1b79f5 100644 --- a/pkg/collector/corechecks/oracle/statements.go +++ b/pkg/collector/corechecks/oracle/statements.go @@ -423,8 +423,7 @@ func (c *Check) StatementMetrics() (int, error) { return 0, nil } - o := obfuscate.NewObfuscator(obfuscate.Config{SQL: c.config.ObfuscatorOptions}) - defer o.Stop() + o := c.LazyInitObfuscator() var diff OracleRowMonotonicCount planErrors = 0 sendPlan := true diff --git a/pkg/collector/corechecks/oracle/stub.go b/pkg/collector/corechecks/oracle/stub.go index eecc138c0a375..aa96b458b0359 100644 --- a/pkg/collector/corechecks/oracle/stub.go +++ b/pkg/collector/corechecks/oracle/stub.go @@ -9,7 +9,7 @@ package oracle import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -20,6 +20,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/orchestrator/ecs/ecs.go b/pkg/collector/corechecks/orchestrator/ecs/ecs.go index 2db68516a85e1..dc91fff255c8f 
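The new LazyInitObfuscator above replaces the earlier pattern of building (and stopping) a fresh SQL obfuscator on every check run in activity.go and statements.go with a single, mutex-guarded instance that is created on first use and then reused. A minimal sketch of that lazy-initialization shape, with hypothetical names (resource, buildResource, holder) standing in for the obfuscator, its construction from config, and the check struct:

package example

import "sync"

// resource stands in for an expensive-to-build object such as the SQL obfuscator.
type resource struct{ cfg string }

// buildResource stands in for reading configuration and constructing the object.
func buildResource() *resource { return &resource{cfg: "parsed once"} }

type holder struct {
	mu  sync.Mutex
	res *resource
}

// get returns the shared instance, building it only on the first call.
// The mutex keeps initialization safe if two runs race; later calls simply
// return the cached pointer, so the construction cost is paid once.
func (h *holder) get() *resource {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.res == nil {
		h.res = buildResource()
	}
	return h.res
}

Call sites then fetch the shared instance on each run, which is the same shape as the o := c.LazyInitObfuscator() replacements shown above.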
100644 --- a/pkg/collector/corechecks/orchestrator/ecs/ecs.go +++ b/pkg/collector/corechecks/orchestrator/ecs/ecs.go @@ -36,7 +36,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/process/checks" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // CheckName is the name of the check @@ -61,8 +61,8 @@ type Check struct { } // Factory creates a new check factory -func Factory(store workloadmeta.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { return newCheck(store, tagger) }) +func Factory(store workloadmeta.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return newCheck(store, tagger) }) } func newCheck(store workloadmeta.Component, tagger tagger.Component) check.Check { diff --git a/pkg/collector/corechecks/orchestrator/ecs/stub.go b/pkg/collector/corechecks/orchestrator/ecs/stub.go index 153401d5fbc49..c4c13fcc57325 100644 --- a/pkg/collector/corechecks/orchestrator/ecs/stub.go +++ b/pkg/collector/corechecks/orchestrator/ecs/stub.go @@ -12,7 +12,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -21,6 +21,6 @@ const ( ) // Factory creates a new check factory -func Factory(workloadmeta.Component, tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(workloadmeta.Component, tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/orchestrator/pod/pod.go b/pkg/collector/corechecks/orchestrator/pod/pod.go index 97e92b617d647..39d489df8a354 100644 --- a/pkg/collector/corechecks/orchestrator/pod/pod.go +++ b/pkg/collector/corechecks/orchestrator/pod/pod.go @@ -32,7 +32,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // CheckName is the name of the check @@ -60,8 +60,8 @@ type Check struct { } // Factory creates a new check factory -func Factory(store workloadmeta.Component, cfg config.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption( +func Factory(store workloadmeta.Component, cfg config.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New( func() check.Check { return newCheck(store, cfg, tagger) }, diff --git a/pkg/collector/corechecks/orchestrator/pod/stub.go b/pkg/collector/corechecks/orchestrator/pod/stub.go index 556d236381204..6108ca51c5bf8 100644 --- a/pkg/collector/corechecks/orchestrator/pod/stub.go +++ b/pkg/collector/corechecks/orchestrator/pod/stub.go @@ -13,7 +13,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + 
"github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -22,6 +22,6 @@ const ( ) // Factory creates a new check factory -func Factory(workloadmeta.Component, config.Component, tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(workloadmeta.Component, config.Component, tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/sbom/check.go b/pkg/collector/corechecks/sbom/check.go index b646b5a51e55e..c43a6bf6d735f 100644 --- a/pkg/collector/corechecks/sbom/check.go +++ b/pkg/collector/corechecks/sbom/check.go @@ -24,7 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -118,8 +118,8 @@ type Check struct { } // Factory returns a new check factory -func Factory(store workloadmeta.Component, cfg config.Component, tagger tagger.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(store workloadmeta.Component, cfg config.Component, tagger tagger.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return core.NewLongRunningCheckWrapper(&Check{ CheckBase: core.NewCheckBase(CheckName), workloadmetaStore: store, diff --git a/pkg/collector/corechecks/sbom/check_no_trivy.go b/pkg/collector/corechecks/sbom/check_no_trivy.go index 752d65e0217e7..90c1157bd8731 100644 --- a/pkg/collector/corechecks/sbom/check_no_trivy.go +++ b/pkg/collector/corechecks/sbom/check_no_trivy.go @@ -12,7 +12,7 @@ import ( tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -21,6 +21,6 @@ const ( ) // Factory returns a new check factory -func Factory(workloadmeta.Component, config.Component, tagger.Component) optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory(workloadmeta.Component, config.Component, tagger.Component) option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/sbom/convert.go b/pkg/collector/corechecks/sbom/convert.go index b18dd24a94dca..625d63eff96db 100644 --- a/pkg/collector/corechecks/sbom/convert.go +++ b/pkg/collector/corechecks/sbom/convert.go @@ -130,7 +130,7 @@ func convertBOM(in *cyclonedx.BOM) *cyclonedx_v1_4.Bom { } return &cyclonedx_v1_4.Bom{ - SpecVersion: in.SpecVersion.String(), + SpecVersion: cyclonedx.SpecVersion1_4.String(), Version: pointer.Ptr(int32(in.Version)), SerialNumber: stringPtr(in.SerialNumber), Metadata: convertMetadata(in.Metadata), diff --git a/pkg/collector/corechecks/sbom/convert_test.go b/pkg/collector/corechecks/sbom/convert_test.go index c3da013acad1b..de254f7a38a02 100644 --- a/pkg/collector/corechecks/sbom/convert_test.go +++ b/pkg/collector/corechecks/sbom/convert_test.go @@ -22,9 +22,12 @@ func FuzzConvertBOM(f *testing.F) { var bom cyclonedx.BOM f.Fuzz(&bom) + bom.SpecVersion = cyclonedx.SpecVersion1_6 pb := convertBOM(&bom) _, err := proto.Marshal(pb) + assert.Nil(t, err) + assert.Equal(t, pb.SpecVersion, 
cyclonedx.SpecVersion1_4.String()) }) } diff --git a/pkg/collector/corechecks/sbom/processor_test.go b/pkg/collector/corechecks/sbom/processor_test.go index 4fcc1c3ba770e..10f77052e564e 100644 --- a/pkg/collector/corechecks/sbom/processor_test.go +++ b/pkg/collector/corechecks/sbom/processor_test.go @@ -37,7 +37,7 @@ import ( sbomscanner "github.com/DataDog/datadog-agent/pkg/sbom/scanner" "github.com/DataDog/datadog-agent/pkg/util/fxutil" "github.com/DataDog/datadog-agent/pkg/util/hostname" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -597,7 +597,7 @@ func TestProcessEvents(t *testing.T) { cacheDir := t.TempDir() cfg := configmock.New(t) - wmeta := fxutil.Test[optional.Option[workloadmeta.Component]](t, fx.Options( + wmeta := fxutil.Test[option.Option[workloadmeta.Component]](t, fx.Options( core.MockBundle(), workloadmetafxmock.MockModule(workloadmeta.NewParams()), fx.Replace(configcomp.MockParams{ diff --git a/pkg/collector/corechecks/servicediscovery/errors.go b/pkg/collector/corechecks/servicediscovery/errors.go index b38ecbcca2122..65b0b3940c1e5 100644 --- a/pkg/collector/corechecks/servicediscovery/errors.go +++ b/pkg/collector/corechecks/servicediscovery/errors.go @@ -12,10 +12,7 @@ import ( type errCode string const ( - errorCodePortPoller errCode = "port_poller" - errorCodeRepeatedServiceName errCode = "repeated_service_name" - errorCodeSystemProbeConn errCode = "system_probe_conn" - errorCodeSystemProbeServices errCode = "system_probe_services" + errorCodeSystemProbeServices errCode = "system_probe_services" //nolint:unused ) type errWithCode struct { diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux.go b/pkg/collector/corechecks/servicediscovery/impl_linux.go index 2f0b3df61d63f..97cfcce0c29cd 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux.go @@ -31,21 +31,16 @@ type linuxImpl struct { getDiscoveryServices func(client *http.Client) (*model.ServicesResponse, error) time timer - ignoreCfg map[string]bool - - ignoreProcs map[int]bool aliveServices map[int]*serviceInfo potentialServices map[int]*serviceInfo sysProbeClient *http.Client } -func newLinuxImpl(ignoreCfg map[string]bool) (osImpl, error) { +func newLinuxImpl() (osImpl, error) { return &linuxImpl{ getDiscoveryServices: getDiscoveryServices, time: realTime{}, - ignoreCfg: ignoreCfg, - ignoreProcs: make(map[int]bool), aliveServices: make(map[int]*serviceInfo), potentialServices: make(map[int]*serviceInfo), sysProbeClient: sysprobeclient.Get(pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")), @@ -99,19 +94,10 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { // check open ports - these will be potential new services if they are still alive in the next iteration. 
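The comment above appears to describe the check's two-pass discovery: a process with open ports is only recorded as a potential service when first seen, and is treated as running once it is still alive on the following iteration. A rough sketch of that promotion logic, using hypothetical names (tracker, observe) rather than the check's actual types:

package example

// tracker sketches the potential/alive bookkeeping used by the discovery check.
type tracker struct {
	potential map[int]string // pid -> name, seen in the previous run only
	running   map[int]string // pid -> name, confirmed across two runs
}

func newTracker() *tracker {
	return &tracker{potential: map[int]string{}, running: map[int]string{}}
}

// observe takes the services seen in the current run and returns the names
// that were just promoted from potential to running.
func (t *tracker) observe(seen map[int]string) []string {
	var promoted []string
	for pid, name := range seen {
		if _, ok := t.running[pid]; ok {
			continue // already confirmed in an earlier run
		}
		if _, ok := t.potential[pid]; ok {
			// Second consecutive sighting: the service is considered running.
			delete(t.potential, pid)
			t.running[pid] = name
			promoted = append(promoted, name)
			continue
		}
		// First sighting: remember it and wait for the next run.
		t.potential[pid] = name
	}
	return promoted
}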
for _, service := range response.Services { pid := service.PID - if li.ignoreProcs[pid] { - continue - } if _, ok := li.aliveServices[pid]; !ok { log.Debugf("[pid: %d] found new process with open ports", pid) svc := li.getServiceInfo(service) - if li.ignoreCfg[svc.meta.Name] { - log.Debugf("[pid: %d] process ignored from config: %s", pid, svc.meta.Name) - li.ignoreProcs[pid] = true - continue - } - log.Debugf("[pid: %d] adding process to potential: %s", pid, svc.meta.Name) li.potentialServices[pid] = &svc } @@ -136,15 +122,7 @@ func (li *linuxImpl) DiscoverServices() (*discoveredServices, error) { } } - // check if services previously marked as ignore are still alive. - for pid := range li.ignoreProcs { - if _, ok := serviceMap[pid]; !ok { - delete(li.ignoreProcs, pid) - } - } - return &discoveredServices{ - ignoreProcs: li.ignoreProcs, potentials: li.potentialServices, runningServices: li.aliveServices, events: events, diff --git a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go index ba5e0e5a1caad..762a9a35d2b84 100644 --- a/pkg/collector/corechecks/servicediscovery/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/impl_linux_test.go @@ -52,21 +52,11 @@ var ( env: []string{}, cwd: "", } - procIgnoreService1 = testProc{ - pid: 100, - env: nil, - cwd: "", - } procTestService1Repeat = testProc{ pid: 101, env: []string{}, cwd: "", } - procTestService1DifferentPID = testProc{ - pid: 102, - env: []string{}, - cwd: "", - } ) var ( @@ -104,29 +94,6 @@ var ( StartTimeMilli: procLaunchedMilli, ContainerID: dummyContainerID, } - portTCP8080DifferentPID = model.Service{ - PID: procTestService1DifferentPID.pid, - Name: "test-service-1", - GeneratedName: "test-service-1-generated", - GeneratedNameSource: "test-service-1-generated-source", - ContainerServiceName: "test-service-1-container", - ContainerServiceNameSource: "service", - DDService: "test-service-1", - DDServiceInjected: true, - Ports: []uint16{8080}, - APMInstrumentation: string(apm.Injected), - CommandLine: []string{"test-service-1"}, - StartTimeMilli: procLaunchedMilli, - ContainerID: dummyContainerID, - } - portTCP8081 = model.Service{ - PID: procIgnoreService1.pid, - Name: "ignore-1", - GeneratedName: "ignore-1", - Ports: []uint16{8081}, - StartTimeMilli: procLaunchedMilli, - ContainerID: dummyContainerID, - } portTCP5000 = model.Service{ PID: procPythonService.pid, Name: "python-service", @@ -189,7 +156,6 @@ func cmpEvents(a, b *event) bool { func Test_linuxImpl(t *testing.T) { host := "test-host" - cfgYaml := `ignore_processes: ["ignore-1", "ignore-2"]` t.Setenv("DD_DISCOVERY_ENABLED", "true") type checkRun struct { @@ -209,7 +175,6 @@ func Test_linuxImpl(t *testing.T) { servicesResp: &model.ServicesResponse{Services: []model.Service{ portTCP5000, portTCP8080, - portTCP8081, }}, time: calcTime(0), }, @@ -217,7 +182,6 @@ func Test_linuxImpl(t *testing.T) { servicesResp: &model.ServicesResponse{Services: []model.Service{ portTCP5000, portTCP8080, - portTCP8081, }}, time: calcTime(1 * time.Minute), }, @@ -225,7 +189,6 @@ func Test_linuxImpl(t *testing.T) { servicesResp: &model.ServicesResponse{Services: []model.Service{ portTCP5000, portTCP8080UpdatedRSS, - portTCP8081, }}, time: calcTime(20 * time.Minute), }, @@ -372,7 +335,6 @@ func Test_linuxImpl(t *testing.T) { { servicesResp: &model.ServicesResponse{Services: []model.Service{ portTCP8080, - portTCP8081, portTCP5432, }}, time: calcTime(0), @@ -380,7 +342,6 @@ func Test_linuxImpl(t *testing.T) 
{ { servicesResp: &model.ServicesResponse{Services: []model.Service{ portTCP8080, - portTCP8081, portTCP5432, }}, time: calcTime(1 * time.Minute), @@ -388,7 +349,6 @@ func Test_linuxImpl(t *testing.T) { { servicesResp: &model.ServicesResponse{Services: []model.Service{ portTCP8080, - portTCP8081, portTCP5432, }}, time: calcTime(20 * time.Minute), @@ -523,93 +483,6 @@ func Test_linuxImpl(t *testing.T) { }, }, }, - { - // in case we detect a service is restarted, we skip the stop event and send - // another start event instead. - name: "restart_service", - checkRun: []*checkRun{ - { - servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP8080, - portTCP8081, - }}, - time: calcTime(0), - }, - { - servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP8080, - portTCP8081, - }}, - time: calcTime(1 * time.Minute), - }, - { - servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP8080DifferentPID, - }}, - time: calcTime(21 * time.Minute), - }, - { - servicesResp: &model.ServicesResponse{Services: []model.Service{ - portTCP8080DifferentPID, - }}, - time: calcTime(22 * time.Minute), - }, - }, - wantEvents: []*event{ - { - RequestType: "start-service", - APIVersion: "v2", - Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - GeneratedServiceName: "test-service-1-generated", - GeneratedServiceNameSource: "test-service-1-generated-source", - ContainerServiceName: "test-service-1-container", - ContainerServiceNameSource: "service", - DDService: "test-service-1", - ServiceNameSource: "injected", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - StartTimeMilli: calcTime(0).UnixMilli(), - LastSeen: calcTime(1 * time.Minute).Unix(), - Ports: []uint16{8080}, - PID: 99, - CommandLine: []string{"test-service-1"}, - APMInstrumentation: "none", - RSSMemory: 100 * 1024 * 1024, - CPUCores: 1.5, - ContainerID: dummyContainerID, - }, - }, - { - RequestType: "start-service", - APIVersion: "v2", - Payload: &eventPayload{ - NamingSchemaVersion: "1", - ServiceName: "test-service-1", - GeneratedServiceName: "test-service-1-generated", - GeneratedServiceNameSource: "test-service-1-generated-source", - ContainerServiceName: "test-service-1-container", - ContainerServiceNameSource: "service", - DDService: "test-service-1", - ServiceNameSource: "injected", - ServiceType: "web_service", - HostName: host, - Env: "", - StartTime: calcTime(0).Unix(), - StartTimeMilli: calcTime(0).UnixMilli(), - LastSeen: calcTime(22 * time.Minute).Unix(), - Ports: []uint16{8080}, - PID: 102, - CommandLine: []string{"test-service-1"}, - APMInstrumentation: "injected", - ContainerID: dummyContainerID, - }, - }, - }, - }, } for _, tc := range tests { @@ -626,7 +499,7 @@ func Test_linuxImpl(t *testing.T) { err := check.Configure( mSender.GetSenderManager(), integration.FakeConfigHash, - integration.Data(cfgYaml), + integration.Data{}, nil, "test", ) diff --git a/pkg/collector/corechecks/servicediscovery/module/comm_test.go b/pkg/collector/corechecks/servicediscovery/module/comm_test.go index 5cdcac6d5a368..c8f5dc7cc7765 100644 --- a/pkg/collector/corechecks/servicediscovery/module/comm_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/comm_test.go @@ -31,7 +31,7 @@ const ( func TestIgnoreComm(t *testing.T) { serverDir := buildFakeServer(t) url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + 
mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(func() { cancel() }) diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go index defee82dc4c72..ac77a9fc2e8c9 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux.go @@ -41,6 +41,11 @@ import ( const ( pathServices = "/services" + + // Use a low cache validity to ensure that we refresh information every time + // the check is run if needed. This is the same as cacheValidityNoRT in + // pkg/process/checks/container.go. + containerCacheValidatity = 2 * time.Second ) // Ensure discovery implements the module.Module interface. @@ -695,7 +700,7 @@ func (s *discovery) getServices() (*[]model.Service, error) { var services []model.Service alivePids := make(map[int32]struct{}, len(pids)) - containers, _, pidToCid, err := s.containerProvider.GetContainers(1*time.Minute, nil) + containers, _, pidToCid, err := s.containerProvider.GetContainers(containerCacheValidatity, nil) if err != nil { log.Errorf("could not get containers: %s", err) } diff --git a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go index 357409b59712c..1f388f538cd58 100644 --- a/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go +++ b/pkg/collector/corechecks/servicediscovery/module/impl_linux_test.go @@ -200,7 +200,7 @@ func startProcessWithFile(t *testing.T, f *os.File) *exec.Cmd { // Check that we get (only) listening processes for all expected protocols. func TestBasic(t *testing.T) { url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() var expectedPIDs []int var unexpectedPIDs []int @@ -254,7 +254,7 @@ func TestBasic(t *testing.T) { // Check that we get all listening ports for a process func TestPorts(t *testing.T) { url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() var expectedPorts []uint16 var unexpectedPorts []uint16 @@ -311,7 +311,7 @@ func TestPorts(t *testing.T) { func TestPortsLimits(t *testing.T) { url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() var expectedPorts []int @@ -346,7 +346,7 @@ func TestPortsLimits(t *testing.T) { func TestServiceName(t *testing.T) { url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() listener, err := net.Listen("tcp", "") require.NoError(t, err) @@ -387,7 +387,7 @@ func TestServiceName(t *testing.T) { func TestInjectedServiceName(t *testing.T) { url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() createEnvsMemfd(t, []string{ 
"OTHER_ENV=test", @@ -415,7 +415,7 @@ func TestInjectedServiceName(t *testing.T) { func TestAPMInstrumentationInjected(t *testing.T) { url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() createEnvsMemfd(t, []string{ "DD_INJECTION_ENABLED=service_name,tracer", @@ -512,7 +512,7 @@ func testCaptureWrappedCommands(t *testing.T, script string, commandWrapper []st t.Cleanup(func() { _ = proc.Kill() }) url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() pid := int(proc.Pid) require.EventuallyWithT(t, func(collect *assert.CollectT) { svcMap := getServicesMap(collect, url) @@ -553,7 +553,7 @@ func TestAPMInstrumentationProvided(t *testing.T) { serverDir := buildFakeServer(t) url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() for name, test := range testCases { t.Run(name, func(t *testing.T) { @@ -635,7 +635,7 @@ func assertCPU(t require.TestingT, url string, pid int) { func TestCommandLineSanitization(t *testing.T) { serverDir := buildFakeServer(t) url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() ctx, cancel := context.WithCancel(context.Background()) t.Cleanup(func() { cancel() }) @@ -667,7 +667,7 @@ func TestNodeDocker(t *testing.T) { require.NoError(t, err) url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() pid := int(nodeJSPID) require.EventuallyWithT(t, func(collect *assert.CollectT) { @@ -726,7 +726,7 @@ func TestAPMInstrumentationProvidedWithMaps(t *testing.T) { require.NoError(t, err) url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() pid := cmd.Process.Pid require.EventuallyWithT(t, func(collect *assert.CollectT) { @@ -743,7 +743,7 @@ func TestAPMInstrumentationProvidedWithMaps(t *testing.T) { // Check that we can get listening processes in other namespaces. func TestNamespaces(t *testing.T) { url, mockContainerProvider := setupDiscoveryModule(t) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).AnyTimes() + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).AnyTimes() // Needed when changing namespaces runtime.LockOSThread() @@ -832,7 +832,7 @@ func TestDocker(t *testing.T) { pid1111 = process.PID mockContainerProvider. EXPECT(). - GetContainers(1*time.Minute, nil). + GetContainers(containerCacheValidatity, nil). 
Return( []*agentPayload.Container{ {Id: "dummyCID", Tags: []string{ @@ -870,7 +870,7 @@ func TestCache(t *testing.T) { mockCtrl := gomock.NewController(t) mockContainerProvider := proccontainersmocks.NewMockContainerProvider(mockCtrl) - mockContainerProvider.EXPECT().GetContainers(1*time.Minute, nil).MinTimes(1) + mockContainerProvider.EXPECT().GetContainers(containerCacheValidatity, nil).MinTimes(1) discovery := newDiscovery(mockContainerProvider) ctx, cancel := context.WithCancel(context.Background()) diff --git a/pkg/collector/corechecks/servicediscovery/servicediscovery.go b/pkg/collector/corechecks/servicediscovery/servicediscovery.go index 6f96ccc775bb5..eccd5ae14cb87 100644 --- a/pkg/collector/corechecks/servicediscovery/servicediscovery.go +++ b/pkg/collector/corechecks/servicediscovery/servicediscovery.go @@ -8,12 +8,9 @@ package servicediscovery import ( "errors" - "fmt" "runtime" "time" - "gopkg.in/yaml.v2" - "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" @@ -21,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/servicediscovery/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) //go:generate mockgen -source=$GOFILE -package=$GOPACKAGE -destination=servicediscovery_mock.go @@ -47,7 +44,6 @@ type serviceEvents struct { } type discoveredServices struct { - ignoreProcs map[int]bool potentials map[int]*serviceInfo runningServices map[int]*serviceInfo @@ -58,38 +54,24 @@ type osImpl interface { DiscoverServices() (*discoveredServices, error) } -var newOSImpl func(ignoreCfg map[string]bool) (osImpl, error) - -type config struct { - IgnoreProcesses []string `yaml:"ignore_processes"` -} - -// Parse parses the configuration -func (c *config) Parse(data []byte) error { - if err := yaml.Unmarshal(data, c); err != nil { - return err - } - return nil -} +var newOSImpl func() (osImpl, error) // Check reports discovered services. type Check struct { corechecks.CheckBase - cfg *config - os osImpl - sender *telemetrySender - sentRepeatedEventPIDs map[int]bool + os osImpl + sender *telemetrySender } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { +func Factory() option.Option[func() check.Check] { // Since service_discovery is enabled by default, we want to prevent returning an error in Configure() for platforms // where the check is not implemented. Instead of that, we return an empty check. 
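That comment captures the pattern this diff applies across the core checks: Factory returns an option.Option wrapping the check constructor, and returns a none value on platforms where the check is not implemented, so registration simply skips it instead of erroring in Configure(). A minimal stand-in sketch of that shape, assuming only the New, None and Get operations the diff itself uses (this is not the real pkg/util/option implementation):

package example

// Option is a minimal stand-in for the option package used in this diff.
type Option[T any] struct {
	value T
	set   bool
}

func New[T any](v T) Option[T]     { return Option[T]{value: v, set: true} }
func None[T any]() Option[T]       { return Option[T]{} }
func (o Option[T]) Get() (T, bool) { return o.value, o.set }

// Check is a placeholder for the agent's check.Check interface.
type Check interface{ Run() error }

// Factory returns a none option on unsupported platforms, mirroring the stub
// files in this diff, and wraps the constructor otherwise.
func Factory(supported bool, newCheck func() Check) Option[func() Check] {
	if !supported {
		return None[func() Check]()
	}
	return New(newCheck)
}

// register mirrors RegisterCheck: only checks whose factory is present end up
// in the catalog, so unsupported platforms are skipped without an error.
func register(catalog map[string]func() Check, name string, factory Option[func() Check]) {
	if v, ok := factory.Get(); ok {
		catalog[name] = v
	}
}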
if newOSImpl == nil { - return optional.NewNoneOption[func() check.Check]() + return option.None[func() check.Check]() } - return optional.NewOption(func() check.Check { + return option.New(func() check.Check { return newCheck() }) } @@ -97,9 +79,7 @@ func Factory() optional.Option[func() check.Check] { // TODO: add metastore param func newCheck() *Check { return &Check{ - CheckBase: corechecks.NewCheckBase(CheckName), - cfg: &config{}, - sentRepeatedEventPIDs: make(map[int]bool), + CheckBase: corechecks.NewCheckBase(CheckName), } } @@ -111,14 +91,6 @@ func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, instance if err := c.CommonConfigure(senderManager, initConfig, instanceConfig, source); err != nil { return err } - if err := c.cfg.Parse(instanceConfig); err != nil { - return fmt.Errorf("failed to parse config: %w", err) - } - - ignoreCfg := map[string]bool{} - for _, pName := range c.cfg.IgnoreProcesses { - ignoreCfg[pName] = true - } s, err := c.GetSender() if err != nil { @@ -126,7 +98,7 @@ func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, instance } c.sender = newTelemetrySender(s) - c.os, err = newOSImpl(ignoreCfg) + c.os, err = newOSImpl() if err != nil { return err } @@ -152,112 +124,25 @@ func (c *Check) Run() error { return err } - log.Debugf("ignoreProcs: %d | runningServices: %d | potentials: %d", - len(disc.ignoreProcs), + log.Debugf("runningServices: %d | potentials: %d", len(disc.runningServices), len(disc.potentials), ) metricDiscoveredServices.Set(float64(len(disc.runningServices))) - runningServicesByName := map[string][]*serviceInfo{} - for _, svc := range disc.runningServices { - runningServicesByName[svc.meta.Name] = append(runningServicesByName[svc.meta.Name], svc) - } - for _, svcs := range runningServicesByName { - if len(svcs) <= 1 { - continue - } - for _, svc := range svcs { - if c.sentRepeatedEventPIDs[svc.service.PID] { - continue - } - err := fmt.Errorf("found repeated service name: %s", svc.meta.Name) - telemetryFromError(errWithCode{ - err: err, - code: errorCodeRepeatedServiceName, - svc: &svc.meta, - }) - // track the PID, so we don't increase this counter in every run of the check. - c.sentRepeatedEventPIDs[svc.service.PID] = true - } - } - - potentialNames := map[string]bool{} - for _, p := range disc.potentials { - potentialNames[p.meta.Name] = true - } - - // group events by name in order to find repeated events for the same service name. - eventsByName := make(eventsByNameMap) for _, p := range disc.events.start { - eventsByName.addStart(p) + c.sender.sendStartServiceEvent(p) } for _, p := range disc.events.heartbeat { - eventsByName.addHeartbeat(p) + c.sender.sendHeartbeatServiceEvent(p) } for _, p := range disc.events.stop { - if potentialNames[p.meta.Name] { - // we consider this situation a restart, so we skip the stop event. - log.Debugf("there is a potential service with the same name as a stopped one, skipping end-service event (name: %q)", p.meta.Name) - continue - } - eventsByName.addStop(p) - if c.sentRepeatedEventPIDs[p.service.PID] { - // delete this process from the map, so we track it if the PID gets reused - delete(c.sentRepeatedEventPIDs, p.service.PID) - } - } - - for name, ev := range eventsByName { - if len(ev.start) > 0 && len(ev.stop) > 0 || len(ev.heartbeat) > 0 && len(ev.stop) > 0 { - // this is a consequence of the possibility of generating the same service name for different processes. 
- // at this point, we just skip the end-service events so at least these services don't disappear in the UI. - log.Debugf("got multiple start/heartbeat/end service events for the same service name, skipping end-service events (name: %q)", name) - clear(ev.stop) - } - for _, svc := range ev.start { - c.sender.sendStartServiceEvent(svc) - } - for _, svc := range ev.heartbeat { - c.sender.sendHeartbeatServiceEvent(svc) - } - for _, svc := range ev.stop { - c.sender.sendEndServiceEvent(svc) - } + c.sender.sendEndServiceEvent(p) } return nil } -type eventsByNameMap map[string]*serviceEvents - -func (m eventsByNameMap) addStart(svc serviceInfo) { - events, ok := m[svc.meta.Name] - if !ok { - events = &serviceEvents{} - } - events.start = append(events.start, svc) - m[svc.meta.Name] = events -} - -func (m eventsByNameMap) addHeartbeat(svc serviceInfo) { - events, ok := m[svc.meta.Name] - if !ok { - events = &serviceEvents{} - } - events.heartbeat = append(events.heartbeat, svc) - m[svc.meta.Name] = events -} - -func (m eventsByNameMap) addStop(svc serviceInfo) { - events, ok := m[svc.meta.Name] - if !ok { - events = &serviceEvents{} - } - events.stop = append(events.stop, svc) - m[svc.meta.Name] = events -} - // Interval returns how often the check should run. func (c *Check) Interval() time.Duration { return refreshInterval diff --git a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go index 688b2cd9350e2..ab5cc66f49f97 100644 --- a/pkg/collector/corechecks/snmp/internal/checkconfig/config.go +++ b/pkg/collector/corechecks/snmp/internal/checkconfig/config.go @@ -22,9 +22,9 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/collector/check/defaults" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - coreutil "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/log" + coreutilsort "github.com/DataDog/datadog-agent/pkg/util/sort" "github.com/DataDog/datadog-agent/pkg/networkdevice/pinger" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" @@ -260,7 +260,7 @@ func (c *CheckConfig) RebuildMetadataMetricsAndTags() { // UpdateDeviceIDAndTags updates DeviceID and DeviceIDTags func (c *CheckConfig) UpdateDeviceIDAndTags() { - c.DeviceIDTags = coreutil.SortUniqInPlace(c.getDeviceIDTags()) + c.DeviceIDTags = coreutilsort.UniqInPlace(c.getDeviceIDTags()) c.DeviceID = c.Namespace + ":" + c.IPAddress } diff --git a/pkg/collector/corechecks/snmp/internal/fetch/fetch.go b/pkg/collector/corechecks/snmp/internal/fetch/fetch.go index f6d5a1692f1d6..54987adef0ed2 100644 --- a/pkg/collector/corechecks/snmp/internal/fetch/fetch.go +++ b/pkg/collector/corechecks/snmp/internal/fetch/fetch.go @@ -44,17 +44,11 @@ func Fetch(sess session.Session, config *checkconfig.CheckConfig) (*valuestore.R return nil, fmt.Errorf("failed to fetch scalar oids with batching: %v", err) } - // fetch column values - oids := make(map[string]string, len(config.OidConfig.ColumnOids)) - for _, value := range config.OidConfig.ColumnOids { - oids[value] = value - } - - columnResults, err := fetchColumnOidsWithBatching(sess, oids, config.OidBatchSize, config.BulkMaxRepetitions, useGetBulk) + columnResults, err := fetchColumnOidsWithBatching(sess, config.OidConfig.ColumnOids, config.OidBatchSize, config.BulkMaxRepetitions, useGetBulk) if err != nil { log.Debugf("failed to 
fetch oids with GetBulk batching: %v", err) - columnResults, err = fetchColumnOidsWithBatching(sess, oids, config.OidBatchSize, config.BulkMaxRepetitions, useGetNext) + columnResults, err = fetchColumnOidsWithBatching(sess, config.OidConfig.ColumnOids, config.OidBatchSize, config.BulkMaxRepetitions, useGetNext) if err != nil { return nil, fmt.Errorf("failed to fetch oids with GetNext batching: %v", err) } diff --git a/pkg/collector/corechecks/snmp/internal/fetch/fetch_column.go b/pkg/collector/corechecks/snmp/internal/fetch/fetch_column.go index 2ebfce8c4acd5..92984f14ba331 100644 --- a/pkg/collector/corechecks/snmp/internal/fetch/fetch_column.go +++ b/pkg/collector/corechecks/snmp/internal/fetch/fetch_column.go @@ -20,23 +20,16 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/valuestore" ) -func fetchColumnOidsWithBatching(sess session.Session, oids map[string]string, oidBatchSize int, bulkMaxRepetitions uint32, fetchStrategy columnFetchStrategy) (valuestore.ColumnResultValuesType, error) { +func fetchColumnOidsWithBatching(sess session.Session, oids []string, oidBatchSize int, bulkMaxRepetitions uint32, fetchStrategy columnFetchStrategy) (valuestore.ColumnResultValuesType, error) { retValues := make(valuestore.ColumnResultValuesType, len(oids)) - columnOids := getOidsMapKeys(oids) - sort.Strings(columnOids) // sorting ColumnOids to make them deterministic for testing purpose - batches, err := common.CreateStringBatches(columnOids, oidBatchSize) + batches, err := common.CreateStringBatches(oids, oidBatchSize) if err != nil { return nil, fmt.Errorf("failed to create column oid batches: %s", err) } for _, batchColumnOids := range batches { - oidsToFetch := make(map[string]string, len(batchColumnOids)) - for _, oid := range batchColumnOids { - oidsToFetch[oid] = oids[oid] - } - - results, err := fetchColumnOids(sess, oidsToFetch, bulkMaxRepetitions, fetchStrategy) + results, err := fetchColumnOids(sess, batchColumnOids, bulkMaxRepetitions, fetchStrategy) if err != nil { return nil, fmt.Errorf("failed to fetch column oids: %s", err) } @@ -54,13 +47,17 @@ func fetchColumnOidsWithBatching(sess session.Session, oids map[string]string, o return retValues, nil } -// fetchColumnOids has an `oids` argument representing a `map[string]string`, -// the key of the map is the column oid, and the value is the oid used to fetch the next value for the column. -// The value oid might be equal to column oid or a row oid of the same column. -func fetchColumnOids(sess session.Session, oids map[string]string, bulkMaxRepetitions uint32, fetchStrategy columnFetchStrategy) (valuestore.ColumnResultValuesType, error) { +// fetchColumnOids fetches all values for each specified column OID. +// bulkMaxRepetitions is the number of entries to request per OID per SNMP +// request when fetchStrategy = useGetBulk; it is ignored when fetchStrategy is +// useGetNext. 
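The fetch refactor above now passes column OIDs around as a plain slice and splits them into batches of oidBatchSize before each SNMP walk. A small sketch of that chunking step, under the assumption that CreateStringBatches simply splits the slice into fixed-size groups (the helper's implementation is not shown in this diff):

package example

import "fmt"

// batchStrings splits oids into consecutive groups of at most size elements.
func batchStrings(oids []string, size int) ([][]string, error) {
	if size <= 0 {
		return nil, fmt.Errorf("invalid batch size %d", size)
	}
	var batches [][]string
	for start := 0; start < len(oids); start += size {
		end := start + size
		if end > len(oids) {
			end = len(oids)
		}
		batches = append(batches, oids[start:end])
	}
	return batches, nil
}

Each batch is then fetched with GetBulk (or GetNext as a fallback) and the per-column results are merged into a single result map, as fetchColumnOidsWithBatching does above.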
+func fetchColumnOids(sess session.Session, oids []string, bulkMaxRepetitions uint32, fetchStrategy columnFetchStrategy) (valuestore.ColumnResultValuesType, error) { returnValues := make(valuestore.ColumnResultValuesType, len(oids)) alreadyProcessedOids := make(map[string]bool) - curOids := oids + curOids := make(map[string]string, len(oids)) + for _, oid := range oids { + curOids[oid] = oid + } for { if len(curOids) == 0 { break @@ -131,13 +128,3 @@ func updateColumnResultValues(valuesToUpdate valuestore.ColumnResultValuesType, } } } - -func getOidsMapKeys(oidsMap map[string]string) []string { - keys := make([]string, len(oidsMap)) - i := 0 - for k := range oidsMap { - keys[i] = k - i++ - } - return keys -} diff --git a/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go b/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go index c0bf331b2f7e5..63c2c4ce937f3 100644 --- a/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go +++ b/pkg/collector/corechecks/snmp/internal/fetch/fetch_test.go @@ -87,7 +87,7 @@ func Test_fetchColumnOids(t *testing.T) { sess.On("GetBulk", []string{"1.1.1.3"}, checkconfig.DefaultBulkMaxRepetitions).Return(&bulkPacket2, nil) sess.On("GetBulk", []string{"1.1.1.5"}, checkconfig.DefaultBulkMaxRepetitions).Return(&bulkPacket3, nil) - oids := map[string]string{"1.1.1": "1.1.1", "1.1.2": "1.1.2"} + oids := []string{"1.1.1", "1.1.2"} columnValues, err := fetchColumnOidsWithBatching(sess, oids, 100, checkconfig.DefaultBulkMaxRepetitions, useGetBulk) assert.Nil(t, err) @@ -178,7 +178,7 @@ func Test_fetchColumnOidsBatch_usingGetBulk(t *testing.T) { // Third bulk iteration sess.On("GetBulk", []string{"1.1.1.5"}, checkconfig.DefaultBulkMaxRepetitions).Return(&bulkPacket3, nil) - oids := map[string]string{"1.1.1": "1.1.1", "1.1.2": "1.1.2"} + oids := []string{"1.1.1", "1.1.2"} columnValues, err := fetchColumnOidsWithBatching(sess, oids, 2, 10, useGetBulk) assert.Nil(t, err) @@ -275,7 +275,7 @@ func Test_fetchColumnOidsBatch_usingGetNext(t *testing.T) { sess.On("GetNext", []string{"1.1.3"}).Return(&secondBatchPacket1, nil) sess.On("GetNext", []string{"1.1.3.1"}).Return(&secondBatchPacket2, nil) - oids := map[string]string{"1.1.1": "1.1.1", "1.1.2": "1.1.2", "1.1.3": "1.1.3"} + oids := []string{"1.1.1", "1.1.2", "1.1.3"} columnValues, err := fetchColumnOidsWithBatching(sess, oids, 2, 10, useGetBulk) assert.Nil(t, err) @@ -870,7 +870,7 @@ func Test_fetchColumnOids_alreadyProcessed(t *testing.T) { sess.On("GetBulk", []string{"1.1.1.3", "1.1.2.3"}, checkconfig.DefaultBulkMaxRepetitions).Return(&bulkPacket2, nil) sess.On("GetBulk", []string{"1.1.1.5", "1.1.2.5"}, checkconfig.DefaultBulkMaxRepetitions).Return(&bulkPacket3, nil) - oids := map[string]string{"1.1.1": "1.1.1", "1.1.2": "1.1.2"} + oids := []string{"1.1.1", "1.1.2"} columnValues, err := fetchColumnOidsWithBatching(sess, oids, 100, checkconfig.DefaultBulkMaxRepetitions, useGetBulk) assert.Nil(t, err) diff --git a/pkg/collector/corechecks/snmp/internal/profile/config_profile.go b/pkg/collector/corechecks/snmp/internal/profile/config_profile.go index 0cf4355fe3ea7..8e74b43c7b7e4 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/config_profile.go +++ b/pkg/collector/corechecks/snmp/internal/profile/config_profile.go @@ -7,6 +7,7 @@ package profile import ( "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" + "time" ) // Provider is an interface that provides profiles by name @@ -15,13 +16,18 @@ type Provider interface { HasProfile(profileName string) bool // GetProfile 
returns the profile with this name, or nil if there isn't one. GetProfile(profileName string) *ProfileConfig - // GetProfileNameForSysObjectID returns the best matching profile for this sysObjectID, or nil if there isn't one. + // GetProfileNameForSysObjectID returns the name of the best matching profile for this sysObjectID, or "" if there isn't one. GetProfileNameForSysObjectID(sysObjectID string) (string, error) + // GetProfileForSysObjectID returns the best matching profile for this sysObjectID, or nil if there isn't one. + GetProfileForSysObjectID(sysObjectID string) (*ProfileConfig, error) + // LastUpdated returns when this Provider last changed + LastUpdated() time.Time } // staticProvider is a static implementation of Provider type staticProvider struct { - configMap ProfileConfigMap + configMap ProfileConfigMap + lastUpdated time.Time } func (s *staticProvider) GetProfile(name string) *ProfileConfig { @@ -40,10 +46,23 @@ func (s *staticProvider) GetProfileNameForSysObjectID(sysObjectID string) (strin return getProfileForSysObjectID(s.configMap, sysObjectID) } +func (s *staticProvider) GetProfileForSysObjectID(sysObjectID string) (*ProfileConfig, error) { + name, err := getProfileForSysObjectID(s.configMap, sysObjectID) + if err != nil { + return nil, err + } + return s.GetProfile(name), nil +} + +func (s *staticProvider) LastUpdated() time.Time { + return s.lastUpdated +} + // StaticProvider makes a provider that serves the static data from this config map. func StaticProvider(profiles ProfileConfigMap) Provider { return &staticProvider{ - configMap: profiles, + configMap: profiles, + lastUpdated: time.Now(), } } diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile.go b/pkg/collector/corechecks/snmp/internal/profile/profile.go index 916be04fe7035..d6e3d1219410f 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile.go @@ -74,6 +74,9 @@ func getProfileForSysObjectID(profiles ProfileConfigMap, sysObjectID string) (st matchedOids = append(matchedOids, oidPattern) } } + if len(matchedOids) == 0 { + return "", fmt.Errorf("no profiles found for sysObjectID %q", sysObjectID) + } oid, err := getMostSpecificOid(matchedOids) if err != nil { return "", fmt.Errorf("failed to get most specific profile for sysObjectID %q, for matched oids %v: %w", sysObjectID, matchedOids, err) diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_initconfig.go b/pkg/collector/corechecks/snmp/internal/profile/profile_initconfig.go index d6020eb8b3fd4..700b6a75ffa0a 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_initconfig.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_initconfig.go @@ -17,6 +17,9 @@ func loadInitConfigProfiles(rawInitConfigProfiles ProfileConfigMap) (ProfileConf log.Warnf("unable to load profile %q: %s", name, err) continue } + if profDefinition.Name == "" { + profDefinition.Name = name + } profConfig.Definition = *profDefinition } initConfigProfiles[name] = profConfig diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go index cb46896c2db7b..07fff6ec06c99 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_test.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_test.go @@ -331,7 +331,7 @@ func Test_getProfileForSysObjectID(t *testing.T) { profiles: mockProfilesWithInvalidPatternError, sysObjectID: "1.3.6.1.4.1.3375.2.1.3.4.5.11", 
expectedProfile: "", - expectedError: "failed to get most specific profile for sysObjectID \"1.3.6.1.4.1.3375.2.1.3.4.5.11\", for matched oids []: cannot get most specific oid from empty list of oids", + expectedError: "no profiles found for sysObjectID \"1.3.6.1.4.1.3375.2.1.3.4.5.11\"", }, { name: "duplicate sysobjectid", diff --git a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go index 600809768a026..b1dabf406a27d 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go +++ b/pkg/collector/corechecks/snmp/internal/profile/profile_yaml.go @@ -73,6 +73,9 @@ func getProfileDefinitions(profilesFolder string, isUserProfile bool) (ProfileCo log.Warnf("cannot load profile %q: %v", profileName, err) continue } + if definition.Name == "" { + definition.Name = profileName + } profiles[profileName] = ProfileConfig{ Definition: *definition, IsUserProfile: isUserProfile, diff --git a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go index 6831ad07c2827..71154c3d29f65 100644 --- a/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go +++ b/pkg/collector/corechecks/snmp/internal/profile/testing_utils.go @@ -64,6 +64,7 @@ func FixtureProfileDefinitionMap() ProfileConfigMap { return ProfileConfigMap{ "f5-big-ip": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ + Name: "f5-big-ip", Metrics: metrics, Extends: []string{"_base.yaml", "_generic-if.yaml"}, Device: profiledefinition.DeviceMeta{Vendor: "f5"}, @@ -183,6 +184,7 @@ func FixtureProfileDefinitionMap() ProfileConfigMap { }, "another_profile": ProfileConfig{ Definition: profiledefinition.ProfileDefinition{ + Name: "another_profile", SysObjectIDs: profiledefinition.StringArray{"1.3.6.1.4.1.32473.1.1"}, Metrics: []profiledefinition.MetricsConfig{ {Symbol: profiledefinition.SymbolConfig{OID: "1.3.6.1.2.1.1.999.0", Name: "anotherMetric"}, MetricType: ""}, diff --git a/pkg/collector/corechecks/snmp/internal/report/report_device_metadata.go b/pkg/collector/corechecks/snmp/internal/report/report_device_metadata.go index 6bdcec852a1d8..9bc800985b18b 100644 --- a/pkg/collector/corechecks/snmp/internal/report/report_device_metadata.go +++ b/pkg/collector/corechecks/snmp/internal/report/report_device_metadata.go @@ -14,8 +14,8 @@ import ( "time" "github.com/DataDog/datadog-agent/comp/forwarder/eventplatform" - "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/log" + sortutil "github.com/DataDog/datadog-agent/pkg/util/sort" devicemetadata "github.com/DataDog/datadog-agent/pkg/networkdevice/metadata" "github.com/DataDog/datadog-agent/pkg/networkdevice/profile/profiledefinition" @@ -53,7 +53,7 @@ var supportedDeviceTypes = map[string]bool{ // ReportNetworkDeviceMetadata reports device metadata func (ms *MetricSender) ReportNetworkDeviceMetadata(config *checkconfig.CheckConfig, store *valuestore.ResultValueStore, origTags []string, collectTime time.Time, deviceStatus devicemetadata.DeviceStatus, pingStatus devicemetadata.DeviceStatus, diagnoses []devicemetadata.DiagnosisMetadata) { tags := utils.CopyStrings(origTags) - tags = util.SortUniqInPlace(tags) + tags = sortutil.UniqInPlace(tags) metadataStore := buildMetadataStore(config.Metadata, store) diff --git a/pkg/collector/corechecks/snmp/snmp.go b/pkg/collector/corechecks/snmp/snmp.go index 29e2ae2327c5b..630dd25c31848 100644 --- a/pkg/collector/corechecks/snmp/snmp.go +++ 
b/pkg/collector/corechecks/snmp/snmp.go @@ -18,9 +18,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" - "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/checkconfig" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/common" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/devicecheck" @@ -28,6 +25,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/report" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/session" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -198,8 +197,8 @@ func (c *Check) GetDiagnoses() ([]diagnosis.Diagnosis, error) { } // Factory creates a new check factory -func Factory(agentConfig config.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(agentConfig config.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return newCheck(agentConfig) }) } diff --git a/pkg/collector/corechecks/snmp/snmp_test.go b/pkg/collector/corechecks/snmp/snmp_test.go index 2f1095dc52027..cf17abb2e15d9 100644 --- a/pkg/collector/corechecks/snmp/snmp_test.go +++ b/pkg/collector/corechecks/snmp/snmp_test.go @@ -12,7 +12,6 @@ import ( "testing" "time" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/snmp/internal/report" "github.com/gosnmp/gosnmp" @@ -53,7 +52,7 @@ type deps struct { } func createDeps(t *testing.T) deps { - return fxutil.Test[deps](t, compressionmock.MockModule(), demultiplexerimpl.MockModule(), defaultforwarder.MockModule(), core.MockBundle()) + return fxutil.Test[deps](t, demultiplexerimpl.MockModule(), defaultforwarder.MockModule(), core.MockBundle()) } func Test_Run_simpleCase(t *testing.T) { diff --git a/pkg/collector/corechecks/system/cpu/cpu/cpu.go b/pkg/collector/corechecks/system/cpu/cpu/cpu.go index 827f032bba9e5..2dd0b15be6ea8 100644 --- a/pkg/collector/corechecks/system/cpu/cpu/cpu.go +++ b/pkg/collector/corechecks/system/cpu/cpu/cpu.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const CheckName = "cpu" @@ -172,8 +172,8 @@ func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, rawInsta } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/cpu/cpu/cpu_windows.go b/pkg/collector/corechecks/system/cpu/cpu/cpu_windows.go index 78322a667e002..fe273436f1c97 100644 --- a/pkg/collector/corechecks/system/cpu/cpu/cpu_windows.go +++ b/pkg/collector/corechecks/system/cpu/cpu/cpu_windows.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core 
"github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pdhutil" ) @@ -184,8 +184,8 @@ func (c *Check) Configure(senderManager sender.SenderManager, _ uint64, data int } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/cpu/load/load.go b/pkg/collector/corechecks/system/cpu/load/load.go index 52be3ea30e08a..bf61efa02837f 100644 --- a/pkg/collector/corechecks/system/cpu/load/load.go +++ b/pkg/collector/corechecks/system/cpu/load/load.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -86,8 +86,8 @@ func (c *LoadCheck) Configure(senderManager sender.SenderManager, _ uint64, data } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/cpu/load/stub.go b/pkg/collector/corechecks/system/cpu/load/stub.go index d6fd598c2f2c0..3f87fcb23b315 100644 --- a/pkg/collector/corechecks/system/cpu/load/stub.go +++ b/pkg/collector/corechecks/system/cpu/load/stub.go @@ -9,7 +9,7 @@ package load import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -18,6 +18,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/system/disk/disk/disk.go b/pkg/collector/corechecks/system/disk/disk/disk.go index 0aa49da6b1a11..fdbb4eaecea22 100644 --- a/pkg/collector/corechecks/system/disk/disk/disk.go +++ b/pkg/collector/corechecks/system/disk/disk/disk.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -175,8 +175,8 @@ func (c *Check) applyDeviceTags(device, mountpoint string, tags []string) []stri } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/disk/disk/stub.go b/pkg/collector/corechecks/system/disk/disk/stub.go index 4f1126ef39787..615b201dfa84c 100644 --- a/pkg/collector/corechecks/system/disk/disk/stub.go +++ b/pkg/collector/corechecks/system/disk/disk/stub.go @@ -9,7 +9,7 @@ package disk import ( 
"github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -18,6 +18,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/system/disk/io/iostats.go b/pkg/collector/corechecks/system/disk/io/iostats.go index b40315ebff0af..5936dd7706905 100644 --- a/pkg/collector/corechecks/system/disk/io/iostats.go +++ b/pkg/collector/corechecks/system/disk/io/iostats.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -64,8 +64,8 @@ func (c *IOCheck) commonConfigure(senderManager sender.SenderManager, data integ } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/filehandles/file_handles.go b/pkg/collector/corechecks/system/filehandles/file_handles.go index 567e99338469a..66b1ee125bbdf 100644 --- a/pkg/collector/corechecks/system/filehandles/file_handles.go +++ b/pkg/collector/corechecks/system/filehandles/file_handles.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // CheckName is the name of the check @@ -89,8 +89,8 @@ func (c *fhCheck) Run() error { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/filehandles/file_handles_bsd.go b/pkg/collector/corechecks/system/filehandles/file_handles_bsd.go index 917db6ad42799..f969ea02688cc 100644 --- a/pkg/collector/corechecks/system/filehandles/file_handles_bsd.go +++ b/pkg/collector/corechecks/system/filehandles/file_handles_bsd.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // For testing purpose @@ -63,8 +63,8 @@ func (c *fhCheck) Configure(senderManager sender.SenderManager, _ uint64, data i } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/filehandles/file_handles_windows.go b/pkg/collector/corechecks/system/filehandles/file_handles_windows.go index 1670861cba5d1..135a66f090e0e 100644 --- 
a/pkg/collector/corechecks/system/filehandles/file_handles_windows.go +++ b/pkg/collector/corechecks/system/filehandles/file_handles_windows.go @@ -11,7 +11,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pdhutil" ) @@ -74,8 +74,8 @@ func (c *fhCheck) Configure(senderManager sender.SenderManager, _ uint64, data i } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/memory/memory.go b/pkg/collector/corechecks/system/memory/memory.go index 589f80bdbc26a..35653633964ea 100644 --- a/pkg/collector/corechecks/system/memory/memory.go +++ b/pkg/collector/corechecks/system/memory/memory.go @@ -9,15 +9,15 @@ package memory import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // CheckName is the name of the check const CheckName = "memory" // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/uptime/uptime.go b/pkg/collector/corechecks/system/uptime/uptime.go index 441394031ec73..cfeb004868071 100644 --- a/pkg/collector/corechecks/system/uptime/uptime.go +++ b/pkg/collector/corechecks/system/uptime/uptime.go @@ -10,7 +10,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // CheckName is the name of the check @@ -41,8 +41,8 @@ func (c *Check) Run() error { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/wincrashdetect/stub.go b/pkg/collector/corechecks/system/wincrashdetect/stub.go index f6473062aa6b7..21876eda6760b 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/stub.go +++ b/pkg/collector/corechecks/system/wincrashdetect/stub.go @@ -10,7 +10,7 @@ package wincrashdetect import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect.go b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect.go index 
5ce5f21dbf8de..ed316ca1746aa 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect.go +++ b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/util/crashreport" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -51,8 +51,8 @@ type WinCrashDetect struct { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go index 004a31057f55d..b2e8d319780ad 100644 --- a/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go +++ b/pkg/collector/corechecks/system/wincrashdetect/wincrashdetect_windows_test.go @@ -49,7 +49,8 @@ func TestWinCrashReporting(t *testing.T) { mockSysProbeConfig.SetWithoutSource("system_probe_config.enabled", true) mockSysProbeConfig.SetWithoutSource("system_probe_config.sysprobe_socket", systemProbeTestPipeName) - listener, err := server.NewListener(systemProbeTestPipeName) + // The test named pipe allows the current user. + listener, err := server.NewListenerForCurrentUser(systemProbeTestPipeName) require.NoError(t, err) t.Cleanup(func() { _ = listener.Close() }) @@ -182,7 +183,8 @@ func TestCrashReportingStates(t *testing.T) { var crashStatus *probe.WinCrashStatus - listener, err := server.NewListener(systemProbeTestPipeName) + // The test named pipe allows the current user. 
+ listener, err := server.NewListenerForCurrentUser(systemProbeTestPipeName) require.NoError(t, err) t.Cleanup(func() { _ = listener.Close() }) diff --git a/pkg/collector/corechecks/system/winkmem/stub.go b/pkg/collector/corechecks/system/winkmem/stub.go index 7b95dfdf5a4a0..ae720722b75c5 100644 --- a/pkg/collector/corechecks/system/winkmem/stub.go +++ b/pkg/collector/corechecks/system/winkmem/stub.go @@ -10,7 +10,7 @@ package winkmem import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/system/winkmem/winkmem.go b/pkg/collector/corechecks/system/winkmem/winkmem.go index 9ec3f31d0aa05..1da8ddf6ad919 100644 --- a/pkg/collector/corechecks/system/winkmem/winkmem.go +++ b/pkg/collector/corechecks/system/winkmem/winkmem.go @@ -20,8 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" - + "github.com/DataDog/datadog-agent/pkg/util/option" "golang.org/x/sys/windows" ) @@ -60,8 +59,8 @@ type KMemCheck struct { } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/system/winproc/stub.go b/pkg/collector/corechecks/system/winproc/stub.go index 2e1f88a9485ec..ddc26f2581d87 100644 --- a/pkg/collector/corechecks/system/winproc/stub.go +++ b/pkg/collector/corechecks/system/winproc/stub.go @@ -10,7 +10,7 @@ package winproc import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -19,6 +19,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/system/winproc/winproc_windows.go b/pkg/collector/corechecks/system/winproc/winproc_windows.go index 2acecb7f28885..cc1151f0ce106 100644 --- a/pkg/collector/corechecks/system/winproc/winproc_windows.go +++ b/pkg/collector/corechecks/system/winproc/winproc_windows.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pdhutil" ) @@ -77,8 +77,8 @@ func (c *processChk) Configure(senderManager sender.SenderManager, _ uint64, dat } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { 
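Throughout these check packages the Factory functions move from the optional package to option: concrete implementations return option.New(newCheck), while the platform stubs (load, disk, wincrashdetect, winkmem, winproc, systemd) return option.None[func() check.Check]() so the check simply does not exist on unsupported builds. A minimal sketch of how a consumer can tell the two apart, assuming the new option package keeps a Get() (value, ok) accessor like the former optional package did; the registration helper below is illustrative, not the agent's actual loader:

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/collector/check"
	"github.com/DataDog/datadog-agent/pkg/collector/corechecks/system/memory"
	"github.com/DataDog/datadog-agent/pkg/util/option"
)

// registerIfAvailable instantiates a check only when the build supplied a
// concrete factory via option.New; stubs built with option.None are skipped.
func registerIfAvailable(name string, factory option.Option[func() check.Check]) {
	newFn, ok := factory.Get() // assumption: Get() returns (value, ok), as optional.Option did
	if !ok {
		fmt.Printf("%s: no factory on this platform, skipping\n", name)
		return
	}
	fmt.Printf("%s: created check instance of type %T\n", name, newFn())
}

func main() {
	registerIfAvailable(memory.CheckName, memory.Factory())
	registerIfAvailable("stubbed-check", option.None[func() check.Check]())
}

The agent's real call sites, such as corecheckLoader.RegisterCheck(gpu.CheckName, gpu.Factory(tagger, telemetry)) later in this diff, pass the Option through unchanged and leave the unwrapping to the loader.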
diff --git a/pkg/collector/corechecks/systemd/stub.go b/pkg/collector/corechecks/systemd/stub.go index a1e7ab65e166c..bb1b52170eb58 100644 --- a/pkg/collector/corechecks/systemd/stub.go +++ b/pkg/collector/corechecks/systemd/stub.go @@ -9,7 +9,7 @@ package systemd import ( "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -18,6 +18,6 @@ const ( ) // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewNoneOption[func() check.Check]() +func Factory() option.Option[func() check.Check] { + return option.None[func() check.Check]() } diff --git a/pkg/collector/corechecks/systemd/systemd.go b/pkg/collector/corechecks/systemd/systemd.go index 186403222d5fc..a1b8161f6b8db 100644 --- a/pkg/collector/corechecks/systemd/systemd.go +++ b/pkg/collector/corechecks/systemd/systemd.go @@ -20,12 +20,11 @@ import ( "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" + core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" - - core "github.com/DataDog/datadog-agent/pkg/collector/corechecks" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -569,8 +568,8 @@ func (c *SystemdCheck) Configure(senderManager sender.SenderManager, integration } // Factory creates a new check factory -func Factory() optional.Option[func() check.Check] { - return optional.NewOption(newCheck) +func Factory() option.Option[func() check.Check] { + return option.New(newCheck) } func newCheck() check.Check { diff --git a/pkg/collector/corechecks/telemetry/check.go b/pkg/collector/corechecks/telemetry/check.go index 34299831a4412..b120adc6c6b43 100644 --- a/pkg/collector/corechecks/telemetry/check.go +++ b/pkg/collector/corechecks/telemetry/check.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/collector/corechecks" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -107,8 +107,8 @@ func (c *checkImpl) buildTags(lps []*dto.LabelPair) []string { } // Factory creates a new check factory -func Factory(telemetry telemetry.Component) optional.Option[func() check.Check] { - return optional.NewOption(func() check.Check { +func Factory(telemetry telemetry.Component) option.Option[func() check.Check] { + return option.New(func() check.Check { return &checkImpl{ CheckBase: corechecks.NewCheckBase(CheckName), telemetry: telemetry, diff --git a/pkg/collector/loaders/loaders.go b/pkg/collector/loaders/loaders.go index 93a2fd8c0b126..abb7f5364f07e 100644 --- a/pkg/collector/loaders/loaders.go +++ b/pkg/collector/loaders/loaders.go @@ -15,13 +15,13 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // LoaderFactory helps to defer actual instantiation of Check Loaders, // mostly helpful with code 
involving calls to cgo (for example, the Python // interpreter might not be initialized when `init`ing a package) -type LoaderFactory func(sender.SenderManager, optional.Option[integrations.Component], tagger.Component) (check.Loader, error) +type LoaderFactory func(sender.SenderManager, option.Option[integrations.Component], tagger.Component) (check.Loader, error) var factoryCatalog = make(map[int][]LoaderFactory) var loaderCatalog = []check.Loader{} @@ -33,7 +33,7 @@ func RegisterLoader(order int, factory LoaderFactory) { } // LoaderCatalog returns the loaders sorted by desired sequence order -func LoaderCatalog(senderManager sender.SenderManager, logReceiver optional.Option[integrations.Component], tagger tagger.Component) []check.Loader { +func LoaderCatalog(senderManager sender.SenderManager, logReceiver option.Option[integrations.Component], tagger tagger.Component) []check.Loader { // the catalog is supposed to be built only once, don't see a clear // use case to add Loaders at runtime once.Do(func() { diff --git a/pkg/collector/loaders/loaders_test.go b/pkg/collector/loaders/loaders_test.go index 026be454683d6..73eda7049d9f9 100644 --- a/pkg/collector/loaders/loaders_test.go +++ b/pkg/collector/loaders/loaders_test.go @@ -19,7 +19,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/collector/check" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) type LoaderOne struct{} @@ -60,15 +60,15 @@ func (lt *LoaderThree) Load(_ sender.SenderManager, _ integration.Config, _ inte func TestLoaderCatalog(t *testing.T) { l1 := LoaderOne{} - factory1 := func(sender.SenderManager, optional.Option[integrations.Component], tagger.Component) (check.Loader, error) { + factory1 := func(sender.SenderManager, option.Option[integrations.Component], tagger.Component) (check.Loader, error) { return l1, nil } l2 := LoaderTwo{} - factory2 := func(sender.SenderManager, optional.Option[integrations.Component], tagger.Component) (check.Loader, error) { + factory2 := func(sender.SenderManager, option.Option[integrations.Component], tagger.Component) (check.Loader, error) { return l2, nil } var l3 *LoaderThree - factory3 := func(sender.SenderManager, optional.Option[integrations.Component], tagger.Component) (check.Loader, error) { + factory3 := func(sender.SenderManager, option.Option[integrations.Component], tagger.Component) (check.Loader, error) { return l3, errors.New("error") } @@ -76,7 +76,7 @@ func TestLoaderCatalog(t *testing.T) { RegisterLoader(10, factory2) RegisterLoader(30, factory3) senderManager := mocksender.CreateDefaultDemultiplexer() - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() require.Len(t, LoaderCatalog(senderManager, logReceiver, tagger), 2) assert.Equal(t, l1, LoaderCatalog(senderManager, logReceiver, tagger)[1]) diff --git a/pkg/collector/python/check_context.go b/pkg/collector/python/check_context.go index 78344ee06eb65..78c43918e6f93 100644 --- a/pkg/collector/python/check_context.go +++ b/pkg/collector/python/check_context.go @@ -15,7 +15,7 @@ import ( integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + 
"github.com/DataDog/datadog-agent/pkg/util/option" ) var checkCtx *checkContext @@ -27,7 +27,7 @@ var checkContextMutex = sync.Mutex{} // per dependency used inside SubmitMetric like methods. type checkContext struct { senderManager sender.SenderManager - logReceiver optional.Option[integrations.Component] + logReceiver option.Option[integrations.Component] tagger tagger.Component } @@ -41,7 +41,7 @@ func getCheckContext() (*checkContext, error) { return checkCtx, nil } -func initializeCheckContext(senderManager sender.SenderManager, logReceiver optional.Option[integrations.Component], tagger tagger.Component) { +func initializeCheckContext(senderManager sender.SenderManager, logReceiver option.Option[integrations.Component], tagger tagger.Component) { checkContextMutex.Lock() if checkCtx == nil { checkCtx = &checkContext{ diff --git a/pkg/collector/python/datadog_agent.go b/pkg/collector/python/datadog_agent.go index 95a0b2f69f19d..c247d3d3b17a9 100644 --- a/pkg/collector/python/datadog_agent.go +++ b/pkg/collector/python/datadog_agent.go @@ -25,7 +25,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/structure" "github.com/DataDog/datadog-agent/pkg/obfuscate" "github.com/DataDog/datadog-agent/pkg/persistentcache" - "github.com/DataDog/datadog-agent/pkg/util" hostnameUtil "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -96,7 +95,7 @@ func TracemallocEnabled() C.bool { // //export Headers func Headers(yamlPayload **C.char) { - h := util.HTTPHeaders() + h := httpHeaders() data, err := yaml.Marshal(h) if err != nil { @@ -245,6 +244,7 @@ var ( // the GIL is always locked when calling c code from python which means that the exported functions in this file // will only ever be called by one goroutine at a time obfuscator *obfuscate.Obfuscator + obfuscaterConfig obfuscate.Config // For testing purposes obfuscatorLoader sync.Once ) @@ -253,21 +253,23 @@ var ( // will definitely be initialized by the time one of the python checks runs func lazyInitObfuscator() *obfuscate.Obfuscator { obfuscatorLoader.Do(func() { - var cfg obfuscate.Config - if err := structure.UnmarshalKey(pkgconfigsetup.Datadog(), "apm_config.obfuscation", &cfg); err != nil { + if err := structure.UnmarshalKey(pkgconfigsetup.Datadog(), "apm_config.obfuscation", &obfuscaterConfig); err != nil { log.Errorf("Failed to unmarshal apm_config.obfuscation: %s", err.Error()) - cfg = obfuscate.Config{} + obfuscaterConfig = obfuscate.Config{} } - if !cfg.SQLExecPlan.Enabled { - cfg.SQLExecPlan = defaultSQLPlanObfuscateSettings + if !obfuscaterConfig.SQLExecPlan.Enabled { + obfuscaterConfig.SQLExecPlan = defaultSQLPlanObfuscateSettings } - if !cfg.SQLExecPlanNormalize.Enabled { - cfg.SQLExecPlanNormalize = defaultSQLPlanNormalizeSettings + if !obfuscaterConfig.SQLExecPlanNormalize.Enabled { + obfuscaterConfig.SQLExecPlanNormalize = defaultSQLPlanNormalizeSettings } - if !cfg.Mongo.Enabled { - cfg.Mongo = defaultMongoObfuscateSettings + if len(obfuscaterConfig.Mongo.KeepValues) == 0 { + obfuscaterConfig.Mongo.KeepValues = defaultMongoObfuscateSettings.KeepValues } - obfuscator = obfuscate.NewObfuscator(cfg) + if len(obfuscaterConfig.Mongo.ObfuscateSQLValues) == 0 { + obfuscaterConfig.Mongo.ObfuscateSQLValues = defaultMongoObfuscateSettings.ObfuscateSQLValues + } + obfuscator = obfuscate.NewObfuscator(obfuscaterConfig) }) return obfuscator } @@ -735,3 +737,13 @@ func EmitAgentTelemetry(checkName *C.char, metricName 
*C.char, metricValue C.dou log.Warnf("EmitAgentTelemetry: unsupported metric type %s requested by %s for %s", goMetricType, goCheckName, goMetricName) } } + +// httpHeaders returns HTTP headers including various basic information (User-Agent, Content-Type...). +func httpHeaders() map[string]string { + av, _ := version.Agent() + return map[string]string{ + "User-Agent": fmt.Sprintf("Datadog Agent/%s", av.GetNumber()), + "Content-Type": "application/x-www-form-urlencoded", + "Accept": "text/html, */*", + } +} diff --git a/pkg/collector/python/datadog_agent_test.go b/pkg/collector/python/datadog_agent_test.go index 5a73902e55911..d98a23fefdc7c 100644 --- a/pkg/collector/python/datadog_agent_test.go +++ b/pkg/collector/python/datadog_agent_test.go @@ -38,3 +38,7 @@ func TestSetExternalTags(t *testing.T) { func TestEmitAgentTelemetry(t *testing.T) { testEmitAgentTelemetry(t) } + +func TestObfuscaterConfig(t *testing.T) { + testObfuscaterConfig(t) +} diff --git a/pkg/collector/python/loader.go b/pkg/collector/python/loader.go index 2135295ec22a6..636d714e69be7 100644 --- a/pkg/collector/python/loader.go +++ b/pkg/collector/python/loader.go @@ -28,7 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -60,7 +60,7 @@ const ( ) func init() { - factory := func(senderManager sender.SenderManager, logReceiver optional.Option[integrations.Component], tagger tagger.Component) (check.Loader, error) { + factory := func(senderManager sender.SenderManager, logReceiver option.Option[integrations.Component], tagger tagger.Component) (check.Loader, error) { return NewPythonCheckLoader(senderManager, logReceiver, tagger) } loaders.RegisterLoader(20, factory) @@ -86,11 +86,11 @@ func init() { // //nolint:revive // TODO(AML) Fix revive linter type PythonCheckLoader struct { - logReceiver optional.Option[integrations.Component] + logReceiver option.Option[integrations.Component] } // NewPythonCheckLoader creates an instance of the Python checks loader -func NewPythonCheckLoader(senderManager sender.SenderManager, logReceiver optional.Option[integrations.Component], tagger tagger.Component) (*PythonCheckLoader, error) { +func NewPythonCheckLoader(senderManager sender.SenderManager, logReceiver option.Option[integrations.Component], tagger tagger.Component) (*PythonCheckLoader, error) { initializeCheckContext(senderManager, logReceiver, tagger) return &PythonCheckLoader{ logReceiver: logReceiver, diff --git a/pkg/collector/python/test_aggregator.go b/pkg/collector/python/test_aggregator.go index 66d39d003d8c6..83cbcbb1d5748 100644 --- a/pkg/collector/python/test_aggregator.go +++ b/pkg/collector/python/test_aggregator.go @@ -18,7 +18,7 @@ import ( checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // #include @@ -26,7 +26,7 @@ import "C" func testSubmitMetric(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() release :=
scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) defer release() @@ -103,7 +103,7 @@ func testSubmitMetric(t *testing.T) { func testSubmitMetricEmptyTags(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) defer release() @@ -124,7 +124,7 @@ func testSubmitMetricEmptyTags(t *testing.T) { func testSubmitMetricEmptyHostname(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) defer release() @@ -145,7 +145,7 @@ func testSubmitMetricEmptyHostname(t *testing.T) { func testSubmitServiceCheck(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) defer release() @@ -165,7 +165,7 @@ func testSubmitServiceCheck(t *testing.T) { func testSubmitServiceCheckEmptyTag(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) defer release() @@ -185,7 +185,7 @@ func testSubmitServiceCheckEmptyTag(t *testing.T) { func testSubmitServiceCheckEmptyHostame(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) defer release() @@ -205,7 +205,7 @@ func testSubmitServiceCheckEmptyHostame(t *testing.T) { func testSubmitEvent(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) defer release() @@ -243,7 +243,7 @@ func testSubmitEvent(t *testing.T) { func testSubmitHistogramBucket(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) defer release() @@ -268,7 +268,7 @@ func testSubmitHistogramBucket(t *testing.T) { func testSubmitEventPlatformEvent(t *testing.T) { sender := mocksender.NewMockSender("testID") - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) defer release() @@ -284,7 +284,7 @@ func testSubmitEventPlatformEvent(t *testing.T) { 
sender.AssertEventPlatformEvent(t, []byte("raw-event"), "dbm-sample") } -func scopeInitCheckContext(senderManager sender.SenderManager, logReceiver optional.Option[integrations.Component], taggerComp tagger.Component) func() { +func scopeInitCheckContext(senderManager sender.SenderManager, logReceiver option.Option[integrations.Component], taggerComp tagger.Component) func() { // Ensure the check context is released before initializing a new one releaseCheckContext() initializeCheckContext(senderManager, logReceiver, taggerComp) diff --git a/pkg/collector/python/test_datadog_agent.go b/pkg/collector/python/test_datadog_agent.go index df436c43ab697..3b1cf1aa8d68e 100644 --- a/pkg/collector/python/test_datadog_agent.go +++ b/pkg/collector/python/test_datadog_agent.go @@ -10,6 +10,7 @@ package python import ( "context" "math/rand/v2" + "strings" "sync" "testing" "time" @@ -19,7 +20,9 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/DataDog/datadog-agent/pkg/collector/externalhost" - "github.com/DataDog/datadog-agent/pkg/util" + pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/obfuscate" "github.com/DataDog/datadog-agent/pkg/util/hostname" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/clustername" "github.com/DataDog/datadog-agent/pkg/version" @@ -58,7 +61,7 @@ func testHeaders(t *testing.T) { Headers(&headers) require.NotNil(t, headers) - h := util.HTTPHeaders() + h := httpHeaders() yamlPayload, _ := yaml.Marshal(h) assert.Equal(t, string(yamlPayload), C.GoString(headers)) } @@ -122,3 +125,48 @@ func testEmitAgentTelemetry(t *testing.T) { assert.True(t, true) } + +func testObfuscaterConfig(t *testing.T) { + pkgconfigmodel.CleanOverride(t) + conf := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) // nolint: forbidigo // legit use case + pkgconfigsetup.InitConfig(conf) + o := lazyInitObfuscator() + o.Stop() + expected := obfuscate.Config{ + ES: obfuscate.JSONConfig{ + Enabled: true, + KeepValues: []string{}, + ObfuscateSQLValues: []string{}, + }, + OpenSearch: obfuscate.JSONConfig{ + Enabled: true, + KeepValues: []string{}, + ObfuscateSQLValues: []string{}, + }, + Mongo: defaultMongoObfuscateSettings, + SQLExecPlan: defaultSQLPlanObfuscateSettings, + SQLExecPlanNormalize: defaultSQLPlanNormalizeSettings, + HTTP: obfuscate.HTTPConfig{ + RemoveQueryString: false, + RemovePathDigits: false, + }, + Redis: obfuscate.RedisConfig{ + Enabled: true, + RemoveAllArgs: false, + }, + Memcached: obfuscate.MemcachedConfig{ + Enabled: true, + KeepCommand: false, + }, + CreditCard: obfuscate.CreditCardsConfig{ + Enabled: true, + Luhn: false, + KeepValues: []string{}, + }, + Cache: obfuscate.CacheConfig{ + Enabled: true, + MaxSize: 5000000, + }, + } + assert.Equal(t, expected, obfuscaterConfig) +} diff --git a/pkg/collector/python/test_loader.go b/pkg/collector/python/test_loader.go index 77233e4e2cb36..b4c3c52d13aff 100644 --- a/pkg/collector/python/test_loader.go +++ b/pkg/collector/python/test_loader.go @@ -15,8 +15,7 @@ import ( nooptagger "github.com/DataDog/datadog-agent/comp/core/tagger/impl-noop" integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" - "github.com/DataDog/datadog-agent/pkg/util/optional" - + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/stretchr/testify/assert" ) @@ -142,7 +141,7 @@ func testLoadCustomCheck(t *testing.T) { rtloader = 
newMockRtLoaderPtr() defer func() { rtloader = nil }() senderManager := mocksender.CreateDefaultDemultiplexer() - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() loader, err := NewPythonCheckLoader(senderManager, logReceiver, tagger) assert.Nil(t, err) @@ -181,7 +180,7 @@ func testLoadWheelCheck(t *testing.T) { defer func() { rtloader = nil }() senderManager := mocksender.CreateDefaultDemultiplexer() - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := nooptagger.NewComponent() loader, err := NewPythonCheckLoader(senderManager, logReceiver, tagger) assert.Nil(t, err) diff --git a/pkg/collector/python/test_tagger.go b/pkg/collector/python/test_tagger.go index a050437874d51..94b04822fd4c0 100644 --- a/pkg/collector/python/test_tagger.go +++ b/pkg/collector/python/test_tagger.go @@ -19,7 +19,7 @@ import ( integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" "github.com/DataDog/datadog-agent/pkg/aggregator/mocksender" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) /* @@ -41,7 +41,7 @@ import "C" func testTags(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := mock.SetupFakeTagger(t) tagger.SetTags(types.NewEntityID(types.ContainerID, "test"), "foo", []string{"tag1", "tag2", "tag3"}, nil, nil, nil) release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) @@ -64,7 +64,7 @@ func testTags(t *testing.T) { func testTagsNull(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := mock.SetupFakeTagger(t) tagger.SetTags(types.NewEntityID(types.ContainerID, "test"), "foo", nil, nil, nil, nil) release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) @@ -79,7 +79,7 @@ func testTagsNull(t *testing.T) { func testTagsEmpty(t *testing.T) { sender := mocksender.NewMockSender(checkid.ID("testID")) - logReceiver := optional.NewNoneOption[integrations.Component]() + logReceiver := option.None[integrations.Component]() tagger := mock.SetupFakeTagger(t) tagger.SetTags(types.NewEntityID(types.ContainerID, "test"), "foo", []string{}, nil, nil, nil) release := scopeInitCheckContext(sender.GetSenderManager(), logReceiver, tagger) diff --git a/pkg/collector/scheduler.go b/pkg/collector/scheduler.go index 1e2d76d6fd767..21473d72a8d47 100644 --- a/pkg/collector/scheduler.go +++ b/pkg/collector/scheduler.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/collector/loaders" "github.com/DataDog/datadog-agent/pkg/util/containers" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" yaml "gopkg.in/yaml.v2" ) @@ -58,13 +58,13 @@ func init() { type CheckScheduler struct { configToChecks map[string][]checkid.ID // cache the ID of checks we load for each config loaders []check.Loader - collector optional.Option[collector.Component] + collector option.Option[collector.Component] senderManager sender.SenderManager m sync.RWMutex } // 
InitCheckScheduler creates and returns a check scheduler -func InitCheckScheduler(collector optional.Option[collector.Component], senderManager sender.SenderManager, logReceiver optional.Option[integrations.Component], tagger tagger.Component) *CheckScheduler { +func InitCheckScheduler(collector option.Option[collector.Component], senderManager sender.SenderManager, logReceiver option.Option[integrations.Component], tagger tagger.Component) *CheckScheduler { checkScheduler = &CheckScheduler{ collector: collector, senderManager: senderManager, diff --git a/pkg/commonchecks/corechecks.go b/pkg/commonchecks/corechecks.go index cb20f9412ab2a..7bf5101c2188e 100644 --- a/pkg/commonchecks/corechecks.go +++ b/pkg/commonchecks/corechecks.go @@ -77,7 +77,7 @@ func RegisterChecks(store workloadmeta.Component, tagger tagger.Component, cfg c corecheckLoader.RegisterCheck(helm.CheckName, helm.Factory()) corecheckLoader.RegisterCheck(pod.CheckName, pod.Factory(store, cfg, tagger)) corecheckLoader.RegisterCheck(ebpf.CheckName, ebpf.Factory()) - corecheckLoader.RegisterCheck(gpu.CheckName, gpu.Factory(tagger)) + corecheckLoader.RegisterCheck(gpu.CheckName, gpu.Factory(tagger, telemetry)) corecheckLoader.RegisterCheck(ecs.CheckName, ecs.Factory(store, tagger)) corecheckLoader.RegisterCheck(oomkill.CheckName, oomkill.Factory(tagger)) corecheckLoader.RegisterCheck(tcpqueuelength.CheckName, tcpqueuelength.Factory(tagger)) diff --git a/pkg/compliance/agent.go b/pkg/compliance/agent.go index f839b5a7cb8c1..fb29c581ef8cc 100644 --- a/pkg/compliance/agent.go +++ b/pkg/compliance/agent.go @@ -33,8 +33,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/compliance/utils" "github.com/DataDog/datadog-agent/pkg/config/env" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/security/rules/filtermodel" - secl "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -133,14 +131,7 @@ func xccdfEnabled() bool { return pkgconfigsetup.Datadog().GetBool("compliance_config.xccdf.enabled") || pkgconfigsetup.Datadog().GetBool("compliance_config.host_benchmarks.enabled") } -var defaultSECLRuleFilter = sync.OnceValues(func() (*secl.SECLRuleFilter, error) { - ruleFilterModel, err := filtermodel.NewRuleFilterModel(nil, "") - if err != nil { - return nil, fmt.Errorf("failed to create default SECL rule filter: %w", err) - } - filter := secl.NewSECLRuleFilter(ruleFilterModel) - return filter, nil -}) +var defaultSECLRuleFilter = sync.OnceValues(newSECLRuleFilter) // DefaultRuleFilter implements the default filtering of benchmarks' rules. 
It // will exclude rules based on the evaluation context / environment running @@ -165,9 +156,7 @@ func DefaultRuleFilter(r *Rule) bool { return false } - accepted, err := seclRuleFilter.IsRuleAccepted(&secl.RuleDefinition{ - Filters: r.Filters, - }) + accepted, err := seclRuleFilter.isRuleAccepted(r.Filters) if err != nil { log.Errorf("failed to apply rule filters: %s", err) return false @@ -422,16 +411,13 @@ func (a *Agent) runKubernetesConfigurationsExport(ctx context.Context) { } func (a *Agent) runAptConfigurationExport(ctx context.Context) { - ruleFilterModel, err := filtermodel.NewRuleFilterModel(nil, "") + seclRuleFilter, err := newSECLRuleFilter() if err != nil { log.Errorf("failed to run apt configuration export: %v", err) return } - seclRuleFilter := secl.NewSECLRuleFilter(ruleFilterModel) - accepted, err := seclRuleFilter.IsRuleAccepted(&secl.RuleDefinition{ - Filters: []string{aptconfig.SeclFilter}, - }) + accepted, err := seclRuleFilter.isRuleAccepted([]string{aptconfig.SeclFilter}) if !accepted || err != nil { return } diff --git a/pkg/compliance/reporter.go b/pkg/compliance/reporter.go index cf59ee9043489..9f9fd87710571 100644 --- a/pkg/compliance/reporter.go +++ b/pkg/compliance/reporter.go @@ -14,6 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/logs/auditor" @@ -38,13 +39,13 @@ type LogReporter struct { } // NewLogReporter instantiates a new log LogReporter -func NewLogReporter(hostname string, sourceName, sourceType string, endpoints *config.Endpoints, dstcontext *client.DestinationsContext) *LogReporter { +func NewLogReporter(hostname string, sourceName, sourceType string, endpoints *config.Endpoints, dstcontext *client.DestinationsContext, compression logscompression.Component) *LogReporter { // setup the auditor auditor := auditor.NewNullAuditor() auditor.Start() // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(4, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, dstcontext, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) + pipelineProvider := pipeline.NewProvider(4, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, dstcontext, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog(), compression) pipelineProvider.Start() logSource := sources.NewLogSource( diff --git a/pkg/compliance/resolver.go b/pkg/compliance/resolver.go index 057cd7a413a3f..6ff5e98c4a39b 100644 --- a/pkg/compliance/resolver.go +++ b/pkg/compliance/resolver.go @@ -19,6 +19,7 @@ import ( "strings" "time" + "github.com/distribution/reference" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/network" @@ -556,6 +557,14 @@ func (r *defaultResolver) resolveAudit(_ context.Context, spec InputSpecAudit) ( return resolved, nil } +func parseImageRepo(name string) string { + ref, err := reference.ParseNormalizedNamed(name) + if err == nil { + return reference.Path(ref) + } + return "" +} + func (r *defaultResolver) resolveDocker(ctx context.Context, spec 
InputSpecDocker) (interface{}, error) { cl := r.dockerCl if cl == nil { @@ -574,10 +583,12 @@ func (r *defaultResolver) resolveDocker(ctx context.Context, spec InputSpecDocke if err != nil { return nil, err } + imageRepo := parseImageRepo(image.Config.Image) resolved = append(resolved, map[string]interface{}{ - "id": image.ID, - "tags": image.RepoTags, - "inspect": image, + "id": image.ID, + "tags": image.RepoTags, + "image_repo": imageRepo, + "inspect": image, }) } case "container": @@ -590,11 +601,13 @@ func (r *defaultResolver) resolveDocker(ctx context.Context, spec InputSpecDocke if err != nil { return nil, err } + imageRepo := parseImageRepo(container.Config.Image) resolved = append(resolved, map[string]interface{}{ - "id": container.ID, - "name": container.Name, - "image": container.Image, - "inspect": container, + "id": container.ID, + "name": container.Name, + "image": container.Image, + "image_repo": imageRepo, + "inspect": container, }) } case "network": diff --git a/pkg/compliance/rulefilter.go b/pkg/compliance/rulefilter.go new file mode 100644 index 0000000000000..74ab4548643ee --- /dev/null +++ b/pkg/compliance/rulefilter.go @@ -0,0 +1,39 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package compliance implements a specific part of the datadog-agent +// responsible for scanning host and containers and report various +// misconfigurations and compliance issues. +package compliance + +import ( + "fmt" + + "github.com/DataDog/datadog-agent/pkg/security/rules/filtermodel" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules/filter" +) + +// seclRuleFilter defines a SECL rule filter +type seclRuleFilter struct { + inner *filter.SECLRuleFilter +} + +// newSECLRuleFilter returns a new agent version based rule filter +func newSECLRuleFilter() (*seclRuleFilter, error) { + cfg := filtermodel.RuleFilterEventConfig{} + model, err := filtermodel.NewRuleFilterModel(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create default SECL rule filter: %w", err) + } + + return &seclRuleFilter{ + inner: filter.NewSECLRuleFilter(model), + }, nil +} + +// isRuleAccepted checks whether the rule is accepted +func (r *seclRuleFilter) isRuleAccepted(filters []string) (bool, error) { + return r.inner.IsAccepted(filters) +} diff --git a/pkg/compliance/status_provider_test.go b/pkg/compliance/status_provider_test.go index 200084ea995b9..69159b985e7c5 100644 --- a/pkg/compliance/status_provider_test.go +++ b/pkg/compliance/status_provider_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + compressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/logs/client" ) @@ -19,7 +20,7 @@ func TestStatus(t *testing.T) { provider := statusProvider{ agent: &Agent{ opts: AgentOptions{ - Reporter: NewLogReporter("test", "test", "test", &config.Endpoints{}, &client.DestinationsContext{}), + Reporter: NewLogReporter("test", "test", "test", &config.Endpoints{}, &client.DestinationsContext{}, compressionfx.NewMockCompressor()), }, }, } diff --git a/pkg/config/autodiscovery/autodiscovery.go b/pkg/config/autodiscovery/autodiscovery.go index e0feabf9968ff..6da90f2a3a473 100644 --- a/pkg/config/autodiscovery/autodiscovery.go +++ 
b/pkg/config/autodiscovery/autodiscovery.go @@ -125,5 +125,11 @@ func DiscoverComponentsFromEnv() ([]pkgconfigsetup.ConfigurationProviders, []pkg log.Info("Adding Kubelet listener from environment") } + isGPUEnv := env.IsFeaturePresent(env.NVML) + if isGPUEnv { + detectedProviders = append(detectedProviders, pkgconfigsetup.ConfigurationProviders{Name: names.GPU}) + log.Info("Adding GPU provider from environment") + } + return detectedProviders, detectedListeners } diff --git a/pkg/config/config_template.yaml b/pkg/config/config_template.yaml index 95d7f8c806e39..4d510511a5d74 100644 --- a/pkg/config/config_template.yaml +++ b/pkg/config/config_template.yaml @@ -1235,28 +1235,28 @@ api_key: # remove_stack_traces: false # # sql_exec_plan: - ## @param DD_APM_SQL_EXEC_PLAN_ENABLED - boolean - optional + ## @param DD_APM_OBFUSCATION_SQL_EXEC_PLAN_ENABLED - boolean - optional ## Enables obfuscation rules for JSON query execution plans. Disabled by default. # enabled: false - ## @param DD_APM_SQL_EXEC_PLAN_KEEP_VALUES - object - optional + ## @param DD_APM_OBFUSCATION_SQL_EXEC_PLAN_KEEP_VALUES - object - optional ## List of keys that should not be obfuscated. # keep_values: # - id1 - ## @param DD_APM_SQL_EXEC_PLAN_OBFUSCATE_SQL_VALUES - boolean - optional + ## @param DD_APM_OBFUSCATION_SQL_EXEC_PLAN_OBFUSCATE_SQL_VALUES - boolean - optional ## The set of keys for which their values will be passed through SQL obfuscation # obfuscate_sql_values: # - val1 # # sql_exec_plan_normalize: - ## @param DD_APM_SQL_EXEC_PLAN_NORMALIZE_ENABLED - boolean - optional + ## @param DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_ENABLED - boolean - optional ## Enables obfuscation rules for JSON query execution plans, including cost and row estimates. ## Produces a normalized execution plan. Disabled by default. # enabled: false - ## @param DD_APM_SQL_EXEC_PLAN_NORMALIZE_KEEP_VALUES - object - optional + ## @param DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_KEEP_VALUES - object - optional ## List of keys that should not be obfuscated. # keep_values: # - id1 - ## @param DD_APM_SQL_EXEC_PLAN_NORMALIZE_OBFUSCATE_SQL_VALUES - boolean - optional + ## @param DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_OBFUSCATE_SQL_VALUES - boolean - optional ## The set of keys for which their values will be passed through SQL obfuscation # obfuscate_sql_values: # - val1 @@ -1265,6 +1265,19 @@ api_key: ## Enables caching obfuscated statements. Currently supported for SQL and MongoDB queries. ## Enabled by default. # enabled: true + ## @param DD_APM_OBFUSCATION_CACHE_MAX_SIZE - integer - optional - default: 5000000 + ## The maximum size of the cache in bytes. The maximum allowed resource length is 5000. + ## Datadog stores a minimum of 1000 queries (5000000 / 5000) by default. + # max_size: 5000000 + + ## @sql_obfuscation_mode - string - optional - default: "" + ## @env DD_APM_SQL_OBFUSCATION_MODE - string - optional - default: "" + ## Obfuscator mode for SQL queries. + ## Leave empty to use the default obfuscator. + ## Set to "obfuscate_only" to obfuscate the query with the new `sqllexer` obfuscator. + ## If you use DBM, set to "obfuscate_and_normalize" to obfuscate and normalize the query for better APM/DBM correlation. 
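  ## As an example, assuming the setting sits directly under apm_config (the
  ## DD_APM_SQL_OBFUSCATION_MODE environment variable suggests this, though the exact placement
  ## is not shown in this hunk), a DBM user would enable it in datadog.yaml with:
  ##
  ##   apm_config:
  ##     sql_obfuscation_mode: "obfuscate_and_normalize"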
+ # + # sql_obfuscation_mode: "" ## @param filter_tags - object - optional ## @env DD_APM_FILTER_TAGS_REQUIRE - object - optional @@ -1379,7 +1392,7 @@ api_key: ## The list of items available under apm_config.features is not guaranteed to persist across versions; ## a feature may eventually be promoted to its own configuration option on the agent, or dropped entirely. # - # features: ["error_rare_sample_tracer_drop","table_names","component2name","sqllexer","enable_otlp_compute_top_level_by_span_kind","enable_receive_resource_spans_v2"] + # features: ["error_rare_sample_tracer_drop","table_names","component2name","sqllexer","enable_otlp_compute_top_level_by_span_kind","enable_receive_resource_spans_v2", "enable_operation_and_resource_name_logic_v2"] ## @param additional_endpoints - object - optional ## @env DD_APM_ADDITIONAL_ENDPOINTS - object - optional diff --git a/pkg/config/env/environment_container_features.go b/pkg/config/env/environment_container_features.go index ffced09b49c61..ebb854289e938 100644 --- a/pkg/config/env/environment_container_features.go +++ b/pkg/config/env/environment_container_features.go @@ -33,4 +33,6 @@ const ( Podman Feature = "podman" // PodResources socket present PodResources Feature = "podresources" + // NVML library present for GPU detection + NVML Feature = "nvml" ) diff --git a/pkg/config/env/environment_containers.go b/pkg/config/env/environment_containers.go index 7dbb0cf9ce58f..4ec4b44f6ee13 100644 --- a/pkg/config/env/environment_containers.go +++ b/pkg/config/env/environment_containers.go @@ -16,6 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/system" "github.com/DataDog/datadog-agent/pkg/util/system/socket" ) @@ -29,6 +30,7 @@ const ( defaultPodmanContainersStoragePath = "/var/lib/containers/storage" unixSocketPrefix = "unix://" winNamedPipePrefix = "npipe://" + defaultNVMLLibraryName = "libnvidia-ml.so.1" socketTimeout = 500 * time.Millisecond ) @@ -47,6 +49,7 @@ func init() { registerFeature(CloudFoundry) registerFeature(Podman) registerFeature(PodResources) + registerFeature(NVML) } // IsAnyContainerFeaturePresent checks if any of known container features is present @@ -71,6 +74,7 @@ func detectContainerFeatures(features FeatureMap, cfg model.Reader) { detectCloudFoundry(features, cfg) detectPodman(features, cfg) detectPodResources(features, cfg) + detectNVML(features) } func detectKubernetes(features FeatureMap, cfg model.Reader) { @@ -243,6 +247,19 @@ func detectPodResources(features FeatureMap, cfg model.Reader) { } } +func detectNVML(features FeatureMap) { + // Use dlopen to search for the library to avoid importing the go-nvml package here, + // which is 1MB in size and would increase the agent binary size, when we don't really + // need it for anything else. 
+ if err := system.CheckLibraryExists(defaultNVMLLibraryName); err != nil { + log.Debugf("Agent did not find NVML library: %v", err) + return + } + + features[NVML] = struct{}{} + log.Infof("Agent found NVML library") +} + func getHostMountPrefixes() []string { if IsContainerized() { return []string{"", defaultHostMountPrefix} diff --git a/pkg/config/env/go.mod b/pkg/config/env/go.mod index 7b16228030c49..c883447144fa6 100644 --- a/pkg/config/env/go.mod +++ b/pkg/config/env/go.mod @@ -13,15 +13,17 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 github.com/DataDog/datadog-agent/pkg/util/system/socket v0.56.0-rc.3 github.com/stretchr/testify v1.10.0 ) require ( - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -32,24 +34,32 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) replace github.com/DataDog/datadog-agent/pkg/version => ../../version + +replace github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer + +replace github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system + +replace github.com/DataDog/datadog-agent/pkg/util/testutil => ../../util/testutil diff --git a/pkg/config/env/go.sum b/pkg/config/env/go.sum index 4a2c4e5bdc610..44ade18bb1bb9 100644 --- a/pkg/config/env/go.sum +++ b/pkg/config/env/go.sum 
@@ -100,6 +100,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -124,8 +126,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -148,8 +150,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -160,8 +162,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast 
v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -177,6 +179,10 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -200,8 +206,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -238,8 +242,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= diff --git a/pkg/config/fetcher/from_processes.go 
b/pkg/config/fetcher/from_processes.go index e6f86f6c44e19..ee3f1e0062ce5 100644 --- a/pkg/config/fetcher/from_processes.go +++ b/pkg/config/fetcher/from_processes.go @@ -71,7 +71,7 @@ func TraceAgentConfig(config config.Reader) (string, error) { c := util.GetClient(false) c.Timeout = config.GetDuration("server_timeout") * time.Second - ipcAddressWithPort := fmt.Sprintf("https://127.0.0.1:%d/config", port) + ipcAddressWithPort := fmt.Sprintf("http://127.0.0.1:%d/config", port) client := settingshttp.NewClient(c, ipcAddressWithPort, "trace-agent", settingshttp.NewHTTPClientOptions(util.CloseConnection)) return client.FullConfig() diff --git a/pkg/config/mock/go.mod b/pkg/config/mock/go.mod index f4edf2bbb8c8e..a372d81219786 100644 --- a/pkg/config/mock/go.mod +++ b/pkg/config/mock/go.mod @@ -20,7 +20,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system @@ -30,7 +30,7 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 github.com/stretchr/testify v1.10.0 ) @@ -39,19 +39,19 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect 
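The from_processes.go hunk above switches the trace-agent settings fetch to a plain-HTTP loopback URL built from the configured port. A rough sketch of that request shape, assuming a hypothetical local /config endpoint and ignoring the settingshttp client and auth handling the real fetcher relies on:

// configfetch_sketch.go - illustrative only; the port value and the bare GET are
// assumptions, the real code goes through the Agent's settings client.
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// fetchLocalConfig GETs the runtime configuration from a local process endpoint.
func fetchLocalConfig(port int, timeout time.Duration) (string, error) {
	client := &http.Client{Timeout: timeout}
	url := fmt.Sprintf("http://127.0.0.1:%d/config", port)
	resp, err := client.Get(url)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	return string(body), nil
}

func main() {
	cfg, err := fetchLocalConfig(5012, 10*time.Second) // hypothetical port
	if err != nil {
		fmt.Println("config fetch failed:", err)
		return
	}
	fmt.Println(cfg)
}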
github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -63,24 +63,24 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/config/mock/go.sum b/pkg/config/mock/go.sum index 7fdf16db5981c..15a68c06d091d 100644 --- a/pkg/config/mock/go.sum +++ b/pkg/config/mock/go.sum @@ -71,7 +71,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -109,8 +108,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= 
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -137,8 +136,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -155,8 +154,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -169,8 +168,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod 
h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -181,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -235,8 +234,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -273,8 +272,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -302,8 +301,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= 
-google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/config/model/go.mod b/pkg/config/model/go.mod index c77134c93ba10..98ed7d7d6beb9 100644 --- a/pkg/config/model/go.mod +++ b/pkg/config/model/go.mod @@ -14,7 +14,6 @@ require ( github.com/DataDog/viper v1.14.0 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 github.com/stretchr/testify v1.10.0 - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 ) require ( @@ -23,17 +22,18 @@ require ( github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/config/model/go.sum b/pkg/config/model/go.sum index 4ffd1e9d2ce0a..5f700e05d94a8 100644 --- a/pkg/config/model/go.sum +++ b/pkg/config/model/go.sum @@ -147,8 +147,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -185,8 +185,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= 
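The pkg/config/model/types.go hunk below adds IsConfigured alongside the now-deprecated IsSet. A rough usage sketch of the intended difference, reusing only the constructor and methods exercised by the new nodetreemodel tests; the expected results follow the documented semantics (IsSet is satisfied by a default alone, IsConfigured only by a non-default source) rather than verified output:

// isconfigured_sketch.go - illustrative only; production code normally obtains
// its config component rather than calling NewConfig directly.
package main

import (
	"fmt"
	"strings"

	"github.com/DataDog/datadog-agent/pkg/config/model"
	"github.com/DataDog/datadog-agent/pkg/config/nodetreemodel"
)

func main() {
	cfg := nodetreemodel.NewConfig("example", "EXAMPLE", strings.NewReplacer(".", "_"))
	cfg.SetDefault("site", "datadoghq.com")
	cfg.BuildSchema()

	// Only the default is present: IsSet counts defaults, IsConfigured does not.
	fmt.Println(cfg.IsSet("site"), cfg.IsConfigured("site")) // expected: true false

	// A non-default source flips IsConfigured, even when the value equals the default,
	// because the check is about the source of the setting, not its value.
	cfg.Set("site", "datadoghq.com", model.SourceAgentRuntime)
	fmt.Println(cfg.IsSet("site"), cfg.IsConfigured("site")) // expected: true true
}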
-golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -219,8 +217,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= diff --git a/pkg/config/model/types.go b/pkg/config/model/types.go index f97ec7efff5c5..825cf52d750e9 100644 --- a/pkg/config/model/types.go +++ b/pkg/config/model/types.go @@ -56,7 +56,16 @@ type Reader interface { // Note that it returns the keys lowercased. AllKeysLowercased() []string + // IsSet returns true if a non-nil value is found in the configuration, including defaults. This is legacy + // behavior from viper and doesn't answer the need to know whether something was set by the user (see IsConfigured for + // this). + // + // Deprecated: this method will be removed once all settings have a default, use 'IsConfigured' instead. IsSet(key string) bool + // IsConfigured returns true if a setting exists, has a value and doesn't come from the defaults (ie: was + // configured by the user). If a setting is configured by the user with the same value as the defaults, this + // method will still return true as it tests the source of a setting, not its value. + IsConfigured(key string) bool // UnmarshalKey Unmarshal a configuration key into a struct UnmarshalKey(key string, rawVal interface{}, opts ...viper.DecoderConfigOption) error diff --git a/pkg/config/model/viper.go b/pkg/config/model/viper.go index a977863b7368d..56e5519346c7d 100644 --- a/pkg/config/model/viper.go +++ b/pkg/config/model/viper.go @@ -12,17 +12,17 @@ import ( "io" "os" "path" + "path/filepath" "reflect" + "runtime" + "slices" "strconv" "strings" "sync" "time" - "path/filepath" - "github.com/DataDog/viper" "github.com/mohae/deepcopy" - "golang.org/x/exp/slices" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -32,6 +32,8 @@ type Source string // Declare every known Source const ( + // SourceSchema are settings defined in the schema for the configuration but without any default. + SourceSchema Source = "schema" // SourceDefault are the values from defaults. SourceUnknown are the values from unknown source.
This should only be used in tests when calling @@ -74,6 +76,7 @@ var sources = []Source{ // sourcesPriority give each source a priority, the higher the more important a source. This is used when merging // configuration tree (a higher priority overwrites a lower one). var sourcesPriority = map[Source]int{ + SourceSchema: -1, SourceDefault: 0, SourceUnknown: 1, SourceFile: 2, @@ -150,6 +153,12 @@ func (c *safeConfig) OnUpdate(callback NotificationReceiver) { c.notificationReceivers = append(c.notificationReceivers, callback) } +func getCallerLocation(nbStack int) string { + _, file, line, _ := runtime.Caller(nbStack + 1) + fileParts := strings.Split(file, "DataDog/datadog-agent/") + return fmt.Sprintf("%s:%d", fileParts[len(fileParts)-1], line) +} + // Set wraps Viper for concurrent access func (c *safeConfig) Set(key string, newValue interface{}, source Source) { if source == SourceDefault { @@ -160,18 +169,37 @@ func (c *safeConfig) Set(key string, newValue interface{}, source Source) { // modify the config then release the lock to avoid deadlocks while notifying var receivers []NotificationReceiver c.Lock() - previousValue := c.Viper.Get(key) - c.configSources[source].Set(key, newValue) - c.mergeViperInstances(key) - if !reflect.DeepEqual(previousValue, newValue) { + + oldValue := c.Viper.Get(key) + + // First we check if the layer changed + previousValueFromLayer := c.configSources[source].Get(key) + if !reflect.DeepEqual(previousValueFromLayer, newValue) { + c.configSources[source].Set(key, newValue) + c.mergeViperInstances(key) + } else { + // nothing changed + log.Debugf("Updating setting '%s' for source '%s' with the same value, skipping notification", key, source) + c.Unlock() + return + } + + // We might have updated a layer that is itself overridden by another (ex: updating a setting at the 'file' level + // already overridden at the 'cli' level). If that is the case we do nothing. + latestValue := c.Viper.Get(key) + if !reflect.DeepEqual(oldValue, latestValue) { + log.Debugf("Updating setting '%s' for source '%s' with new value. 
notifying %d listeners", key, source, len(c.notificationReceivers)) // if the value has not changed, do not duplicate the slice so that no callback is called receivers = slices.Clone(c.notificationReceivers) + } else { + log.Debugf("Updating setting '%s' for source '%s' with the same value, skipping notification", key, source) } c.Unlock() // notifying all receiver about the updated setting for _, receiver := range receivers { - receiver(key, previousValue, newValue) + log.Debugf("notifying %s about configuration change for '%s'", getCallerLocation(1), key) + receiver(key, oldValue, latestValue) } } @@ -318,6 +346,13 @@ func (c *safeConfig) IsSet(key string) bool { return c.Viper.IsSet(key) } +// IsConfigured returns true if a settings was configured by the user (ie: the value doesn't come from defaults) +func (c *safeConfig) IsConfigured(key string) bool { + c.RLock() + defer c.RUnlock() + return c.Viper.IsConfigured(key) +} + func (c *safeConfig) AllKeysLowercased() []string { c.Lock() defer c.Unlock() diff --git a/pkg/config/model/viper_test.go b/pkg/config/model/viper_test.go index 018b8d95ab804..7a00d6d679989 100644 --- a/pkg/config/model/viper_test.go +++ b/pkg/config/model/viper_test.go @@ -225,13 +225,19 @@ func TestNotificationNoChange(t *testing.T) { updatedKeyCB1 := []string{} - config.OnUpdate(func(key string, _, _ any) { updatedKeyCB1 = append(updatedKeyCB1, key) }) + config.OnUpdate(func(key string, _, newValue any) { updatedKeyCB1 = append(updatedKeyCB1, key+":"+newValue.(string)) }) config.Set("foo", "bar", SourceFile) - assert.Equal(t, []string{"foo"}, updatedKeyCB1) + assert.Equal(t, []string{"foo:bar"}, updatedKeyCB1) config.Set("foo", "bar", SourceFile) - assert.Equal(t, []string{"foo"}, updatedKeyCB1) + assert.Equal(t, []string{"foo:bar"}, updatedKeyCB1) + + config.Set("foo", "baz", SourceAgentRuntime) + assert.Equal(t, []string{"foo:bar", "foo:baz"}, updatedKeyCB1) + + config.Set("foo", "bar2", SourceFile) + assert.Equal(t, []string{"foo:bar", "foo:baz"}, updatedKeyCB1) } func TestCheckKnownKey(t *testing.T) { diff --git a/pkg/config/nodetreemodel/config.go b/pkg/config/nodetreemodel/config.go index e9854f1db99d1..7b10eb4e2b7bb 100644 --- a/pkg/config/nodetreemodel/config.go +++ b/pkg/config/nodetreemodel/config.go @@ -11,15 +11,14 @@ import ( "fmt" "io" "os" + "path/filepath" "reflect" + "slices" "strings" "sync" - "path/filepath" - "github.com/DataDog/viper" "go.uber.org/atomic" - "golang.org/x/exp/slices" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -103,7 +102,7 @@ type ntmConfig struct { // configEnvVars is the set of env vars that are consulted for // any given configuration key. 
Multiple env vars can be associated with one key - configEnvVars map[string]string + configEnvVars map[string][]string // known keys are all the keys that meet at least one of these criteria: // 1) have a default, 2) have an environment variable binded, 3) are an alias or 4) have been SetKnown() @@ -165,14 +164,14 @@ func (c *ntmConfig) getTreeBySource(source model.Source) (InnerNode, error) { case model.SourceCLI: return c.cli, nil } - return nil, fmt.Errorf("unknown source tree: %s", source) + return nil, fmt.Errorf("invalid source tree: %s", source) } // Set assigns the newValue to the given key and marks it as originating from the given source func (c *ntmConfig) Set(key string, newValue interface{}, source model.Source) { tree, err := c.getTreeBySource(source) if err != nil { - log.Errorf("unknown source: %s", source) + log.Errorf("Set invalid source: %s", source) return } @@ -334,7 +333,7 @@ func (c *ntmConfig) SetKnown(key string) { panic("cannot SetKnown() once the config has been marked as ready for use") } - c.addToKnownKeys(key) + c.addToSchema(key, model.SourceSchema) } // IsKnown returns whether a key is known @@ -388,19 +387,25 @@ func (c *ntmConfig) mergeAllLayers() error { } c.root = root + // recompile allSettings now that we have the full config + c.allSettings = c.computeAllSettings(c.schema, "") return nil } -func computeAllSettings(node InnerNode, path string) []string { +func (c *ntmConfig) computeAllSettings(node InnerNode, path string) []string { knownKeys := []string{} for _, name := range node.ChildrenKeys() { newPath := joinKey(path, name) child, _ := node.GetChild(name) - if _, ok := child.(LeafNode); ok { - knownKeys = append(knownKeys, newPath) + if leaf, ok := child.(LeafNode); ok { + if leaf.Source() != model.SourceSchema { + knownKeys = append(knownKeys, newPath) + } else if c.leafAtPathFromNode(newPath, c.root) != missingLeaf { + knownKeys = append(knownKeys, newPath) + } } else if inner, ok := child.(InnerNode); ok { - knownKeys = append(knownKeys, computeAllSettings(inner, newPath)...) + knownKeys = append(knownKeys, c.computeAllSettings(inner, newPath)...) 
} else { log.Errorf("unknown node type in the tree: %T", child) } @@ -418,7 +423,7 @@ func (c *ntmConfig) BuildSchema() { if err := c.mergeAllLayers(); err != nil { c.warnings = append(c.warnings, err.Error()) } - c.allSettings = computeAllSettings(c.schema, "") + c.allSettings = c.computeAllSettings(c.schema, "") } // Stringify stringifies the config, but only with the test build tag @@ -448,9 +453,11 @@ func (c *ntmConfig) buildEnvVars() { envkey := pair[0] envval := pair[1] - if configKey, found := c.configEnvVars[envkey]; found { - if err := c.insertNodeFromString(root, configKey, envval); err != nil { - envWarnings = append(envWarnings, fmt.Sprintf("inserting env var: %s", err)) + if configKeyList, found := c.configEnvVars[envkey]; found { + for _, configKey := range configKeyList { + if err := c.insertNodeFromString(root, configKey, envval); err != nil { + envWarnings = append(envWarnings, fmt.Sprintf("inserting env var: %s", err)) + } } } } @@ -520,6 +527,50 @@ func (c *ntmConfig) IsSet(key string) bool { return true } +func hasNoneDefaultsLeaf(node InnerNode) bool { + // We're on an InnerNode, we need to check if any child leaf are not defaults + for _, name := range node.ChildrenKeys() { + child, _ := node.GetChild(name) + if leaf, ok := child.(LeafNode); ok { + if leaf.Source().IsGreaterThan(model.SourceDefault) { + return true + } + } + if hasNoneDefaultsLeaf(child.(InnerNode)) { + return true + } + } + return false +} + +// IsConfigured checks if a key is set in the config but not from the defaults +func (c *ntmConfig) IsConfigured(key string) bool { + c.RLock() + defer c.RUnlock() + + if !c.isReady() { + log.Errorf("attempt to read key before config is constructed: %s", key) + return false + } + + pathParts := splitKey(key) + var curr Node = c.root + for _, part := range pathParts { + next, err := curr.GetChild(part) + if err != nil { + return false + } + curr = next + } + // if key is a leaf, we just check the source + if leaf, ok := curr.(LeafNode); ok { + return leaf.Source().IsGreaterThan(model.SourceDefault) + } + + // if the key was an InnerNode we need to check all the inner leaf node to check if one was set by the user + return hasNoneDefaultsLeaf(curr.(InnerNode)) +} + // AllKeysLowercased returns all keys lower-cased from the default tree, but not keys that are merely marked as known func (c *ntmConfig) AllKeysLowercased() []string { c.RLock() @@ -529,6 +580,14 @@ func (c *ntmConfig) AllKeysLowercased() []string { } func (c *ntmConfig) leafAtPathFromNode(key string, curr Node) LeafNode { + node := c.nodeAtPathFromNode(key, curr) + if leaf, ok := node.(LeafNode); ok { + return leaf + } + return missingLeaf +} + +func (c *ntmConfig) nodeAtPathFromNode(key string, curr Node) Node { pathParts := splitKey(key) for _, part := range pathParts { next, err := curr.GetChild(part) @@ -537,10 +596,7 @@ func (c *ntmConfig) leafAtPathFromNode(key string, curr Node) LeafNode { } curr = next } - if leaf, ok := curr.(LeafNode); ok { - return leaf - } - return missingLeaf + return curr } // GetNode returns a Node for the given key @@ -592,7 +648,7 @@ func (c *ntmConfig) BindEnv(key string, envvars ...string) { if c.envKeyReplacer != nil { envvar = c.envKeyReplacer.Replace(envvar) } - c.configEnvVars[envvar] = key + c.configEnvVars[envvar] = append(c.configEnvVars[envvar], key) } c.addToSchema(key, model.SourceEnvVar) @@ -622,6 +678,10 @@ func (c *ntmConfig) MergeConfig(in io.Reader) error { c.Lock() defer c.Unlock() + if !c.isReady() { + return fmt.Errorf("attempt to MergeConfig 
before config is constructed") + } + content, err := io.ReadAll(in) if err != nil { return err @@ -784,11 +844,6 @@ func (c *ntmConfig) ConfigFileUsed() string { return c.configFile } -// SetTypeByDefaultValue is a no-op -func (c *ntmConfig) SetTypeByDefaultValue(_in bool) { - // do nothing: nodetreemodel always does this conversion -} - // BindEnvAndSetDefault binds an environment variable and sets a default for the given key func (c *ntmConfig) BindEnvAndSetDefault(key string, val interface{}, envvars ...string) { c.BindEnv(key, envvars...) //nolint:errcheck @@ -809,7 +864,7 @@ func (c *ntmConfig) Object() model.Reader { func NewConfig(name string, envPrefix string, envKeyReplacer *strings.Replacer) model.Config { config := ntmConfig{ ready: atomic.NewBool(false), - configEnvVars: map[string]string{}, + configEnvVars: map[string][]string{}, knownKeys: map[string]struct{}{}, allSettings: []string{}, unknownKeys: map[string]struct{}{}, diff --git a/pkg/config/nodetreemodel/config_test.go b/pkg/config/nodetreemodel/config_test.go index 0fbac128999c9..ffdaaea1a7fe2 100644 --- a/pkg/config/nodetreemodel/config_test.go +++ b/pkg/config/nodetreemodel/config_test.go @@ -6,10 +6,14 @@ package nodetreemodel import ( + "encoding/json" "fmt" + "io" "os" "sort" + "strconv" "strings" + "sync" "testing" "github.com/DataDog/datadog-agent/pkg/config/model" @@ -18,7 +22,7 @@ import ( ) // Test that a setting with a map value is seen as a leaf by the nodetreemodel config -func TestBuildDefaultMakesTooManyNodes(t *testing.T) { +func TestLeafNodeCanHaveComplexMapValue(t *testing.T) { cfg := NewConfig("test", "", nil) cfg.BindEnvAndSetDefault("kubernetes_node_annotations_as_tags", map[string]string{"cluster.k8s.io/machine": "kube_machine"}) cfg.BuildSchema() @@ -252,6 +256,7 @@ func TestAllSettings(t *testing.T) { cfg.SetDefault("a", 0) cfg.SetDefault("b.c", 0) cfg.SetDefault("b.d", 0) + cfg.SetKnown("b.e") cfg.BuildSchema() cfg.ReadConfig(strings.NewReader("a: 987")) @@ -343,6 +348,44 @@ func TestIsSet(t *testing.T) { assert.False(t, cfg.IsKnown("unknown")) } +func TestIsConfigured(t *testing.T) { + cfg := NewConfig("test", "TEST", nil) + cfg.SetDefault("a", 0) + cfg.SetDefault("b", 0) + cfg.SetKnown("c") + cfg.BindEnv("d") + + t.Setenv("TEST_D", "123") + + cfg.BuildSchema() + + cfg.Set("b", 123, model.SourceAgentRuntime) + + assert.False(t, cfg.IsConfigured("a")) + assert.True(t, cfg.IsConfigured("b")) + assert.False(t, cfg.IsConfigured("c")) + assert.True(t, cfg.IsConfigured("d")) + + assert.False(t, cfg.IsConfigured("unknown")) +} + +func TestEnvVarMultipleSettings(t *testing.T) { + cfg := NewConfig("test", "TEST", nil) + cfg.SetDefault("a", 0) + cfg.SetDefault("b", 0) + cfg.SetDefault("c", 0) + cfg.BindEnv("a", "TEST_MY_ENVVAR") + cfg.BindEnv("b", "TEST_MY_ENVVAR") + + t.Setenv("TEST_MY_ENVVAR", "123") + + cfg.BuildSchema() + + assert.Equal(t, 123, cfg.GetInt("a")) + assert.Equal(t, 123, cfg.GetInt("b")) + assert.Equal(t, 0, cfg.GetInt("c")) +} + func TestAllKeysLowercased(t *testing.T) { cfg := NewConfig("test", "TEST", nil) cfg.SetDefault("a", 0) @@ -592,3 +635,175 @@ func TestMergeFleetPolicy(t *testing.T) { assert.Equal(t, "baz", config.Get("foo")) assert.Equal(t, model.SourceFleetPolicies, config.GetSource("foo")) } + +func TestMergeConfig(t *testing.T) { + config := NewConfig("test", "TEST", strings.NewReplacer(".", "_")) // nolint: forbidigo + config.SetConfigType("yaml") + config.SetDefault("foo", "") + config.BuildSchema() + + file, err := os.CreateTemp("", "datadog.yaml") + assert.NoError(t, 
err, "failed to create temporary file: %w", err) + file.Write([]byte("foo: baz")) + file.Seek(0, io.SeekStart) + err = config.MergeConfig(file) + assert.NoError(t, err) + + assert.Equal(t, "baz", config.Get("foo")) + assert.Equal(t, model.SourceFile, config.GetSource("foo")) +} + +func TestOnUpdate(t *testing.T) { + cfg := NewConfig("test", "TEST", nil) + cfg.SetDefault("a", 1) + cfg.BuildSchema() + + var wg sync.WaitGroup + + gotSetting := "" + var gotOldValue, gotNewValue interface{} + cfg.OnUpdate(func(setting string, oldValue, newValue any) { + gotSetting = setting + gotOldValue = oldValue + gotNewValue = newValue + wg.Done() + }) + + wg.Add(1) + go func() { + cfg.Set("a", 2, model.SourceAgentRuntime) + }() + wg.Wait() + + assert.Equal(t, 2, cfg.Get("a")) + assert.Equal(t, model.SourceAgentRuntime, cfg.GetSource("a")) + assert.Equal(t, "a", gotSetting) + assert.Equal(t, 1, gotOldValue) + assert.Equal(t, 2, gotNewValue) +} + +func TestSetInvalidSource(t *testing.T) { + cfg := NewConfig("test", "TEST", nil) + cfg.SetDefault("a", 1) + cfg.BuildSchema() + + cfg.Set("a", 2, model.Source("invalid")) + + assert.Equal(t, 1, cfg.Get("a")) + assert.Equal(t, model.SourceDefault, cfg.GetSource("a")) +} + +func TestSetWithoutSource(t *testing.T) { + cfg := NewConfig("test", "TEST", nil) + cfg.SetDefault("a", 1) + cfg.BuildSchema() + + cfg.SetWithoutSource("a", 2) + + assert.Equal(t, 2, cfg.Get("a")) + assert.Equal(t, model.SourceUnknown, cfg.GetSource("a")) +} + +func TestPanicAfterBuildSchema(t *testing.T) { + cfg := NewConfig("test", "TEST", nil) + cfg.SetDefault("a", 1) + cfg.BuildSchema() + + assert.PanicsWithValue(t, "cannot SetDefault() once the config has been marked as ready for use", func() { + cfg.SetDefault("a", 2) + }) + + assert.Equal(t, 1, cfg.Get("a")) + assert.Equal(t, model.SourceDefault, cfg.GetSource("a")) + + assert.PanicsWithValue(t, "cannot SetKnown() once the config has been marked as ready for use", func() { + cfg.SetKnown("a") + }) + assert.PanicsWithValue(t, "cannot BindEnv() once the config has been marked as ready for use", func() { + cfg.BindEnv("a") + }) + assert.PanicsWithValue(t, "cannot SetEnvKeyReplacer() once the config has been marked as ready for use", func() { + cfg.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + }) +} + +func TestEnvVarTransformers(t *testing.T) { + cfg := NewConfig("test", "TEST", nil) + cfg.BindEnvAndSetDefault("list_of_nums", []float64{}, "TEST_LIST_OF_NUMS") + cfg.BindEnvAndSetDefault("list_of_fruit", []string{}, "TEST_LIST_OF_FRUIT") + cfg.BindEnvAndSetDefault("tag_set", []map[string]string{}, "TEST_TAG_SET") + cfg.BindEnvAndSetDefault("list_keypairs", map[string]interface{}{}, "TEST_LIST_KEYPAIRS") + + os.Setenv("TEST_LIST_OF_NUMS", "34,67.5,901.125") + os.Setenv("TEST_LIST_OF_FRUIT", "apple,banana,cherry") + os.Setenv("TEST_TAG_SET", `[{"cat":"meow"},{"dog":"bark"}]`) + os.Setenv("TEST_LIST_KEYPAIRS", `a=1,b=2,c=3`) + + cfg.ParseEnvAsSlice("list_of_nums", func(in string) []interface{} { + vals := []interface{}{} + for _, str := range strings.Split(in, ",") { + f, err := strconv.ParseFloat(str, 64) + if err != nil { + continue + } + vals = append(vals, f) + } + return vals + }) + cfg.ParseEnvAsStringSlice("list_of_fruit", func(in string) []string { + return strings.Split(in, ",") + }) + cfg.ParseEnvAsSliceMapString("tag_set", func(in string) []map[string]string { + var out []map[string]string + if err := json.Unmarshal([]byte(in), &out); err != nil { + assert.Fail(t, "failed to json.Unmarshal", err) + } + return out + }) + 
cfg.ParseEnvAsMapStringInterface("list_keypairs", func(in string) map[string]interface{} { + parts := strings.Split(in, ",") + res := map[string]interface{}{} + for _, part := range parts { + elems := strings.Split(part, "=") + val, _ := strconv.ParseInt(elems[1], 10, 64) + res[elems[0]] = int(val) + } + return res + }) + + cfg.BuildSchema() + + var nums []float64 = cfg.GetFloat64Slice("list_of_nums") + assert.Equal(t, []float64{34, 67.5, 901.125}, nums) + + var fruits []string = cfg.GetStringSlice("list_of_fruit") + assert.Equal(t, []string{"apple", "banana", "cherry"}, fruits) + + tagsValue := cfg.Get("tag_set") + tags, converted := tagsValue.([]map[string]string) + assert.Equal(t, true, converted) + assert.Equal(t, []map[string]string{{"cat": "meow"}, {"dog": "bark"}}, tags) + + var kvs map[string]interface{} = cfg.GetStringMap("list_keypairs") + assert.Equal(t, map[string]interface{}{"a": 1, "b": 2, "c": 3}, kvs) +} + +func TestUnmarshalKeyIsDeprecated(t *testing.T) { + cfg := NewConfig("test", "TEST", nil) + cfg.SetDefault("a", []string{"a", "b"}) + cfg.BuildSchema() + + var texts []string + err := cfg.UnmarshalKey("a", &texts) + assert.Error(t, err) +} + +func TestSetConfigFile(t *testing.T) { + config := NewConfig("test", "TEST", strings.NewReplacer(".", "_")) // nolint: forbidigo + config.SetConfigType("yaml") + config.SetConfigFile("datadog.yaml") + config.SetDefault("foo", "") + config.BuildSchema() + + assert.Equal(t, "datadog.yaml", config.ConfigFileUsed()) +} diff --git a/pkg/config/nodetreemodel/getter.go b/pkg/config/nodetreemodel/getter.go index 450271f001f95..edf5ef4d9c5e9 100644 --- a/pkg/config/nodetreemodel/getter.go +++ b/pkg/config/nodetreemodel/getter.go @@ -7,13 +7,13 @@ package nodetreemodel import ( "maps" + "slices" "time" "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/mohae/deepcopy" "github.com/spf13/cast" - "golang.org/x/exp/slices" ) func (c *ntmConfig) leafAtPath(key string) LeafNode { @@ -106,20 +106,68 @@ func (c *ntmConfig) inferTypeFromDefault(key string, value interface{}) (interfa } } + // if we don't have a default and the value is a map[interface{}]interface{} we try to cast is as a + // map[string]interface{}. This mimic the behavior from viper that default to that type. + // + // TODO: once all settings in the config have a default value we can remove this logic + if m, ok := value.(map[interface{}]interface{}); ok { + res := map[string]interface{}{} + + for k, v := range m { + if keyString, ok := k.(string); ok { + res[keyString] = deepcopy.Copy(v) + } else { + goto simplyCopy + } + } + return res, nil + } + // NOTE: should only need to deepcopy for `Get`, because it can be an arbitrary value, // and we shouldn't ever return complex types like maps and slices that could be modified // by callers accidentally or on purpose. 
By copying, the caller may modify the result safetly +simplyCopy: return deepcopy.Copy(value), nil } +func (c *ntmConfig) getNodeValue(key string) interface{} { + if !c.isReady() { + log.Errorf("attempt to read key before config is constructed: %s", key) + return missingLeaf + } + + node := c.nodeAtPathFromNode(key, c.root) + + if leaf, ok := node.(LeafNode); ok { + return leaf.Get() + } + + // When querying an InnerNode we convert it as a map[string]interface{} to mimic Viper's logic + var converter func(node InnerNode) map[string]interface{} + converter = func(node InnerNode) map[string]interface{} { + res := map[string]interface{}{} + for _, name := range node.ChildrenKeys() { + child, _ := node.GetChild(name) + + if leaf, ok := child.(LeafNode); ok { + res[name] = leaf.Get() + } else { + res[name] = converter(child.(InnerNode)) + } + } + return res + } + + return converter(node.(InnerNode)) +} + // Get returns a copy of the value for the given key func (c *ntmConfig) Get(key string) interface{} { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val := c.leafAtPath(key).Get() - val, err := c.inferTypeFromDefault(key, val) + val, err := c.inferTypeFromDefault(key, c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -149,7 +197,7 @@ func (c *ntmConfig) GetString(key string) string { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - str, err := cast.ToStringE(c.leafAtPath(key).Get()) + str, err := cast.ToStringE(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -161,7 +209,7 @@ func (c *ntmConfig) GetBool(key string) bool { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - b, err := cast.ToBoolE(c.leafAtPath(key).Get()) + b, err := cast.ToBoolE(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -173,7 +221,7 @@ func (c *ntmConfig) GetInt(key string) int { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val, err := cast.ToIntE(c.leafAtPath(key).Get()) + val, err := cast.ToIntE(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -185,7 +233,7 @@ func (c *ntmConfig) GetInt32(key string) int32 { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val, err := cast.ToInt32E(c.leafAtPath(key).Get()) + val, err := cast.ToInt32E(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -197,7 +245,7 @@ func (c *ntmConfig) GetInt64(key string) int64 { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val, err := cast.ToInt64E(c.leafAtPath(key).Get()) + val, err := cast.ToInt64E(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -209,7 +257,7 @@ func (c *ntmConfig) GetFloat64(key string) float64 { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val, err := cast.ToFloat64E(c.leafAtPath(key).Get()) + val, err := cast.ToFloat64E(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -222,7 +270,7 @@ func (c *ntmConfig) GetFloat64Slice(key string) []float64 { defer c.RUnlock() c.checkKnownKey(key) - list, err := cast.ToStringSliceE(c.leafAtPath(key).Get()) + list, err := cast.ToStringSliceE(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -244,7 +292,7 @@ func (c *ntmConfig) GetDuration(key string) 
time.Duration { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val, err := cast.ToDurationE(c.leafAtPath(key).Get()) + val, err := cast.ToDurationE(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -256,7 +304,7 @@ func (c *ntmConfig) GetStringSlice(key string) []string { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val, err := cast.ToStringSliceE(c.leafAtPath(key).Get()) + val, err := cast.ToStringSliceE(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -268,7 +316,7 @@ func (c *ntmConfig) GetStringMap(key string) map[string]interface{} { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val, err := cast.ToStringMapE(c.leafAtPath(key).Get()) + val, err := cast.ToStringMapE(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -280,7 +328,7 @@ func (c *ntmConfig) GetStringMapString(key string) map[string]string { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val, err := cast.ToStringMapStringE(c.leafAtPath(key).Get()) + val, err := cast.ToStringMapStringE(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } @@ -292,7 +340,7 @@ func (c *ntmConfig) GetStringMapStringSlice(key string) map[string][]string { c.RLock() defer c.RUnlock() c.checkKnownKey(key) - val, err := cast.ToStringMapStringSliceE(c.leafAtPath(key).Get()) + val, err := cast.ToStringMapStringSliceE(c.getNodeValue(key)) if err != nil { log.Warnf("failed to get configuration value for key %q: %s", key, err) } diff --git a/pkg/config/nodetreemodel/getter_test.go b/pkg/config/nodetreemodel/getter_test.go index 3c05bffc40d27..224179e57a49a 100644 --- a/pkg/config/nodetreemodel/getter_test.go +++ b/pkg/config/nodetreemodel/getter_test.go @@ -44,6 +44,62 @@ func TestGet(t *testing.T) { assert.Equal(t, 9876, cfg.Get("a")) assert.Equal(t, nil, cfg.Get("does_not_exists")) + + // test implicit conversion + cfg.Set("a", "1111", model.SourceAgentRuntime) + assert.Equal(t, 1111, cfg.Get("a")) +} + +func TestGetDefaultType(t *testing.T) { + cfg := NewConfig("test", "", nil) + cfg.SetKnown("a") + cfg.SetKnown("b") + cfg.BuildSchema() + + cfg.ReadConfig(strings.NewReader(`--- +a: + "url1": + - apikey2 + - apikey3 + "url2": + - apikey4 +b: + 1: + - a + - b + 2: + - c +`)) + + expected := map[string]interface{}{ + "url1": []interface{}{"apikey2", "apikey3"}, + "url2": []interface{}{"apikey4"}, + } + assert.Equal(t, expected, cfg.Get("a")) + + expected2 := map[interface{}]interface{}{ + 1: []interface{}{"a", "b"}, + 2: []interface{}{"c"}, + } + assert.Equal(t, expected2, cfg.Get("b")) +} + +func TestGetInnerNode(t *testing.T) { + cfg := NewConfig("test", "", nil) + cfg.SetDefault("a.b.c", 1234) + cfg.SetDefault("a.e", 1234) + cfg.BuildSchema() + + assert.Equal(t, 1234, cfg.Get("a.b.c")) + assert.Equal(t, 1234, cfg.Get("a.e")) + assert.Equal(t, map[string]interface{}{"c": 1234}, cfg.Get("a.b")) + assert.Equal(t, map[string]interface{}{"b": map[string]interface{}{"c": 1234}, "e": 1234}, cfg.Get("a")) + + cfg.Set("a.b.c", 9876, model.SourceAgentRuntime) + assert.Equal(t, 9876, cfg.Get("a.b.c")) + assert.Equal(t, 1234, cfg.Get("a.e")) + assert.Equal(t, map[string]interface{}{"c": 9876}, cfg.Get("a.b")) + assert.Equal(t, map[string]interface{}{"b": map[string]interface{}{"c": 9876}, "e": 1234}, cfg.Get("a")) } func TestGetCastToDefault(t *testing.T) { diff --git a/pkg/config/nodetreemodel/go.mod 
b/pkg/config/nodetreemodel/go.mod index e0f87c66cb372..5ed7dc5f243c6 100644 --- a/pkg/config/nodetreemodel/go.mod +++ b/pkg/config/nodetreemodel/go.mod @@ -12,14 +12,14 @@ replace ( replace github.com/spf13/cast => github.com/DataDog/cast v1.8.0 require ( - github.com/DataDog/datadog-agent/pkg/config/model v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 github.com/DataDog/viper v1.14.0 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 - github.com/spf13/cast v1.7.0 + github.com/spf13/cast v1.7.1 github.com/stretchr/testify v1.10.0 go.uber.org/atomic v1.11.0 - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 gopkg.in/yaml.v2 v2.4.0 ) @@ -37,7 +37,7 @@ require ( github.com/spf13/afero v1.11.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/config/nodetreemodel/go.sum b/pkg/config/nodetreemodel/go.sum index c5a94912c5617..a7916d2899ef1 100644 --- a/pkg/config/nodetreemodel/go.sum +++ b/pkg/config/nodetreemodel/go.sum @@ -188,8 +188,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -222,8 +222,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= diff --git a/pkg/config/remote/api/http.go b/pkg/config/remote/api/http.go index 
bdf13e5acd0ea..375ee14ce555b 100644 --- a/pkg/config/remote/api/http.go +++ b/pkg/config/remote/api/http.go @@ -234,7 +234,7 @@ func (c *HTTPClient) FetchOrgStatus(ctx context.Context) (*pbgo.OrgStatusRespons // Token for authentication to the RC backend. func (c *HTTPClient) UpdatePARJWT(jwt string) { c.headerLock.Lock() - c.header.Set("DD-PAR-JWT", jwt) + c.header["DD-PAR-JWT"] = []string{jwt} c.headerLock.Unlock() } diff --git a/pkg/config/remote/client/client.go b/pkg/config/remote/client/client.go index 77ef19c2b1375..f54229491ef0e 100644 --- a/pkg/config/remote/client/client.go +++ b/pkg/config/remote/client/client.go @@ -254,7 +254,7 @@ func newClient(cf ConfigFetcher, opts ...func(opts *Options)) (*Client, error) { var err error if !options.skipTufVerification { - repository, err = state.NewRepository(meta.RootsDirector(options.site, options.directorRootOverride).Last()) + repository, err = state.NewRepository(meta.RootsDirector(options.site, options.directorRootOverride).Root()) } else { repository, err = state.NewUnverifiedRepository() } diff --git a/pkg/config/remote/go.mod b/pkg/config/remote/go.mod index 3b809d121d819..7b495ad1ea791 100644 --- a/pkg/config/remote/go.mod +++ b/pkg/config/remote/go.mod @@ -31,7 +31,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/http => ../../util/http github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system @@ -44,13 +44,13 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.57.0 github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/grpc v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/DataDog/datadog-agent/pkg/util/uuid v0.56.0-rc.3 github.com/Masterminds/semver v1.5.0 github.com/benbjohnson/clock v1.3.5 @@ -59,7 +59,7 @@ require ( github.com/stretchr/testify v1.10.0 go.etcd.io/bbolt v1.3.11 go.uber.org/atomic v1.11.0 - google.golang.org/protobuf v1.35.2 + google.golang.org/protobuf v1.36.3 gopkg.in/DataDog/dd-trace-go.v1 v1.69.1 ) @@ -69,7 +69,7 @@ require ( github.com/DataDog/datadog-agent/pkg/api v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect @@ -77,17 +77,17 @@ require ( github.com/DataDog/datadog-agent/pkg/obfuscate v0.48.0 // 
indirect github.com/DataDog/datadog-agent/pkg/util/cache v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/datadog-go/v5 v5.6.0 // indirect github.com/DataDog/go-libddwaf/v3 v3.5.1 // indirect - github.com/DataDog/go-sqllexer v0.0.17 // indirect + github.com/DataDog/go-sqllexer v0.0.20 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -97,30 +97,32 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 // indirect github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 // indirect github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect github.com/hashicorp/go-sockaddr v1.0.6 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/outcaste-io/ristretto v0.2.3 // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/time v0.8.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect modernc.org/sqlite v1.34.1 // indirect ) require ( - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 github.com/DataDog/viper v1.14.0 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // 
indirect @@ -128,7 +130,6 @@ require ( github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect @@ -136,18 +137,17 @@ require ( github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/tinylib/msgp v1.2.4 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + github.com/tinylib/msgp v1.2.5 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/config/remote/go.sum b/pkg/config/remote/go.sum index 176ccf5fcd4b4..657edc8e6f62e 100644 --- a/pkg/config/remote/go.sum +++ b/pkg/config/remote/go.sum @@ -1,9 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go/compute v1.28.0 h1:OPtBxMcheSS+DWfci803qvPly3d4w7Eu5ztKBcFfzwk= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= +cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/appsec-internal-go v1.9.0 h1:cGOneFsg0JTRzWl5U2+og5dbtyW3N8XaYwc5nXe39Vw= github.com/DataDog/appsec-internal-go v1.9.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g= @@ -11,8 +10,8 @@ github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEU github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= github.com/DataDog/go-libddwaf/v3 v3.5.1 h1:GWA4ln4DlLxiXm+X7HA/oj0ZLcdCwOS81KQitegRTyY= github.com/DataDog/go-libddwaf/v3 v3.5.1/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4= -github.com/DataDog/go-sqllexer v0.0.17 
h1:u47fJAVg/+5DA74ZW3w0Qu+3qXHd3GtnA8ZBYixdPrM= -github.com/DataDog/go-sqllexer v0.0.17/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.20 h1:0fBknHo42yuhawZS3GtuQSdqcwaiojWjYNT6OdsZRfI= +github.com/DataDog/go-sqllexer v0.0.20/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4= @@ -32,7 +31,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= @@ -90,6 +88,10 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -114,7 +116,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -123,7 +124,6 @@ github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5 h1:5iH8iuqE5apketRbSFBy+X1V0o+l+8NF1avt4HWl7cA= github.com/google/pprof v0.0.0-20240827171923-fa2c70bbbfe5/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -135,8 +135,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vb github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8 h1:iBt4Ew4XEGLfh6/bPk4rSYmuZJGizr6/x/AEizP0CQc= github.com/hashicorp/go-secure-stdlib/parseutil v0.1.8/go.mod h1:aiJI+PIApBRQG7FZTEBx5GiiX+HbOHilUdNxUZi4eV0= github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts= @@ -173,8 +173,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -215,8 +215,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -233,8 +233,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -255,8 +255,8 @@ github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkB github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -270,8 +270,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -297,8 +297,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod 
h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -318,6 +318,18 @@ github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -347,11 +359,11 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod 
h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -366,7 +378,6 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -375,15 +386,13 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -404,7 +413,6 @@ golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -415,8 +423,8 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -424,8 +432,8 @@ golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -454,13 +462,10 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -469,11 +474,10 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/DataDog/dd-trace-go.v1 v1.69.1 h1:grTElrPaCfxUsrJjyPLHlVPbmlKVzWMxVdcBrGZSzEk= gopkg.in/DataDog/dd-trace-go.v1 v1.69.1/go.mod h1:U9AOeBHNAL95JXcd/SPf4a7O5GNeF/yD13sJtli/yaU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= diff --git a/pkg/config/remote/meta/meta.go b/pkg/config/remote/meta/meta.go index 399d6fa25f3cc..990a16b37e13c 100644 --- a/pkg/config/remote/meta/meta.go +++ b/pkg/config/remote/meta/meta.go @@ -8,70 +8,94 @@ package meta import ( _ "embed" + "encoding/json" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/go-tuf/data" ) var ( - //go:embed prod.1.director.json - prodRootDirector1 []byte - //go:embed prod.1.config.json - prodRootConfig1 []byte + //go:embed prod.director.json + prodRootDirector []byte + //go:embed prod.config.json + prodRootConfig []byte - //go:embed staging.1.director.json - stagingRootDirector1 []byte - //go:embed staging.1.config.json - stagingRootConfig1 []byte + //go:embed staging.director.json + stagingRootDirector []byte + //go:embed staging.config.json + stagingRootConfig []byte ) -// EmbeddedRoot is an embedded root -type EmbeddedRoot []byte - -// EmbeddedRoots is a map of version => EmbeddedRoot -type EmbeddedRoots map[uint64]EmbeddedRoot - -var ( - prodRootsDirector = EmbeddedRoots{1: prodRootDirector1} - prodRootsConfig = EmbeddedRoots{1: 
prodRootConfig1} +// EmbeddedRoot is an embedded root with its version parsed +type EmbeddedRoot struct { + latest uint64 + root []byte +} - stagingRootsDirector = EmbeddedRoots{1: stagingRootDirector1} - stagingRootsConfig = EmbeddedRoots{1: stagingRootConfig1} -) +// NewEmbeddedRoot creates a new EmbeddedRoot +func NewEmbeddedRoot(embeddedRoot []byte) EmbeddedRoot { + version, err := parseRootVersion(embeddedRoot) + if err != nil { + panic(err) + } + return EmbeddedRoot{ + latest: version, + root: embeddedRoot, + } +} // RootsDirector returns all the roots of the director repo -func RootsDirector(site string, directorRootOverride string) EmbeddedRoots { +func RootsDirector(site string, directorRootOverride string) EmbeddedRoot { if directorRootOverride != "" { - return EmbeddedRoots{ - 1: EmbeddedRoot(directorRootOverride), - } + return NewEmbeddedRoot([]byte(directorRootOverride)) } switch site { case "datad0g.com": - return stagingRootsDirector + return NewEmbeddedRoot(stagingRootDirector) default: - return prodRootsDirector + return NewEmbeddedRoot(prodRootDirector) } } // RootsConfig returns all the roots of the director repo -func RootsConfig(site string, configRootOverride string) EmbeddedRoots { +func RootsConfig(site string, configRootOverride string) EmbeddedRoot { if configRootOverride != "" { - return EmbeddedRoots{ - 1: EmbeddedRoot(configRootOverride), - } + return NewEmbeddedRoot([]byte(configRootOverride)) } + switch site { case "datad0g.com": - return stagingRootsConfig + return NewEmbeddedRoot(stagingRootConfig) default: - return prodRootsConfig + return NewEmbeddedRoot(prodRootConfig) } } -// Last returns the last root the EmbeddedRoots -func (roots EmbeddedRoots) Last() EmbeddedRoot { - return roots[roots.LastVersion()] +// Root returns the raw bytes of the embedded root +func (root EmbeddedRoot) Root() []byte { + return root.root +} + +// Version returns the version parsed from the embedded root +func (root EmbeddedRoot) Version() uint64 { + return root.latest } -// LastVersion returns the last version of the EmbeddedRoots -func (roots EmbeddedRoots) LastVersion() uint64 { - return uint64(len(roots)) +// parseRootVersion parses the version field of an embedded root +func parseRootVersion(rootBytes []byte) (uint64, error) { + var signedRoot data.Signed + err := json.Unmarshal(rootBytes, &signedRoot) + if err != nil { + log.Errorf("Corrupted root metadata: %v", err) + return 0, err + } + + var root data.Root + err = json.Unmarshal(signedRoot.Signed, &root) + if err != nil { + log.Errorf("Corrupted root metadata: %v", err) + return 0, err + } + + return uint64(root.Version), nil } diff --git a/pkg/config/remote/meta/meta_test.go b/pkg/config/remote/meta/meta_test.go new file mode 100644 index 0000000000000..855cc5a14ffb7 --- /dev/null +++ b/pkg/config/remote/meta/meta_test.go @@ -0,0 +1,36 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc.
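The rest of this patch consumes the reworked meta API above in three places: client.go feeds Root() to state.NewRepository, the uptane client test compares Version() against the repository state, and local_store.go seeds its roots bucket from a single EmbeddedRoot. A minimal, self-contained sketch of that call pattern follows; the main() wrapper and the printed output are illustrative assumptions, and only meta.RootsDirector, Root(), and Version() are taken from the diff itself.

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/config/remote/meta"
)

func main() {
	// An empty override selects the prod director root embedded in the binary;
	// site "datad0g.com" would select the staging root instead.
	director := meta.RootsDirector("datadoghq.com", "")

	// Version() is parsed from the "version" field of the signed TUF root,
	// so updating the embedded JSON file is enough to advance it.
	fmt.Println("director root version:", director.Version())

	// Root() returns the raw JSON bytes, which the client hands to
	// state.NewRepository when TUF verification is enabled.
	fmt.Println("director root size:", len(director.Root()), "bytes")
}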
+ +package meta + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseProdDirectorVersion(t *testing.T) { + v, err := parseRootVersion(prodRootDirector) + require.NoError(t, err) + require.Greater(t, v, uint64(0)) +} + +func TestParseProdTUFVersion(t *testing.T) { + v, err := parseRootVersion(prodRootConfig) + require.NoError(t, err) + require.Greater(t, v, uint64(0)) +} + +func TestParseStagingDirectorVersion(t *testing.T) { + v, err := parseRootVersion(stagingRootDirector) + require.NoError(t, err) + require.Greater(t, v, uint64(0)) +} + +func TestParseStagingTUFVersion(t *testing.T) { + v, err := parseRootVersion(stagingRootConfig) + require.NoError(t, err) + require.Greater(t, v, uint64(0)) +} diff --git a/pkg/config/remote/meta/prod.1.config.json b/pkg/config/remote/meta/prod.config.json similarity index 100% rename from pkg/config/remote/meta/prod.1.config.json rename to pkg/config/remote/meta/prod.config.json diff --git a/pkg/config/remote/meta/prod.1.director.json b/pkg/config/remote/meta/prod.director.json similarity index 100% rename from pkg/config/remote/meta/prod.1.director.json rename to pkg/config/remote/meta/prod.director.json diff --git a/pkg/config/remote/meta/staging.1.config.json b/pkg/config/remote/meta/staging.1.config.json deleted file mode 100644 index 0f347b2980935..0000000000000 --- a/pkg/config/remote/meta/staging.1.config.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "signatures": [ - { - "keyid": "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e", - "sig": "4af18f0919fb9b8ba7ffc9f6fb325c887083c28a474981e29ccc5bdeea7a2bf2f8568be8f8bd3c6c498dd118e2c8f713d22032196cf400465f8fb700ba800f0d" - }, - { - "keyid": "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545", - "sig": "2e6bb516308fd8c79faff015a443b65dea0af780842aacc5c05f49ae8fd709bfdd70e191a38d0b64aad03bb4398052b82bd224d6e55c90d4c38220aa9db62705" - } - ], - "signed": { - "_type": "root", - "consistent_snapshot": true, - "expires": "1970-01-01T00:00:00Z", - "keys": { - "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ed25519", - "keyval": { - "public": "09402247ef6252018e52c7ba6a3a484936f14dad6ae921c556a1d092f4a68f0f" - }, - "scheme": "ed25519" - }, - "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ed25519", - "keyval": { - "public": "cf248bc222a5dfc9676a2a3ef90526c84adb09649db56686705f69f42908d7d8" - }, - "scheme": "ed25519" - } - }, - "roles": { - "root": { - "keyids": [ - "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545", - "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e" - ], - "threshold": 2 - }, - "snapshot": { - "keyids": [ - "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545", - "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e" - ], - "threshold": 2 - }, - "targets": { - "keyids": [ - "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545", - "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e" - ], - "threshold": 2 - }, - "timestamp": { - "keyids": [ - "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545", - "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e" - ], - "threshold": 2 - } - }, - "spec_version": "1.0", - "version": 1 - } -} diff --git a/pkg/config/remote/meta/staging.1.director.json b/pkg/config/remote/meta/staging.1.director.json deleted 
file mode 100644 index 7e45aecc99ac0..0000000000000 --- a/pkg/config/remote/meta/staging.1.director.json +++ /dev/null @@ -1,73 +0,0 @@ -{ - "signatures": [ - { - "keyid": "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6", - "sig": "6d7ddf4bcbd1ce223b5352cae4671ef42800d79f0c94dda905cf0dd8a6198ba69795a19201dc7230e4bd872cf109e827233678bf76389910933472417488320e" - }, - { - "keyid": "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1", - "sig": "a1236d12903e1c4024fc6340c50a0f2fe9972e967eb2bace8d6594e156f0466f772bfc0c9f30e07067904073c0d7ba7d48ad00341405312daf0d7bc502ccc50f" - } - ], - "signed": { - "_type": "root", - "consistent_snapshot": true, - "expires": "1970-01-01T00:00:00Z", - "keys": { - "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ed25519", - "keyval": { - "public": "f7c278f32e69ce7d5ca5b81bd2cbe2b4b44177eee36ed025ec06bd19e47eaefe" - }, - "scheme": "ed25519" - }, - "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1": { - "keyid_hash_algorithms": [ - "sha256", - "sha512" - ], - "keytype": "ed25519", - "keyval": { - "public": "47be15ec10499208aa5ef9a1e32010cc05c047a98d18ad084d6e4e51baa1b93c" - }, - "scheme": "ed25519" - } - }, - "roles": { - "root": { - "keyids": [ - "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1", - "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6" - ], - "threshold": 2 - }, - "snapshot": { - "keyids": [ - "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1", - "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6" - ], - "threshold": 2 - }, - "targets": { - "keyids": [ - "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1", - "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6" - ], - "threshold": 2 - }, - "timestamp": { - "keyids": [ - "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1", - "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6" - ], - "threshold": 2 - } - }, - "spec_version": "1.0", - "version": 1 - } -} diff --git a/pkg/config/remote/meta/staging.config.json b/pkg/config/remote/meta/staging.config.json new file mode 100644 index 0000000000000..51bbae121238c --- /dev/null +++ b/pkg/config/remote/meta/staging.config.json @@ -0,0 +1,67 @@ +{ + "signatures": [ + { + "keyid": "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545", + "sig": "928d0b9de72a1a1c2fad453e52950509a434814ca0dc5fb43db5100fdbd732461b38b522051ffedc7c226426ce102c245bc69895fde0f0ca0d9615f84027c60f" + }, + { + "keyid": "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e", + "sig": "146d301f5dd97125ddd34d13ad5c7b1f071bbd249d7c86d17a095c0fbfd680ed21737f45997361e14e79be973914cfb35da39c02ce58f81df12afd9eb49d0003" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2025-12-01T17:00:00Z", + "keys": { + "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e": { + "keyid_hash_algorithms": ["sha256", "sha512"], + "keytype": "ed25519", + "keyval": { + "public": "09402247ef6252018e52c7ba6a3a484936f14dad6ae921c556a1d092f4a68f0f" + }, + "scheme": "ed25519" + }, + "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545": { + "keyid_hash_algorithms": ["sha256", "sha512"], + "keytype": "ed25519", + "keyval": { + "public": "cf248bc222a5dfc9676a2a3ef90526c84adb09649db56686705f69f42908d7d8" + }, + "scheme": "ed25519" + } + }, + "roles": { + "root": { + "keyids": [ + 
"6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e", + "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545" + ], + "threshold": 2 + }, + "snapshot": { + "keyids": [ + "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e", + "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545" + ], + "threshold": 2 + }, + "targets": { + "keyids": [ + "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e", + "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545" + ], + "threshold": 2 + }, + "timestamp": { + "keyids": [ + "6aac6a51efedb4e54915bf9fbd2cfb49fbf428d46052bcaf3c72409c33ecdf5e", + "bd3ea764afdf757f07bab1e9e501a5fda1d49a8da3eaddc53a50dbe2aff92545" + ], + "threshold": 2 + } + }, + "spec_version": "1.0", + "version": 29 + } +} diff --git a/pkg/config/remote/meta/staging.director.json b/pkg/config/remote/meta/staging.director.json new file mode 100644 index 0000000000000..51665939ca43e --- /dev/null +++ b/pkg/config/remote/meta/staging.director.json @@ -0,0 +1,67 @@ +{ + "signatures": [ + { + "keyid": "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6", + "sig": "c926bd33ae30267ddf141b8b8fb2f6338b6f1451cbc2b1b704082080c337272b35d599272843a66a720a601d4e08209f87747beb5f000663d851c7b6a13bf901" + }, + { + "keyid": "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1", + "sig": "d4baf2506a736be2ede3dd488d45a11de68a3987f661452853a23d21de943dfaa033e3a099051db8669f1a3837cb07375685b4fd89e226ce83c28d66aeab700a" + } + ], + "signed": { + "_type": "root", + "consistent_snapshot": true, + "expires": "2025-12-01T17:00:00Z", + "keys": { + "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6": { + "keyid_hash_algorithms": ["sha256", "sha512"], + "keytype": "ed25519", + "keyval": { + "public": "f7c278f32e69ce7d5ca5b81bd2cbe2b4b44177eee36ed025ec06bd19e47eaefe" + }, + "scheme": "ed25519" + }, + "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1": { + "keyid_hash_algorithms": ["sha256", "sha512"], + "keytype": "ed25519", + "keyval": { + "public": "47be15ec10499208aa5ef9a1e32010cc05c047a98d18ad084d6e4e51baa1b93c" + }, + "scheme": "ed25519" + } + }, + "roles": { + "root": { + "keyids": [ + "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6", + "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1" + ], + "threshold": 2 + }, + "snapshot": { + "keyids": [ + "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6", + "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1" + ], + "threshold": 2 + }, + "targets": { + "keyids": [ + "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6", + "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1" + ], + "threshold": 2 + }, + "timestamp": { + "keyids": [ + "233a529fe7c63b5b9081f6e0e2681cc227f85e04ad434d0a165a2f69b87255a6", + "6ca796e7b4883af3bb3d522dc0009984dcbf5ad2a6c9ea354d30acc32d8b75d1" + ], + "threshold": 2 + } + }, + "spec_version": "1.0", + "version": 25 + } +} diff --git a/pkg/config/remote/service/service_test.go b/pkg/config/remote/service/service_test.go index 6f35c3a4b7b40..4aaf2fb69fe6a 100644 --- a/pkg/config/remote/service/service_test.go +++ b/pkg/config/remote/service/service_test.go @@ -1120,22 +1120,6 @@ type clientTTLTest struct { expected time.Duration } -func TestWithDirectorRootOverride(t *testing.T) { - cfg := configmock.New(t) - cfg.SetWithoutSource("run_path", "/tmp") - - baseRawURL := "https://localhost" - mockTelemetryReporter := 
newMockRcTelemetryReporter() - options := []Option{ - WithDirectorRootOverride("datadoghq.com", "{\"a\": \"b\"}"), - WithAPIKey("abc"), - } - _, err := NewService(cfg, "Remote Config", baseRawURL, "localhost", getHostTags, mockTelemetryReporter, agentVersion, options...) - // Because we used an invalid root, we should get an error. All we're trying to capture - // with this test is that the builder method is propagating the value properly - assert.Errorf(t, err, "failed to set embedded root in roots bucket: invalid meta: version field is missing") -} - func TestWithClientTTL(t *testing.T) { tests := []clientTTLTest{ { diff --git a/pkg/config/remote/service/util.go b/pkg/config/remote/service/util.go index 4f91ed6ceb48b..cfffdd2505e13 100644 --- a/pkg/config/remote/service/util.go +++ b/pkg/config/remote/service/util.go @@ -100,6 +100,7 @@ func openCacheDB(path string, agentVersion string, apiKey string) (*bbolt.DB, er if errors.Is(err, bbolt.ErrTimeout) { return nil, fmt.Errorf("rc db is locked. Please check if another instance of the agent is running and using the same `run_path` parameter") } + log.Infof("Failed to open remote configuration database %s", err) return recreate(path, agentVersion, apiKeyHash) } @@ -124,6 +125,7 @@ func openCacheDB(path string, agentVersion string, apiKey string) (*bbolt.DB, er }) if err != nil { _ = db.Close() + log.Infof("Failed to validate remote configuration database %s", err) return recreate(path, agentVersion, apiKeyHash) } diff --git a/pkg/config/remote/uptane/client_test.go b/pkg/config/remote/uptane/client_test.go index f2258def85635..17316fa1b492b 100644 --- a/pkg/config/remote/uptane/client_test.go +++ b/pkg/config/remote/uptane/client_test.go @@ -61,8 +61,8 @@ func TestClientState(t *testing.T) { // Testing default state clientState, err := client1.State() assert.NoError(t, err) - assert.Equal(t, meta.RootsConfig("datadoghq.com", cfg.GetString("remote_configuration.config_root")).LastVersion(), clientState.ConfigRootVersion()) - assert.Equal(t, meta.RootsDirector("datadoghq.com", cfg.GetString("remote_configuration.director_root")).LastVersion(), clientState.DirectorRootVersion()) + assert.Equal(t, meta.RootsConfig("datadoghq.com", cfg.GetString("remote_configuration.config_root")).Version(), clientState.ConfigRootVersion()) + assert.Equal(t, meta.RootsDirector("datadoghq.com", cfg.GetString("remote_configuration.director_root")).Version(), clientState.DirectorRootVersion()) _, err = client1.TargetsMeta() assert.Error(t, err) diff --git a/pkg/config/remote/uptane/local_store.go b/pkg/config/remote/uptane/local_store.go index 9c56052922359..8a29edcef8dfb 100644 --- a/pkg/config/remote/uptane/local_store.go +++ b/pkg/config/remote/uptane/local_store.go @@ -33,7 +33,7 @@ type localStore struct { store *transactionalStore } -func newLocalStore(db *transactionalStore, repository string, initialRoots meta.EmbeddedRoots) (*localStore, error) { +func newLocalStore(db *transactionalStore, repository string, initialRoots meta.EmbeddedRoot) (*localStore, error) { s := &localStore{ store: db, metasBucket: fmt.Sprintf("%s_metas", repository), @@ -43,13 +43,12 @@ func newLocalStore(db *transactionalStore, repository string, initialRoots meta. 
return s, err } -func (s *localStore) init(initialRoots meta.EmbeddedRoots) error { +func (s *localStore) init(initialRoot meta.EmbeddedRoot) error { err := s.store.update(func(tx *transaction) error { - for _, root := range initialRoots { - err := s.writeRoot(tx, json.RawMessage(root)) - if err != nil { - return fmt.Errorf("failed to set embedded root in roots bucket: %v", err) - } + root := initialRoot.Root() + err := s.writeRoot(tx, json.RawMessage(root)) + if err != nil { + return fmt.Errorf("failed to set embedded root in roots bucket: %v", err) } data, err := tx.get(s.metasBucket, metaRoot) @@ -57,7 +56,7 @@ func (s *localStore) init(initialRoots meta.EmbeddedRoots) error { return err } if data == nil { - tx.put(s.metasBucket, metaRoot, initialRoots.Last()) + tx.put(s.metasBucket, metaRoot, initialRoot.Root()) } return nil }) diff --git a/pkg/config/remote/uptane/local_store_test.go b/pkg/config/remote/uptane/local_store_test.go index 2db006ca5cdaa..a75f08add3034 100644 --- a/pkg/config/remote/uptane/local_store_test.go +++ b/pkg/config/remote/uptane/local_store_test.go @@ -29,16 +29,14 @@ func getTestDB(t *testing.T) *bbolt.DB { func TestLocalStore(t *testing.T) { db := getTestDB(t) - embededRoots := map[uint64]meta.EmbeddedRoot{ - 1: []byte(`{"signatures":[{"keyid":"44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","sig":"366534e35c3ac0749d5b60f12ab32da736863315bb4765eeb7b24417e8b8c40aace37649a12c63f8ad3634fbe2e68711655e72120934cc015414c75725861e08"},{"keyid":"b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8","sig":"ada4a7723d462eb4c1f087025f81f5eab5de48cb18b710de94ad2194ee9e0524fafe6eaddf95e894808f8254380a86f8f7219d69bf693d6e1c80db904a47830e"}],"signed":{"_type":"root","consistent_snapshot":true,"expires":"1970-01-01T00:00:00Z","keys":{"44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796":{"keyid_hash_algorithms":["sha256","sha512"],"keytype":"ed25519","keyval":{"public":"286d6ae328365afec0f92519ceab68cd627e34072cde90b2f5d167badea970f2"},"scheme":"ed25519"},"b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8":{"keyid_hash_algorithms":["sha256","sha512"],"keytype":"ed25519","keyval":{"public":"afdd68be53815d67f8fa99cf101aac4589a358c660adf7dd4e179fe96834d3c9"},"scheme":"ed25519"}},"roles":{"root":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2},"snapshot":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2},"targets":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2},"timestamp":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2}},"spec_version":"1.0","version":1}}`), - 2: 
[]byte(`{"signatures":[{"keyid":"key","sig":"sig2"},{"keyid":"b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8","sig":"ada4a7723d462eb4c1f087025f81f5eab5de48cb18b710de94ad2194ee9e0524fafe6eaddf95e894808f8254380a86f8f7219d69bf693d6e1c80db904a47830e"}],"signed":{"_type":"root","consistent_snapshot":true,"expires":"1970-01-01T00:00:00Z","keys":{"44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796":{"keyid_hash_algorithms":["sha256","sha512"],"keytype":"ed25519","keyval":{"public":"286d6ae328365afec0f92519ceab68cd627e34072cde90b2f5d167badea970f2"},"scheme":"ed25519"},"b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8":{"keyid_hash_algorithms":["sha256","sha512"],"keytype":"ed25519","keyval":{"public":"afdd68be53815d67f8fa99cf101aac4589a358c660adf7dd4e179fe96834d3c9"},"scheme":"ed25519"}},"roles":{"root":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2},"snapshot":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2},"targets":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2},"timestamp":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2}},"spec_version":"1.0","version":2}}`), - } + root := []byte(`{"signatures":[{"keyid":"b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8","sig":"ada4a7723d462eb4c1f087025f81f5eab5de48cb18b710de94ad2194ee9e0524fafe6eaddf95e894808f8254380a86f8f7219d69bf693d6e1c80db904a47830e"}],"signed":{"_type":"root","consistent_snapshot":true,"expires":"1970-01-01T00:00:00Z","keys":{"44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796":{"keyid_hash_algorithms":["sha256","sha512"],"keytype":"ed25519","keyval":{"public":"286d6ae328365afec0f92519ceab68cd627e34072cde90b2f5d167badea970f2"},"scheme":"ed25519"},"b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8":{"keyid_hash_algorithms":["sha256","sha512"],"keytype":"ed25519","keyval":{"public":"afdd68be53815d67f8fa99cf101aac4589a358c660adf7dd4e179fe96834d3c9"},"scheme":"ed25519"}},"roles":{"root":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2},"snapshot":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2},"targets":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2},"timestamp":{"keyids":["44d70fa8eae4c07f26c2767270827b6b9e11e7972926b3b419b5ea14ec32f796","b2b93a6dccc96d053e6db39181124c85ba4156d43503d4351b5500316fa084e8"],"threshold":2}},"spec_version":"1.0","version":2}}`) + embeddedRoots := meta.NewEmbeddedRoot(root) + transactionalStore := newTransactionalStore(db) - store, err := newLocalStore(transactionalStore, "test", embededRoots) + store, err := newLocalStore(transactionalStore, "test", embeddedRoots) assert.NoError(t, err) - storeRoot1 := json.RawMessage(embededRoots[1]) - storeRoot2 := json.RawMessage(embededRoots[2]) + storeRoot2 := json.RawMessage(root) rootVersion, err := 
store.GetMetaVersion("root.json") assert.NoError(t, err) @@ -110,10 +108,9 @@ func TestLocalStore(t *testing.T) { "targets.json": storeTargets7, }, metas) - root1, found, err := store.GetRoot(1) + _, found, err := store.GetRoot(1) assert.NoError(t, err) - assert.True(t, found) - assert.Equal(t, []byte(storeRoot1), root1) + assert.False(t, found) root2, found, err := store.GetRoot(2) assert.NoError(t, err) diff --git a/pkg/config/setup/apm.go b/pkg/config/setup/apm.go index a8558fb1d894e..28d9beaf960e2 100644 --- a/pkg/config/setup/apm.go +++ b/pkg/config/setup/apm.go @@ -21,29 +21,30 @@ import ( const Traces DataType = "traces" func setupAPM(config pkgconfigmodel.Setup) { - config.BindEnv("apm_config.obfuscation.elasticsearch.enabled", "DD_APM_OBFUSCATION_ELASTICSEARCH_ENABLED") - config.BindEnv("apm_config.obfuscation.elasticsearch.keep_values", "DD_APM_OBFUSCATION_ELASTICSEARCH_KEEP_VALUES") - config.BindEnv("apm_config.obfuscation.elasticsearch.obfuscate_sql_values", "DD_APM_OBFUSCATION_ELASTICSEARCH_OBFUSCATE_SQL_VALUES") - config.BindEnv("apm_config.obfuscation.opensearch.enabled", "DD_APM_OBFUSCATION_OPENSEARCH_ENABLED") - config.BindEnv("apm_config.obfuscation.opensearch.keep_values", "DD_APM_OBFUSCATION_OPENSEARCH_KEEP_VALUES") - config.BindEnv("apm_config.obfuscation.opensearch.obfuscate_sql_values", "DD_APM_OBFUSCATION_OPENSEARCH_OBFUSCATE_SQL_VALUES") - config.BindEnv("apm_config.obfuscation.mongodb.enabled", "DD_APM_OBFUSCATION_MONGODB_ENABLED") - config.BindEnv("apm_config.obfuscation.mongodb.keep_values", "DD_APM_OBFUSCATION_MONGODB_KEEP_VALUES") - config.BindEnv("apm_config.obfuscation.mongodb.obfuscate_sql_values", "DD_APM_OBFUSCATION_MONGODB_OBFUSCATE_SQL_VALUES") - config.BindEnv("apm_config.obfuscation.sql_exec_plan.enabled", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_ENABLED") - config.BindEnv("apm_config.obfuscation.sql_exec_plan.keep_values", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_KEEP_VALUES") - config.BindEnv("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_OBFUSCATE_SQL_VALUES") - config.BindEnv("apm_config.obfuscation.sql_exec_plan_normalize.enabled", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_ENABLED") - config.BindEnv("apm_config.obfuscation.sql_exec_plan_normalize.keep_values", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_KEEP_VALUES") - config.BindEnv("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_OBFUSCATE_SQL_VALUES") - config.BindEnv("apm_config.obfuscation.http.remove_query_string", "DD_APM_OBFUSCATION_HTTP_REMOVE_QUERY_STRING") - config.BindEnv("apm_config.obfuscation.http.remove_paths_with_digits", "DD_APM_OBFUSCATION_HTTP_REMOVE_PATHS_WITH_DIGITS") - config.BindEnv("apm_config.obfuscation.remove_stack_traces", "DD_APM_OBFUSCATION_REMOVE_STACK_TRACES") - config.BindEnv("apm_config.obfuscation.redis.enabled", "DD_APM_OBFUSCATION_REDIS_ENABLED") - config.BindEnv("apm_config.obfuscation.redis.remove_all_args", "DD_APM_OBFUSCATION_REDIS_REMOVE_ALL_ARGS") - config.BindEnv("apm_config.obfuscation.memcached.enabled", "DD_APM_OBFUSCATION_MEMCACHED_ENABLED") - config.BindEnv("apm_config.obfuscation.memcached.keep_command", "DD_APM_OBFUSCATION_MEMCACHED_KEEP_COMMAND") - config.BindEnv("apm_config.obfuscation.cache.enabled", "DD_APM_OBFUSCATION_CACHE_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.elasticsearch.enabled", true, "DD_APM_OBFUSCATION_ELASTICSEARCH_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.elasticsearch.keep_values", 
[]string{}, "DD_APM_OBFUSCATION_ELASTICSEARCH_KEEP_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.elasticsearch.obfuscate_sql_values", []string{}, "DD_APM_OBFUSCATION_ELASTICSEARCH_OBFUSCATE_SQL_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.opensearch.enabled", true, "DD_APM_OBFUSCATION_OPENSEARCH_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.opensearch.keep_values", []string{}, "DD_APM_OBFUSCATION_OPENSEARCH_KEEP_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.opensearch.obfuscate_sql_values", []string{}, "DD_APM_OBFUSCATION_OPENSEARCH_OBFUSCATE_SQL_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.mongodb.enabled", true, "DD_APM_OBFUSCATION_MONGODB_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.mongodb.keep_values", []string{}, "DD_APM_OBFUSCATION_MONGODB_KEEP_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.mongodb.obfuscate_sql_values", []string{}, "DD_APM_OBFUSCATION_MONGODB_OBFUSCATE_SQL_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.sql_exec_plan.enabled", false, "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.sql_exec_plan.keep_values", []string{}, "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_KEEP_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values", []string{}, "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_OBFUSCATE_SQL_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.sql_exec_plan_normalize.enabled", false, "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.sql_exec_plan_normalize.keep_values", []string{}, "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_KEEP_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values", []string{}, "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_OBFUSCATE_SQL_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.http.remove_query_string", false, "DD_APM_OBFUSCATION_HTTP_REMOVE_QUERY_STRING") + config.BindEnvAndSetDefault("apm_config.obfuscation.http.remove_paths_with_digits", false, "DD_APM_OBFUSCATION_HTTP_REMOVE_PATHS_WITH_DIGITS") + config.BindEnvAndSetDefault("apm_config.obfuscation.remove_stack_traces", false, "DD_APM_OBFUSCATION_REMOVE_STACK_TRACES") + config.BindEnvAndSetDefault("apm_config.obfuscation.redis.enabled", true, "DD_APM_OBFUSCATION_REDIS_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.redis.remove_all_args", false, "DD_APM_OBFUSCATION_REDIS_REMOVE_ALL_ARGS") + config.BindEnvAndSetDefault("apm_config.obfuscation.memcached.enabled", true, "DD_APM_OBFUSCATION_MEMCACHED_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.memcached.keep_command", false, "DD_APM_OBFUSCATION_MEMCACHED_KEEP_COMMAND") + config.BindEnvAndSetDefault("apm_config.obfuscation.cache.enabled", true, "DD_APM_OBFUSCATION_CACHE_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.cache.max_size", 5000000, "DD_APM_OBFUSCATION_CACHE_MAX_SIZE") config.SetKnown("apm_config.filter_tags.require") config.SetKnown("apm_config.filter_tags.reject") config.SetKnown("apm_config.filter_tags_regex.require") @@ -151,9 +152,10 @@ func setupAPM(config pkgconfigmodel.Setup) { config.BindEnv("apm_config.install_id", "DD_INSTRUMENTATION_INSTALL_ID") config.BindEnv("apm_config.install_type", "DD_INSTRUMENTATION_INSTALL_TYPE") config.BindEnv("apm_config.install_time", "DD_INSTRUMENTATION_INSTALL_TIME") - 
config.BindEnv("apm_config.obfuscation.credit_cards.enabled", "DD_APM_OBFUSCATION_CREDIT_CARDS_ENABLED") - config.BindEnv("apm_config.obfuscation.credit_cards.luhn", "DD_APM_OBFUSCATION_CREDIT_CARDS_LUHN") - config.BindEnv("apm_config.obfuscation.credit_cards.keep_values", "DD_APM_OBFUSCATION_CREDIT_CARDS_KEEP_VALUES") + config.BindEnvAndSetDefault("apm_config.obfuscation.credit_cards.enabled", true, "DD_APM_OBFUSCATION_CREDIT_CARDS_ENABLED") + config.BindEnvAndSetDefault("apm_config.obfuscation.credit_cards.luhn", false, "DD_APM_OBFUSCATION_CREDIT_CARDS_LUHN") + config.BindEnvAndSetDefault("apm_config.obfuscation.credit_cards.keep_values", []string{}, "DD_APM_OBFUSCATION_CREDIT_CARDS_KEEP_VALUES") + config.BindEnvAndSetDefault("apm_config.sql_obfuscation_mode", "", "DD_APM_SQL_OBFUSCATION_MODE") config.BindEnvAndSetDefault("apm_config.debug.port", 5012, "DD_APM_DEBUG_PORT") config.BindEnv("apm_config.features", "DD_APM_FEATURES") config.ParseEnvAsStringSlice("apm_config.features", func(s string) []string { diff --git a/pkg/config/setup/config.go b/pkg/config/setup/config.go index 53ef845aa8fac..3f3c0ed50d4d6 100644 --- a/pkg/config/setup/config.go +++ b/pkg/config/setup/config.go @@ -33,7 +33,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/teeconfig" "github.com/DataDog/datadog-agent/pkg/util/hostname/validate" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/scrubber" "github.com/DataDog/datadog-agent/pkg/util/system" ) @@ -86,6 +86,9 @@ const ( // in situations where we have a high value for `GOMAXPROCS`. DefaultZstdCompressionLevel = 1 + // DefaultGzipCompressionLevel is the default gzip compression level for logs. 
+ DefaultGzipCompressionLevel = 6 + // DefaultLogsSenderBackoffFactor is the default logs sender backoff randomness factor DefaultLogsSenderBackoffFactor = 2.0 @@ -412,12 +415,10 @@ func InitConfig(config pkgconfigmodel.Setup) { config.SetKnown("snmp_listener.min_collection_interval") config.SetKnown("snmp_listener.namespace") config.SetKnown("snmp_listener.use_device_id_as_hostname") - config.SetKnown("snmp_listener.ping") config.SetKnown("snmp_listener.ping.enabled") config.SetKnown("snmp_listener.ping.count") config.SetKnown("snmp_listener.ping.interval") config.SetKnown("snmp_listener.ping.timeout") - config.SetKnown("snmp_listener.ping.linux") config.SetKnown("snmp_listener.ping.linux.use_raw_socket") // network_devices.autodiscovery has precedence over snmp_listener config @@ -433,12 +434,10 @@ func InitConfig(config pkgconfigmodel.Setup) { config.SetKnown("network_devices.autodiscovery.min_collection_interval") config.SetKnown("network_devices.autodiscovery.namespace") config.SetKnown("network_devices.autodiscovery.use_device_id_as_hostname") - config.SetKnown("network_devices.autodiscovery.ping") config.SetKnown("network_devices.autodiscovery.ping.enabled") config.SetKnown("network_devices.autodiscovery.ping.count") config.SetKnown("network_devices.autodiscovery.ping.interval") config.SetKnown("network_devices.autodiscovery.ping.timeout") - config.SetKnown("network_devices.autodiscovery.ping.linux") config.SetKnown("network_devices.autodiscovery.ping.linux.use_raw_socket") bindEnvAndSetLogsConfigKeys(config, "network_devices.snmp_traps.forwarder.") @@ -573,7 +572,7 @@ func InitConfig(config pkgconfigmodel.Setup) { config.BindEnvAndSetDefault("ecs_agent_container_name", "ecs-agent") config.BindEnvAndSetDefault("ecs_collect_resource_tags_ec2", false) config.BindEnvAndSetDefault("ecs_resource_tags_replace_colon", false) - config.BindEnvAndSetDefault("ecs_metadata_timeout", 500) // value in milliseconds + config.BindEnvAndSetDefault("ecs_metadata_timeout", 1000) // value in milliseconds config.BindEnvAndSetDefault("ecs_metadata_retry_initial_interval", 100*time.Millisecond) config.BindEnvAndSetDefault("ecs_metadata_retry_max_elapsed_time", 3000*time.Millisecond) config.BindEnvAndSetDefault("ecs_metadata_retry_timeout_factor", 3) @@ -708,8 +707,9 @@ func InitConfig(config pkgconfigmodel.Setup) { // Cluster check Autodiscovery config.BindEnvAndSetDefault("cluster_checks.support_hybrid_ignore_ad_tags", false) // TODO(CINT)(Agent 7.53+) Remove this flag when hybrid ignore_ad_tags is fully deprecated config.BindEnvAndSetDefault("cluster_checks.enabled", false) - config.BindEnvAndSetDefault("cluster_checks.node_expiration_timeout", 30) // value in seconds - config.BindEnvAndSetDefault("cluster_checks.warmup_duration", 30) // value in seconds + config.BindEnvAndSetDefault("cluster_checks.node_expiration_timeout", 30) // value in seconds + config.BindEnvAndSetDefault("cluster_checks.warmup_duration", 30) // value in seconds + config.BindEnvAndSetDefault("cluster_checks.unscheduled_check_threshold", 60) // value in seconds config.BindEnvAndSetDefault("cluster_checks.cluster_tag_name", "cluster_name") config.BindEnvAndSetDefault("cluster_checks.extra_tags", []string{}) config.BindEnvAndSetDefault("cluster_checks.advanced_dispatching_enabled", false) @@ -729,6 +729,9 @@ func InitConfig(config pkgconfigmodel.Setup) { config.BindEnvAndSetDefault("clc_runner_server_readheader_timeout", 10) config.BindEnvAndSetDefault("clc_runner_remote_tagger_enabled", false) + // Remote tagger + 
config.BindEnvAndSetDefault("remote_tagger.max_concurrent_sync", 3) + // Admission controller config.BindEnvAndSetDefault("admission_controller.enabled", false) config.BindEnvAndSetDefault("admission_controller.validation.enabled", true) @@ -1282,7 +1285,11 @@ func telemetry(config pkgconfigmodel.Setup) { // Agent Telemetry config.BindEnvAndSetDefault("agent_telemetry.enabled", true) + // default compression first setup inside the next bindEnvAndSetLogsConfigKeys() function ... bindEnvAndSetLogsConfigKeys(config, "agent_telemetry.") + // ... and overridden by the following two lines - do not switch these 3 lines order + config.BindEnvAndSetDefault("agent_telemetry.compression_level", 1) + config.BindEnvAndSetDefault("agent_telemetry.use_compression", true) } func serializer(config pkgconfigmodel.Setup) { @@ -1559,6 +1566,7 @@ func logsagent(config pkgconfigmodel.Setup) { // Experimental auto multiline detection settings (these are subject to change until the feature is no longer experimental) config.BindEnvAndSetDefault("logs_config.experimental_auto_multi_line_detection", false) + config.BindEnv("logs_config.auto_multi_line_detection_custom_samples") config.SetKnown("logs_config.auto_multi_line_detection_custom_samples") config.BindEnvAndSetDefault("logs_config.auto_multi_line.enable_json_detection", true) config.BindEnvAndSetDefault("logs_config.auto_multi_line.enable_datetime_detection", true) @@ -1789,12 +1797,12 @@ func LoadProxyFromEnv(config pkgconfigmodel.Config) { // LoadWithoutSecret reads configs files, initializes the config module without decrypting any secrets func LoadWithoutSecret(config pkgconfigmodel.Config, additionalEnvVars []string) (*pkgconfigmodel.Warnings, error) { - return LoadDatadogCustom(config, "datadog.yaml", optional.NewNoneOption[secrets.Component](), additionalEnvVars) + return LoadDatadogCustom(config, "datadog.yaml", option.None[secrets.Component](), additionalEnvVars) } // LoadWithSecret reads config files and initializes config with decrypted secrets func LoadWithSecret(config pkgconfigmodel.Config, secretResolver secrets.Component, additionalEnvVars []string) (*pkgconfigmodel.Warnings, error) { - return LoadDatadogCustom(config, "datadog.yaml", optional.NewOption[secrets.Component](secretResolver), additionalEnvVars) + return LoadDatadogCustom(config, "datadog.yaml", option.New[secrets.Component](secretResolver), additionalEnvVars) } // Merge will merge additional configuration into an existing configuration @@ -1928,6 +1936,8 @@ func findUnknownEnvVars(config pkgconfigmodel.Config, environ []string, addition // these variables are used by source code integration "DD_GIT_COMMIT_SHA": {}, "DD_GIT_REPOSITORY_URL": {}, + // signals whether or not ADP is enabled + "DD_ADP_ENABLED": {}, } for _, key := range config.GetEnvVars() { knownVars[key] = struct{}{} @@ -1977,7 +1987,7 @@ func checkConflictingOptions(config pkgconfigmodel.Config) error { } // LoadDatadogCustom loads the datadog config in the given config -func LoadDatadogCustom(config pkgconfigmodel.Config, origin string, secretResolver optional.Option[secrets.Component], additionalKnownEnvVars []string) (*pkgconfigmodel.Warnings, error) { +func LoadDatadogCustom(config pkgconfigmodel.Config, origin string, secretResolver option.Option[secrets.Component], additionalKnownEnvVars []string) (*pkgconfigmodel.Warnings, error) { // Feature detection running in a defer func as it always need to run (whether config load has been successful or not) // Because some Agents (e.g. 
trace-agent) will run even if config file does not exist defer func() { @@ -2414,7 +2424,9 @@ func bindEnvAndSetLogsConfigKeys(config pkgconfigmodel.Setup, prefix string) { config.BindEnv(prefix + "dd_url") config.BindEnv(prefix + "additional_endpoints") config.BindEnvAndSetDefault(prefix+"use_compression", true) - config.BindEnvAndSetDefault(prefix+"compression_level", 6) // Default level for the gzip/deflate algorithm + config.BindEnvAndSetDefault(prefix+"compression_kind", "gzip") + config.BindEnvAndSetDefault(prefix+"zstd_compression_level", DefaultZstdCompressionLevel) // Default level for the zstd algorithm + config.BindEnvAndSetDefault(prefix+"compression_level", DefaultGzipCompressionLevel) // Default level for the gzip algorithm config.BindEnvAndSetDefault(prefix+"batch_wait", DefaultBatchWait) config.BindEnvAndSetDefault(prefix+"connection_reset_interval", 0) // in seconds, 0 means disabled config.BindEnvAndSetDefault(prefix+"logs_no_ssl", false) diff --git a/pkg/config/setup/config_secret_test.go b/pkg/config/setup/config_secret_test.go index b939322bae563..9d93af293f69f 100644 --- a/pkg/config/setup/config_secret_test.go +++ b/pkg/config/setup/config_secret_test.go @@ -19,7 +19,7 @@ import ( nooptelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) var testAdditionalEndpointsConf = []byte(` @@ -177,7 +177,7 @@ func TestProxyWithSecret(t *testing.T) { c.setup(t, config, configPath, resolver.(secrets.Mock)) } - _, err := LoadDatadogCustom(config, "unit_test", optional.NewOption[secrets.Component](resolver), nil) + _, err := LoadDatadogCustom(config, "unit_test", option.New[secrets.Component](resolver), nil) require.NoError(t, err) c.tests(t, config) diff --git a/pkg/config/setup/config_test.go b/pkg/config/setup/config_test.go index a02ffa8b6af72..f8b6e17052872 100644 --- a/pkg/config/setup/config_test.go +++ b/pkg/config/setup/config_test.go @@ -25,7 +25,7 @@ import ( nooptelemetry "github.com/DataDog/datadog-agent/comp/core/telemetry/noopsimpl" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/scrubber" ) @@ -469,7 +469,7 @@ func TestProxy(t *testing.T) { c.setup(t, config) } - _, err := LoadDatadogCustom(config, "unit_test", optional.NewOption[secrets.Component](resolver), nil) + _, err := LoadDatadogCustom(config, "unit_test", option.New[secrets.Component](resolver), nil) require.NoError(t, err) c.tests(t, config) @@ -577,7 +577,7 @@ func TestDatabaseMonitoringAurora(t *testing.T) { c.setup(t, config) } - _, err := LoadDatadogCustom(config, "unit_test", optional.NewOption[secrets.Component](resolver), nil) + _, err := LoadDatadogCustom(config, "unit_test", option.New[secrets.Component](resolver), nil) require.NoError(t, err) c.tests(t, config) @@ -1471,6 +1471,39 @@ func TestDisableCoreAgent(t *testing.T) { assert.False(t, conf.GetBool("enable_payloads.sketches")) } +func TestAPMObfuscationDefaultValue(t *testing.T) { + pkgconfigmodel.CleanOverride(t) + conf := pkgconfigmodel.NewConfig("datadog", "DD", strings.NewReplacer(".", "_")) // nolint: forbidigo // legit use case + + InitConfig(conf) + assert.True(t, 
conf.GetBool("apm_config.obfuscation.elasticsearch.enabled")) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.elasticsearch.keep_values"), 0) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.elasticsearch.obfuscate_sql_values"), 0) + assert.True(t, conf.GetBool("apm_config.obfuscation.opensearch.enabled")) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.opensearch.keep_values"), 0) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.opensearch.obfuscate_sql_values"), 0) + assert.True(t, conf.GetBool("apm_config.obfuscation.mongodb.enabled")) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.mongodb.keep_values"), 0) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.mongodb.obfuscate_sql_values"), 0) + assert.False(t, conf.GetBool("apm_config.obfuscation.sql_exec_plan.enabled")) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.sql_exec_plan.keep_values"), 0) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.sql_exec_plan.obfuscate_sql_values"), 0) + assert.False(t, conf.GetBool("apm_config.obfuscation.sql_exec_plan_normalize.enabled")) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.keep_values"), 0) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.sql_exec_plan_normalize.obfuscate_sql_values"), 0) + assert.False(t, conf.GetBool("apm_config.obfuscation.http.remove_query_string")) + assert.False(t, conf.GetBool("apm_config.obfuscation.http.remove_paths_with_digits")) + assert.True(t, conf.GetBool("apm_config.obfuscation.redis.enabled")) + assert.False(t, conf.GetBool("apm_config.obfuscation.redis.remove_all_args")) + assert.True(t, conf.GetBool("apm_config.obfuscation.memcached.enabled")) + assert.False(t, conf.GetBool("apm_config.obfuscation.memcached.keep_command")) + assert.True(t, conf.GetBool("apm_config.obfuscation.credit_cards.enabled")) + assert.False(t, conf.GetBool("apm_config.obfuscation.credit_cards.luhn")) + assert.Len(t, conf.GetStringSlice("apm_config.obfuscation.credit_cards.keep_values"), 0) + assert.True(t, conf.GetBool("apm_config.obfuscation.cache.enabled")) + assert.Equal(t, int64(5000000), conf.GetInt64("apm_config.obfuscation.cache.max_size")) +} + func TestAgentConfigInit(t *testing.T) { conf := newTestConf() @@ -1496,7 +1529,7 @@ flare_stripped_keys: require.NoError(t, err) cfg.SetConfigFile(configPath) - _, err = LoadDatadogCustom(cfg, "test", optional.NewNoneOption[secrets.Component](), []string{}) + _, err = LoadDatadogCustom(cfg, "test", option.None[secrets.Component](), []string{}) require.NoError(t, err) stringToScrub := `api_key: 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' diff --git a/pkg/config/setup/go.mod b/pkg/config/setup/go.mod index efed57c318b83..2ae00007c1036 100644 --- a/pkg/config/setup/go.mod +++ b/pkg/config/setup/go.mod @@ -20,7 +20,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system @@ -37,18 +37,18 @@ require ( github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 
github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 github.com/stretchr/testify v1.10.0 go.uber.org/fx v1.23.0 gopkg.in/yaml.v2 v2.4.0 @@ -59,8 +59,8 @@ require ( github.com/DataDog/datadog-agent/comp/core/flare/builder v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/core/flare/types v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect @@ -76,21 +76,21 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 
// indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -101,10 +101,10 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/config/setup/go.sum b/pkg/config/setup/go.sum index eb1a58f09f4b8..435dfd89ebf9c 100644 --- a/pkg/config/setup/go.sum +++ b/pkg/config/setup/go.sum @@ -116,8 +116,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -145,8 +145,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -163,8 +163,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 
h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -179,8 +179,8 @@ github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/f github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -244,8 +244,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -282,8 +282,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -311,8 +311,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/config/setup/process.go b/pkg/config/setup/process.go index 088dadaf90376..1375cfe53e48e 100644 --- a/pkg/config/setup/process.go +++ b/pkg/config/setup/process.go @@ -224,7 +224,7 @@ func overrideRunInCoreAgentConfig(config pkgconfigmodel.Config) { // loadProcessTransforms loads transforms associated with process config settings. func loadProcessTransforms(config pkgconfigmodel.Config) { if config.IsSet("process_config.enabled") { - log.Info("process_config.enabled is deprecated, use process_config.container_collection.enabled " + + log.Warn("process_config.enabled is deprecated, use process_config.container_collection.enabled " + "and process_config.process_collection.enabled instead, " + "see https://docs.datadoghq.com/infrastructure/process#installation for more information") procConfigEnabled := strings.ToLower(config.GetString("process_config.enabled")) diff --git a/pkg/config/setup/system_probe.go b/pkg/config/setup/system_probe.go index 12a94b923c7b7..d3326fbbe3fe6 100644 --- a/pkg/config/setup/system_probe.go +++ b/pkg/config/setup/system_probe.go @@ -195,8 +195,11 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault(join(spNS, "max_tracked_connections"), 65536) cfg.BindEnv(join(spNS, "max_closed_connections_buffered")) cfg.BindEnv(join(netNS, "max_failed_connections_buffered")) - cfg.BindEnvAndSetDefault(join(spNS, "closed_connection_flush_threshold"), 0) - cfg.BindEnvAndSetDefault(join(spNS, "closed_channel_size"), 500) + cfg.BindEnv(join(spNS, "closed_connection_flush_threshold")) + cfg.BindEnv(join(netNS, "closed_connection_flush_threshold")) + cfg.BindEnv(join(spNS, "closed_channel_size")) + cfg.BindEnv(join(netNS, "closed_channel_size")) + cfg.BindEnvAndSetDefault(join(netNS, "closed_buffer_wakeup_count"), 4) cfg.BindEnvAndSetDefault(join(spNS, "max_connection_state_buffered"), 75000) cfg.BindEnvAndSetDefault(join(spNS, "disable_dns_inspection"), false, "DD_DISABLE_DNS_INSPECTION") @@ -212,6 +215,7 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault(join(spNS, "enable_conntrack_all_namespaces"), true, "DD_SYSTEM_PROBE_ENABLE_CONNTRACK_ALL_NAMESPACES") cfg.BindEnvAndSetDefault(join(netNS, 
"enable_protocol_classification"), true, "DD_ENABLE_PROTOCOL_CLASSIFICATION") cfg.BindEnvAndSetDefault(join(netNS, "enable_ringbuffers"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_RINGBUFFERS") + cfg.BindEnvAndSetDefault(join(netNS, "enable_custom_batching"), false, "DD_SYSTEM_PROBE_NETWORK_ENABLE_CUSTOM_BATCHING") cfg.BindEnvAndSetDefault(join(netNS, "enable_tcp_failed_connections"), true, "DD_SYSTEM_PROBE_NETWORK_ENABLE_FAILED_CONNS") cfg.BindEnvAndSetDefault(join(netNS, "ignore_conntrack_init_failure"), false, "DD_SYSTEM_PROBE_NETWORK_IGNORE_CONNTRACK_INIT_FAILURE") cfg.BindEnvAndSetDefault(join(netNS, "conntrack_init_timeout"), 10*time.Second) @@ -276,6 +280,8 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnv(join(smNS, "enable_connection_rollup")) cfg.BindEnv(join(smNS, "enable_ring_buffers")) cfg.BindEnvAndSetDefault(join(smNS, "enable_event_stream"), true) + cfg.BindEnv(join(smNS, "kernel_buffer_pages")) + cfg.BindEnv(join(smNS, "data_channel_size")) oldHTTPRules := join(netNS, "http_replace_rules") newHTTPRules := join(smNS, "http_replace_rules") @@ -357,25 +363,26 @@ func InitSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault(join(evNS, "network_process", "enabled"), true, "DD_SYSTEM_PROBE_EVENT_MONITORING_NETWORK_PROCESS_ENABLED") eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "enable_all_probes"), false) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "enable_kernel_filters"), true) - eventMonitorBindEnv(cfg, join(evNS, "enable_approvers")) - eventMonitorBindEnv(cfg, join(evNS, "enable_discarders")) + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "enable_approvers"), false) // will be set to true by sanitize() if enable_kernel_filters is true + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "enable_discarders"), false) // will be set to true by sanitize() if enable_kernel_filters is true eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "flush_discarder_window"), 3) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "pid_cache_size"), 10000) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "events_stats.tags_cardinality"), "high") eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "custom_sensitive_words"), []string{}) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "erpc_dentry_resolution_enabled"), true) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "map_dentry_resolution_enabled"), true) - eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "dentry_cache_size"), 1024) + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "dentry_cache_size"), 8000) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "runtime_monitor.enabled"), false) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.lazy_interface_prefixes"), []string{}) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.classifier_priority"), 10) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.classifier_handle"), 0) + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.flow_monitor.enabled"), false) + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.flow_monitor.sk_storage.enabled"), false) + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.flow_monitor.period"), "10s") eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.raw_classifier_handle"), 0) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "event_stream.use_ring_buffer"), true) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "event_stream.use_fentry"), true) - eventMonitorBindEnv(cfg, join(evNS, "event_stream.use_fentry_amd64")) - eventMonitorBindEnv(cfg, join(evNS, 
"event_stream.use_fentry_arm64")) - eventMonitorBindEnv(cfg, join(evNS, "event_stream.buffer_size")) + eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "event_stream.buffer_size"), 0) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "envs_with_value"), []string{"LD_PRELOAD", "LD_LIBRARY_PATH", "PATH", "HISTSIZE", "HISTFILESIZE", "GLIBC_TUNABLES"}) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "runtime_compilation.enabled"), false) eventMonitorBindEnvAndSetDefault(cfg, join(evNS, "network.enabled"), true) diff --git a/pkg/config/setup/system_probe_cws.go b/pkg/config/setup/system_probe_cws.go index c689e1fd68e28..b831c58087600 100644 --- a/pkg/config/setup/system_probe_cws.go +++ b/pkg/config/setup/system_probe_cws.go @@ -19,7 +19,7 @@ func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) { // CWS - general config cfg.BindEnvAndSetDefault("runtime_security_config.enabled", false) - cfg.BindEnv("runtime_security_config.fim_enabled") + cfg.BindEnvAndSetDefault("runtime_security_config.fim_enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.per_rule_enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.policies.monitor.report_internal_policies", false) @@ -122,7 +122,7 @@ func initCWSSystemProbeConfig(cfg pkgconfigmodel.Config) { cfg.BindEnvAndSetDefault("runtime_security_config.user_sessions.cache_size", 1024) // CWS -eBPF Less - cfg.BindEnv("runtime_security_config.ebpfless.enabled") + cfg.BindEnvAndSetDefault("runtime_security_config.ebpfless.enabled", false) cfg.BindEnvAndSetDefault("runtime_security_config.ebpfless.socket", constants.DefaultEBPFLessProbeAddr) // CWS - IMDS diff --git a/pkg/config/structure/go.mod b/pkg/config/structure/go.mod index 0ad28fd9f92f5..d71e2febbf183 100644 --- a/pkg/config/structure/go.mod +++ b/pkg/config/structure/go.mod @@ -21,7 +21,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../pkg/util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system @@ -34,10 +34,10 @@ replace ( replace github.com/spf13/cast => github.com/DataDog/cast v1.8.0 require ( - github.com/DataDog/datadog-agent/pkg/config/model v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 github.com/DataDog/viper v1.14.0 - github.com/spf13/cast v1.7.0 + github.com/spf13/cast v1.7.1 github.com/stretchr/testify v1.10.0 ) @@ -58,8 +58,8 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // 
indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/config/structure/go.sum b/pkg/config/structure/go.sum index c5a94912c5617..a7916d2899ef1 100644 --- a/pkg/config/structure/go.sum +++ b/pkg/config/structure/go.sum @@ -188,8 +188,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -222,8 +222,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= diff --git a/pkg/config/teeconfig/go.mod b/pkg/config/teeconfig/go.mod index daec9f976da68..6ed034b32af15 100644 --- a/pkg/config/teeconfig/go.mod +++ b/pkg/config/teeconfig/go.mod @@ -9,7 +9,7 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/pkg/config/model v0.0.0-00010101000000-000000000000 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 github.com/DataDog/viper v1.14.0 ) @@ -25,12 +25,11 @@ require ( github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/config/teeconfig/go.sum 
b/pkg/config/teeconfig/go.sum index 4ffd1e9d2ce0a..5f700e05d94a8 100644 --- a/pkg/config/teeconfig/go.sum +++ b/pkg/config/teeconfig/go.sum @@ -147,8 +147,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -185,8 +185,6 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -219,8 +217,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= diff --git a/pkg/config/teeconfig/teeconfig.go b/pkg/config/teeconfig/teeconfig.go index ba787dd5db77b..02a6c3afdf2d0 100644 --- a/pkg/config/teeconfig/teeconfig.go +++ b/pkg/config/teeconfig/teeconfig.go @@ -138,6 +138,14 @@ func (t *teeConfig) IsSet(key string) bool { return base } +// IsConfigured returns true if a setting is configured by the user (i.e. the value doesn't come from the defaults) +func (t *teeConfig) IsConfigured(key string) bool { + base := t.baseline.IsConfigured(key) + compare :=
t.compare.IsConfigured(key) + t.compareResult(key, "IsConfigured", base, compare) + return base +} + func (t *teeConfig) AllKeysLowercased() []string { base := t.baseline.AllKeysLowercased() compare := t.compare.AllKeysLowercased() diff --git a/pkg/config/utils/go.mod b/pkg/config/utils/go.mod index ef80f1fde725b..fa26a0e5e4f9a 100644 --- a/pkg/config/utils/go.mod +++ b/pkg/config/utils/go.mod @@ -23,7 +23,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../../util/system @@ -35,10 +35,10 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/DataDog/datadog-agent/pkg/version v0.59.1 github.com/stretchr/testify v1.10.0 ) @@ -47,17 +47,17 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect @@ -68,24 +68,24 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - 
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/config/utils/go.sum b/pkg/config/utils/go.sum index 7fdf16db5981c..15a68c06d091d 100644 --- a/pkg/config/utils/go.sum +++ b/pkg/config/utils/go.sum @@ -71,7 +71,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -109,8 +108,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -137,8 +136,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -155,8 +154,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -169,8 +168,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -181,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -235,8 +234,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -273,8 +272,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -302,8 +301,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/diagnose/check.go b/pkg/diagnose/check.go index 9bef56f3fa4d6..cfdddee24e14e 100644 --- a/pkg/diagnose/check.go +++ b/pkg/diagnose/check.go @@ -23,7 +23,7 @@ import ( pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" pkglog "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) func getInstanceDiagnoses(instance check.Check) []diagnosis.Diagnosis { @@ -74,7 +74,7 @@ func diagnoseChecksInAgentProcess(collector collector.Component) []diagnosis.Dia return diagnoses } -func diagnoseChecksInCLIProcess(_ diagnosis.Config, senderManager diagnosesendermanager.Component, _ integrations.Component, secretResolver secrets.Component, wmeta optional.Option[workloadmeta.Component], ac autodiscovery.Component, tagger tagger.Component) []diagnosis.Diagnosis { +func diagnoseChecksInCLIProcess(_ diagnosis.Config, senderManager diagnosesendermanager.Component, _ integrations.Component, secretResolver secrets.Component, wmeta option.Option[workloadmeta.Component], ac autodiscovery.Component, tagger tagger.Component) []diagnosis.Diagnosis { // other choices // run() github.com\DataDog\datadog-agent\pkg\cli\subcommands\check\command.go // runCheck() github.com\DataDog\datadog-agent\cmd\agent\gui\checks.go @@ -110,7 +110,7 @@ func diagnoseChecksInCLIProcess(_ diagnosis.Config, senderManager diagnosesender // Create the CheckScheduler, but do not attach it to // AutoDiscovery. 
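The hunk above, together with the InitCheckScheduler call that continues just below, moves the diagnose code from the old optional package to the renamed pkg/util/option package, so empty dependencies are now built with option.None[T](). A minimal Go sketch of that call-site pattern, using a stand-in Option type rather than the real pkg/util/option API:

package main

import "fmt"

// Option is a simplified stand-in for the generic container assumed to live in
// pkg/util/option after the rename; only the shape needed to illustrate the
// call sites in this diff is modeled.
type Option[T any] struct {
	value T
	set   bool
}

// None returns an empty Option, mirroring the option.None[T]() calls in the diff.
func None[T any]() Option[T] { return Option[T]{} }

// Get returns the wrapped value and whether it was set.
func (o Option[T]) Get() (T, bool) { return o.value, o.set }

func main() {
	// The CLI diagnose path deliberately runs without a collector component,
	// so the scheduler receives an empty Option instead of a nil pointer.
	collector := None[string]()
	if _, ok := collector.Get(); !ok {
		fmt.Println("no collector wired in; the check scheduler runs without one")
	}
}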
- pkgcollector.InitCheckScheduler(optional.NewNoneOption[collector.Component](), senderManagerInstance, optional.NewNoneOption[integrations.Component](), tagger) + pkgcollector.InitCheckScheduler(option.None[collector.Component](), senderManagerInstance, option.None[integrations.Component](), tagger) // Load matching configurations (should we use common.AC.GetAllConfigs()) waitCtx, cancelTimeout := context.WithTimeout(context.Background(), time.Duration(5*time.Second)) diff --git a/pkg/diagnose/connectivity/core_endpoint.go b/pkg/diagnose/connectivity/core_endpoint.go index 81e1ea946c5d9..7554094775634 100644 --- a/pkg/diagnose/connectivity/core_endpoint.go +++ b/pkg/diagnose/connectivity/core_endpoint.go @@ -25,13 +25,45 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/diagnose/diagnosis" logshttp "github.com/DataDog/datadog-agent/pkg/logs/client/http" + logstcp "github.com/DataDog/datadog-agent/pkg/logs/client/tcp" "github.com/DataDog/datadog-agent/pkg/util/scrubber" ) -func getLogsHTTPEndpoints() (*logsConfig.Endpoints, error) { +func getLogsEndpoints(useTCP bool) (*logsConfig.Endpoints, error) { datadogConfig := pkgconfigsetup.Datadog() logsConfigKey := logsConfig.NewLogsConfigKeys("logs_config.", datadogConfig) - return logsConfig.BuildHTTPEndpointsWithConfig(datadogConfig, logsConfigKey, "agent-http-intake.logs.", "logs", logsConfig.AgentJSONIntakeProtocol, logsConfig.DefaultIntakeOrigin) + + var endpoints *logsConfig.Endpoints + var err error + + if useTCP { + endpoints, err = logsConfig.BuildEndpointsWithConfig( + datadogConfig, + logsConfigKey, + "agent-http-intake.logs.", + false, + "logs", + logsConfig.AgentJSONIntakeProtocol, + logsConfig.DefaultIntakeOrigin) + } else { + endpoints, err = logsConfig.BuildHTTPEndpointsWithConfig( + datadogConfig, + logsConfigKey, + "agent-http-intake.logs.", + "logs", + logsConfig.AgentJSONIntakeProtocol, + logsConfig.DefaultIntakeOrigin) + } + + return endpoints, err +} + +// getLogsUseTCP returns true if the agent should use TCP to transport logs +func getLogsUseTCP() bool { + datadogConfig := pkgconfigsetup.Datadog() + useTCP := datadogConfig.GetBool("logs_config.force_use_tcp") && !datadogConfig.GetBool("logs_config.force_use_http") + + return useTCP } // Diagnose performs connectivity diagnosis @@ -57,7 +89,8 @@ func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { // Create diagnosis for logs if pkgconfigsetup.Datadog().GetBool("logs_enabled") { - endpoints, err := getLogsHTTPEndpoints() + useTCP := getLogsUseTCP() + endpoints, err := getLogsEndpoints(useTCP) if err != nil { diagnoses = append(diagnoses, diagnosis.Diagnosis{ @@ -68,9 +101,16 @@ func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { RawError: err.Error(), }) } else { - url, err := logshttp.CheckConnectivityDiagnose(endpoints.Main, pkgconfigsetup.Datadog()) + var url string + connType := "HTTPS" + if useTCP { + connType = "TCP" + url, err = logstcp.CheckConnectivityDiagnose(endpoints.Main, 5) + } else { + url, err = logshttp.CheckConnectivityDiagnose(endpoints.Main, pkgconfigsetup.Datadog()) + } - name := fmt.Sprintf("Connectivity to %s", url) + name := fmt.Sprintf("%s connectivity to %s", connType, url) diag := createDiagnosis(name, url, "", err) diagnoses = append(diagnoses, diag) @@ -105,6 +145,7 @@ func Diagnose(diagCfg diagnosis.Config) []diagnosis.Diagnosis { // Check if there is a response and if it's valid report, reportErr := verifyEndpointResponse(diagCfg, statusCode, responseBody, err) + 
diagnosisName := "Connectivity to " + logURL d := createDiagnosis(diagnosisName, logURL, report, reportErr) diff --git a/pkg/diagnose/connectivity/core_endpoint_test.go b/pkg/diagnose/connectivity/core_endpoint_test.go index 028f833277388..c71255965a45c 100644 --- a/pkg/diagnose/connectivity/core_endpoint_test.go +++ b/pkg/diagnose/connectivity/core_endpoint_test.go @@ -89,3 +89,14 @@ func TestAcceptRedirection(t *testing.T) { assert.Error(t, err2) } + +func TestGetLogsUseTCP(t *testing.T) { + pkgconfigsetup.Datadog().SetWithoutSource("logs_enabled", true) + assert.False(t, getLogsUseTCP()) + + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.force_use_tcp", true) + assert.True(t, getLogsUseTCP()) + + pkgconfigsetup.Datadog().SetWithoutSource("logs_config.force_use_http", true) + assert.False(t, getLogsUseTCP()) +} diff --git a/pkg/diagnose/ports/ports.go b/pkg/diagnose/ports/ports.go index 17db62bf8f801..03a242d493217 100644 --- a/pkg/diagnose/ports/ports.go +++ b/pkg/diagnose/ports/ports.go @@ -8,7 +8,6 @@ package ports import ( "fmt" - "path" "strings" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" @@ -17,9 +16,8 @@ import ( ) var agentNames = map[string]struct{}{ - "datadog-agent": {}, "agent": {}, "trace-agent": {}, - "process-agent": {}, "system-probe": {}, "security-agent": {}, - "dogstatsd": {}, + "agent": {}, "trace-agent": {}, "process-agent": {}, + "system-probe": {}, "security-agent": {}, } // DiagnosePortSuite displays information about the ports used in the agent configuration @@ -63,7 +61,8 @@ func DiagnosePortSuite() []diagnosis.Diagnosis { } // TODO: check process user/group - if processName, ok := isAgentProcess(port.Process); ok { + processName, ok := isAgentProcess(port.Pid, port.Process) + if ok { diagnoses = append(diagnoses, diagnosis.Diagnosis{ Name: key, Result: diagnosis.DiagnosisSuccess, @@ -76,8 +75,8 @@ func DiagnosePortSuite() []diagnosis.Diagnosis { if port.Pid == 0 { diagnoses = append(diagnoses, diagnosis.Diagnosis{ Name: key, - Result: diagnosis.DiagnosisFail, - Diagnosis: fmt.Sprintf("Required port %d is already used by an another process.", value), + Result: diagnosis.DiagnosisWarning, + Diagnosis: fmt.Sprintf("Required port %d is already used by an another process. Ensure this is the expected process.", value), }) continue } @@ -85,15 +84,19 @@ func DiagnosePortSuite() []diagnosis.Diagnosis { diagnoses = append(diagnoses, diagnosis.Diagnosis{ Name: key, Result: diagnosis.DiagnosisFail, - Diagnosis: fmt.Sprintf("Required port %d is already used by '%s' process (PID=%d) for %s.", value, port.Process, port.Pid, port.Proto), + Diagnosis: fmt.Sprintf("Required port %d is already used by '%s' process (PID=%d) for %s.", value, processName, port.Pid, port.Proto), }) } return diagnoses } -func isAgentProcess(processName string) (string, bool) { - processName = path.Base(processName) +// isAgentProcess checks if the given pid corresponds to an agent process +func isAgentProcess(pid int, processName string) (string, bool) { + processName, err := RetrieveProcessName(pid, processName) + if err != nil { + return "", false + } _, ok := agentNames[processName] return processName, ok } diff --git a/pkg/diagnose/ports/ports_others.go b/pkg/diagnose/ports/ports_others.go new file mode 100644 index 0000000000000..fa93f17062422 --- /dev/null +++ b/pkg/diagnose/ports/ports_others.go @@ -0,0 +1,15 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !windows + +package ports + +import "path/filepath" + +// RetrieveProcessName returns the base name of the process on non-windows systems +func RetrieveProcessName(_ int, processName string) (string, error) { + return filepath.Base(processName), nil +} diff --git a/pkg/diagnose/ports/ports_windows.go b/pkg/diagnose/ports/ports_windows.go new file mode 100644 index 0000000000000..54f4cfc11ee6a --- /dev/null +++ b/pkg/diagnose/ports/ports_windows.go @@ -0,0 +1,120 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package ports + +import ( + "fmt" + "path/filepath" + "strings" + "syscall" + "unicode/utf16" + "unsafe" +) + +// NTSTATUS is the return type used by many native Windows functions. +type NTSTATUS uint32 + +// ntSuccess is a helper function to check if a status is a success. 0 is success, all other values are failure. +func ntSuccess(status NTSTATUS) bool { + return int32(status) >= 0 +} + +const ( + SystemProcessIDInformationClass = 88 // SystemProcessIDInformationClass gives access to process names without elevated privileges on Windows. +) + +// unicodeString mirrors the Windows unicodeString struct. +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer *uint16 +} + +// SystemProcessIDInformation mirrors the SystemProcessIdInformation struct used by NtQuerySystemInformation. +type SystemProcessIDInformation struct { + ProcessID uintptr + ImageName unicodeString +} + +// Loading NtQuerySystemInformation +var ( + ntdll = syscall.NewLazyDLL("ntdll.dll") + procNtQuerySystemInformation = ntdll.NewProc("NtQuerySystemInformation") +) + +// NtQuerySystemInformation is an *undocumented* function prototype: +// +// NTSTATUS NtQuerySystemInformation( +// SYSTEM_INFORMATION_CLASS SystemInformationClass, +// PVOID SystemInformation, +// ULONG SystemInformationLength, +// PULONG ReturnLength +// ); +func NtQuerySystemInformation( + systemInformationClass uint32, + systemInformation unsafe.Pointer, + systemInformationLength uint32, + returnLength *uint32, +) NTSTATUS { + r0, _, _ := procNtQuerySystemInformation.Call( + uintptr(systemInformationClass), + uintptr(systemInformation), + uintptr(systemInformationLength), + uintptr(unsafe.Pointer(returnLength)), + ) + return NTSTATUS(r0) +} + +// unicodeStringToString is a helper function to convert a unicodeString to a Go string +func unicodeStringToString(u unicodeString) string { + // Length is in bytes; divide by 2 for number of uint16 chars + length := int(u.Length / 2) + if length == 0 || u.Buffer == nil { + return "" + } + // Convert from a pointer to a slice of uint16 + buf := (*[1 << 20]uint16)(unsafe.Pointer(u.Buffer))[:length:length] + // Convert UTF-16 to Go string + return string(utf16.Decode(buf)) +} + +// RetrieveProcessName fetches the process name on Windows using NtQuerySystemInformation +// with SystemProcessIDInformationClass, which does not require elevated privileges. +func RetrieveProcessName(pid int, _ string) (string, error) { + // Allocate a slice of 256 uint16s (512 bytes). + // Used for unicodeString buffer. 
+ buf := make([]uint16, 256) + + // Prepare the SystemProcessIDInformation struct + var info SystemProcessIDInformation + info.ProcessID = uintptr(pid) + info.ImageName.Length = 0 + info.ImageName.MaximumLength = 256 * 2 + info.ImageName.Buffer = &buf[0] + + // Call NtQuerySystemInformation + var returnLength uint32 + status := NtQuerySystemInformation( + SystemProcessIDInformationClass, + unsafe.Pointer(&info), + uint32(unsafe.Sizeof(info)), + &returnLength, + ) + + // If ntSuccess(status) is false, return an error and empty string + if !ntSuccess(status) { + return "", fmt.Errorf("NtQuerySystemInformation failed with NTSTATUS 0x%X", status) + } + + // Convert unicodeString to Go string + imageName := unicodeStringToString(info.ImageName) + + // Extract the base name of the process, remove .exe extension if present + imageName = filepath.Base(imageName) + imageName = strings.TrimSuffix(imageName, ".exe") + + return imageName, nil +} diff --git a/pkg/diagnose/ports/ports_windows_test.go b/pkg/diagnose/ports/ports_windows_test.go new file mode 100644 index 0000000000000..bea7917ba4317 --- /dev/null +++ b/pkg/diagnose/ports/ports_windows_test.go @@ -0,0 +1,31 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package ports + +import ( + "syscall" + "testing" + + "github.com/stretchr/testify/require" +) + +// TestRetrieveProcessName_ValidPID tests that RetrieveProcessName returns a non-empty name +// for the current process (should be the Agent). +func TestRetrieveProcessName_ValidPID(t *testing.T) { + // Grab current process PID + pid := syscall.Getpid() + + name, err := RetrieveProcessName(pid, "") + require.NoError(t, err, "RetrieveProcessName failed with error: %v", err) + require.NotEmpty(t, name, "Expected a non-empty process name for PID %d, but got an empty string", pid) +} + +// TestRetrieveProcessName_InvalidPID tests that RetrieveProcessName returns an error +// if the PID is invalid. +func TestRetrieveProcessName_InvalidPID(t *testing.T) { + _, err := RetrieveProcessName(-1, "") + require.Error(t, err, "Expected an error when calling RetrieveProcessName with an invalid PID (-1), but got nil") +} diff --git a/pkg/diagnose/runner.go b/pkg/diagnose/runner.go index 5d7f2ee88d1db..3515785b5f302 100644 --- a/pkg/diagnose/runner.go +++ b/pkg/diagnose/runner.go @@ -11,10 +11,11 @@ import ( "fmt" "io" "regexp" - "runtime" "sort" "strings" + "github.com/fatih/color" + "github.com/DataDog/datadog-agent/comp/collector/collector" "github.com/DataDog/datadog-agent/comp/core/autodiscovery" "github.com/DataDog/datadog-agent/comp/core/secrets" @@ -24,9 +25,7 @@ import ( integrations "github.com/DataDog/datadog-agent/comp/logs/integrations/def" "github.com/DataDog/datadog-agent/pkg/aggregator/sender" "github.com/DataDog/datadog-agent/pkg/api/util" - "github.com/DataDog/datadog-agent/pkg/util/optional" - - "github.com/fatih/color" + "github.com/DataDog/datadog-agent/pkg/util/option" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/diagnose/connectivity" @@ -407,9 +406,9 @@ func runStdOut(w io.Writer, diagCfg diagnosis.Config, diagnoseResult *diagnosis. // SuitesDeps stores the dependencies for the diagnose suites. 
type SuitesDeps struct { SenderManager sender.DiagnoseSenderManager - Collector optional.Option[collector.Component] + Collector option.Option[collector.Component] SecretResolver secrets.Component - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] AC autodiscovery.Component Tagger tagger.Component } @@ -418,7 +417,7 @@ type SuitesDeps struct { type SuitesDepsInCLIProcess struct { senderManager sender.DiagnoseSenderManager secretResolver secrets.Component - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] AC autodiscovery.Component logReceiver integrations.Component tagger tagger.Component @@ -428,7 +427,7 @@ type SuitesDepsInCLIProcess struct { func NewSuitesDepsInCLIProcess( senderManager sender.DiagnoseSenderManager, secretResolver secrets.Component, - wmeta optional.Option[workloadmeta.Component], + wmeta option.Option[workloadmeta.Component], ac autodiscovery.Component, tagger tagger.Component, ) SuitesDepsInCLIProcess { @@ -454,16 +453,16 @@ func NewSuitesDepsInAgentProcess(collector collector.Component) SuitesDepsInAgen } // GetWMeta returns the workload metadata instance -func (s *SuitesDeps) GetWMeta() optional.Option[workloadmeta.Component] { +func (s *SuitesDeps) GetWMeta() option.Option[workloadmeta.Component] { return s.WMeta } // NewSuitesDeps returns a new SuitesDeps. func NewSuitesDeps( senderManager sender.DiagnoseSenderManager, - collector optional.Option[collector.Component], + collector option.Option[collector.Component], secretResolver secrets.Component, - wmeta optional.Option[workloadmeta.Component], ac autodiscovery.Component, + wmeta option.Option[workloadmeta.Component], ac autodiscovery.Component, tagger tagger.Component, ) SuitesDeps { return SuitesDeps{ @@ -529,8 +528,5 @@ func RegisterConnectivityDatadogEventPlatform(catalog *diagnosis.Catalog) { // RegisterPortConflict registers the port-conflict diagnose suite. 
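RegisterPortConflict, shown next, now registers the port-conflict suite on every platform instead of only darwin and linux, and passes ports.DiagnosePortSuite directly since the function already matches the suite signature. The suite itself resolves the owner of each required port by PID through the per-OS RetrieveProcessName and compares the result against a fixed allow-list of agent binaries. A self-contained Go sketch of that flow, using the non-Windows behavior (filepath.Base of the reported name) as a stand-in implementation:

package main

import (
	"fmt"
	"path/filepath"
)

// agentNames mirrors the allow-list of agent binaries from the ports.go hunk above.
var agentNames = map[string]struct{}{
	"agent": {}, "trace-agent": {}, "process-agent": {},
	"system-probe": {}, "security-agent": {},
}

// retrieveProcessName is a stand-in for the non-Windows RetrieveProcessName:
// it takes the base name of whatever the port scanner reported for the PID.
func retrieveProcessName(_ int, processName string) (string, error) {
	return filepath.Base(processName), nil
}

// isAgentProcess mirrors the updated helper: resolve the name by PID first,
// then check it against the known agent binaries.
func isAgentProcess(pid int, processName string) (string, bool) {
	name, err := retrieveProcessName(pid, processName)
	if err != nil {
		return "", false
	}
	_, ok := agentNames[name]
	return name, ok
}

func main() {
	// A port held by another agent process yields a success diagnosis rather than a failure.
	name, ok := isAgentProcess(1234, "/opt/datadog-agent/bin/agent/agent")
	fmt.Println(name, ok) // agent true
}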
func RegisterPortConflict(catalog *diagnosis.Catalog) { - // port-conflict suite available in darwin and linux only for now - if runtime.GOOS == "darwin" || runtime.GOOS == "linux" { - catalog.Register("port-conflict", func() []diagnosis.Diagnosis { return ports.DiagnosePortSuite() }) - } + catalog.Register("port-conflict", ports.DiagnosePortSuite) } diff --git a/pkg/dynamicinstrumentation/codegen/c/base_event.h b/pkg/dynamicinstrumentation/codegen/c/base_event.h new file mode 100644 index 0000000000000..04b2da65d0974 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/c/base_event.h @@ -0,0 +1,15 @@ +#ifndef DI_BASE_EVENT_H +#define DI_BASE_EVENT_H + +#include "ktypes.h" + +// standard fields which all events created in bpf will contain, regardless of the function that the +// probe is instrumenting +struct base_event { + char probe_id[36]; // identifier for each user-configured instrumentation point, it's a standard 36 character UUID + __u32 pid; // process ID + __u32 uid; // user ID + __u64 program_counters[10]; // program counters representing the stack trace of the instrumented function invocation +}__attribute__((aligned(8))); + +#endif diff --git a/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c b/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c index 0f850b0ad8731..b42611f97e205 100644 --- a/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c +++ b/pkg/dynamicinstrumentation/codegen/c/dynamicinstrumentation.c @@ -2,41 +2,22 @@ #include "bpf_tracing.h" #include "kconfig.h" #include -#include "types.h" - -#define MAX_STRING_SIZE {{ .InstrumentationInfo.InstrumentationOptions.StringMaxSize}} -#define PARAM_BUFFER_SIZE {{ .InstrumentationInfo.InstrumentationOptions.ArgumentsMaxSize}} -#define STACK_DEPTH_LIMIT 10 -#define MAX_SLICE_SIZE 1800 -#define MAX_SLICE_LENGTH 20 - -struct { - __uint(type, BPF_MAP_TYPE_RINGBUF); - __uint(max_entries, 1 << 24); -} events SEC(".maps"); - -struct { - __uint(type, BPF_MAP_TYPE_ARRAY); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(char[PARAM_BUFFER_SIZE])); - __uint(max_entries, 1); -} zeroval SEC(".maps"); - -struct event { - struct base_event base; - char output[PARAM_BUFFER_SIZE]; -}; +#include "base_event.h" +#include "macros.h" +#include "event.h" +#include "maps.h" +#include "expressions.h" SEC("uprobe/{{.GetBPFFuncName}}") int {{.GetBPFFuncName}}(struct pt_regs *ctx) { - bpf_printk("{{.GetBPFFuncName}} probe in {{.ServiceName}} has triggered"); + log_debug("{{.GetBPFFuncName}} probe in {{.ServiceName}} has triggered"); // reserve space on ringbuffer - struct event *event; - event = bpf_ringbuf_reserve(&events, sizeof(struct event), 0); + event_t *event; + event = bpf_ringbuf_reserve(&events, sizeof(event_t), 0); if (!event) { - bpf_printk("No space available on ringbuffer, dropping event"); + log_debug("No space available on ringbuffer, dropping event"); return 0; } @@ -44,15 +25,27 @@ int {{.GetBPFFuncName}}(struct pt_regs *ctx) __u32 key = 0; zero_string = bpf_map_lookup_elem(&zeroval, &key); if (!zero_string) { - bpf_printk("couldn't lookup zero value in zeroval array map, dropping event for {{.GetBPFFuncName}}"); + log_debug("couldn't lookup zero value in zeroval array map, dropping event for {{.GetBPFFuncName}}"); bpf_ringbuf_discard(event, 0); return 0; } - - bpf_probe_read(&event->base.probe_id, sizeof(event->base.probe_id), zero_string); - bpf_probe_read(&event->base.program_counters, sizeof(event->base.program_counters), zero_string); - bpf_probe_read(&event->output, 
sizeof(event->output), zero_string); - bpf_probe_read(&event->base.probe_id, {{ .ID | len }}, "{{.ID}}"); + long err; + err = bpf_probe_read_kernel(&event->base.probe_id, sizeof(event->base.probe_id), zero_string); + if (err != 0) { + log_debug("could not zero out probe id buffer"); + } + err = bpf_probe_read_kernel(&event->base.program_counters, sizeof(event->base.program_counters), zero_string); + if (err != 0) { + log_debug("could not zero out program counter buffer"); + } + err = bpf_probe_read_kernel(&event->output, sizeof(event->output), zero_string); + if (err != 0) { + log_debug("could not zero out output buffer"); + } + err = bpf_probe_read_kernel(&event->base.probe_id, {{ .ID | len }}, "{{.ID}}"); + if (err != 0) { + log_debug("could not write probe id to output"); + } // Get tid and tgid u64 pidtgid = bpf_get_current_pid_tgid(); @@ -65,34 +58,84 @@ int {{.GetBPFFuncName}}(struct pt_regs *ctx) // Collect stack trace __u64 currentPC = PT_REGS_IP(ctx); - bpf_probe_read(&event->base.program_counters[0], sizeof(__u64), ¤tPC); + err = bpf_probe_read_kernel(&event->base.program_counters[0], sizeof(__u64), ¤tPC); + if (err != 0) { + log_debug("could not collect first program counter"); + } __u64 bp = PT_REGS_FP(ctx); - bpf_probe_read(&bp, sizeof(__u64), (void*)bp); // dereference bp to get current stack frame + err = bpf_probe_read_user(&bp, sizeof(__u64), (void*)bp); // dereference bp to get current stack frame + if (err != 0) { + log_debug("could not retrieve base pointer for current stack frame"); + } + __u64 ret_addr = PT_REGS_RET(ctx); // when bpf prog enters, the return address hasn't yet been written to the stack int i; + int j; + __u16 n; for (i = 1; i < STACK_DEPTH_LIMIT; i++) { if (bp == 0) { break; } - bpf_probe_read(&event->base.program_counters[i], sizeof(__u64), &ret_addr); - bpf_probe_read(&ret_addr, sizeof(__u64), (void*)(bp-8)); - bpf_probe_read(&bp, sizeof(__u64), (void*)bp); + err = bpf_probe_read_kernel(&event->base.program_counters[i], sizeof(__u64), &ret_addr); + if (err != 0) { + log_debug("error occurred while collecting program counter for stack trace (1)"); + } + err = bpf_probe_read_user(&ret_addr, sizeof(__u64), (void*)(bp-8)); + if (err != 0) { + log_debug("error occurred while collecting program counter for stack trace (2)"); + } + err = bpf_probe_read_user(&bp, sizeof(__u64), (void*)bp); + if (err != 0) { + log_debug("error occurred while collecting program counter for stack trace (3)"); + } } // Collect parameters + __u16 collectionMax = MAX_SLICE_LENGTH; __u8 param_type; __u16 param_size; __u16 slice_length; + __u16 *collectionLimit; + int chunk_size = 0; + + // Set up temporary storage array which is used by some location expressions + // to have memory off the stack to work with + __u64 *temp_storage = bpf_map_lookup_elem(&temp_storage_array, &key) ; + if (!temp_storage) { + log_debug("could not lookup temporary storage array"); + bpf_ringbuf_discard(event, 0); + return 0; + } + - int outputOffset = 0; + expression_context_t context = { + .ctx = ctx, + .event = event, + .temp_storage = temp_storage, + .zero_string = zero_string, + .output_offset = 0, + .stack_counter = 0, + }; {{ .InstrumentationInfo.BPFParametersSourceCode }} bpf_ringbuf_submit(event, 0); + + // Drain the stack map for next invocation + __u8 m = 0; + __u64 placeholder; + long pop_ret = 0; + for (m = 0; m < context.stack_counter; m++) { + pop_ret = bpf_map_pop_elem(¶m_stack, &placeholder); + if (pop_ret != 0) { + break; + } + } + return 0; } diff --git 
a/pkg/dynamicinstrumentation/codegen/c/event.h b/pkg/dynamicinstrumentation/codegen/c/event.h new file mode 100644 index 0000000000000..2170f7b6800ab --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/c/event.h @@ -0,0 +1,25 @@ +#ifndef DI_EVENT_H +#define DI_EVENT_H + +#include "ktypes.h" +#include "macros.h" + +// event is the message which is passed back to user space from bpf containing +// all information about the invocation of the instrumented function +typedef struct event { + struct base_event base; + char output[PARAM_BUFFER_SIZE]; // values of parameters +} event_t; + +// expression_context contains state that is meant to be shared across location expressions +// during execution of the full bpf program. +typedef struct expression_context { + __u64 output_offset; // current offset within the output buffer to write to + __u8 stack_counter; // current size of the bpf parameter stack, used for emptying stack + struct pt_regs *ctx; + event_t *event; // output event allocated on ringbuffer + __u64 *temp_storage; // temporary storage array on heap used by some location expressions + char *zero_string; // array of zero's used to zero out buffers +} expression_context_t; + +#endif diff --git a/pkg/dynamicinstrumentation/codegen/c/expressions.h b/pkg/dynamicinstrumentation/codegen/c/expressions.h new file mode 100644 index 0000000000000..71d822a2e8971 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/c/expressions.h @@ -0,0 +1,323 @@ +#ifndef DI_EXPRESSIONS_H +#define DI_EXPRESSIONS_H + +// read_register reads `element_size` bytes from register `reg` into a u64 which is then pushed to +// the top of the BPF parameter stack. +static __always_inline int read_register(expression_context_t *context, __u64 reg, __u32 element_size) +{ + long err; + __u64 valueHolder = 0; + err = bpf_probe_read_kernel(&valueHolder, element_size, &context->ctx->DWARF_REGISTER(reg)); + if (err != 0) { + log_debug("error when reading data from register: %ld", err); + } + bpf_map_push_elem(¶m_stack, &valueHolder, 0); + context->stack_counter += 1; + return err; +} + +// read_stack reads `element_size` bytes from the traced program's stack at offset `stack_offset` +// into a u64 which is then pushed to the top of the BPF parameter stack. +static __always_inline int read_stack(expression_context_t *context, size_t stack_offset, __u32 element_size) +{ + long err; + __u64 valueHolder = 0; + err = bpf_probe_read_kernel(&valueHolder, element_size, &context->ctx->DWARF_STACK_REGISTER+stack_offset); + if (err != 0) { + log_debug("error when reading data from stack: %ld", err); + } + bpf_map_push_elem(¶m_stack, &valueHolder, 0); + context->stack_counter += 1; + return err; +} + +// read_register_value_to_output reads `element_size` bytes from register `reg` into a u64 which is then written to +// the output buffer. +static __always_inline int read_register_value_to_output(expression_context_t *context, __u64 reg, __u32 element_size) +{ + long err; + err = bpf_probe_read_kernel(&context->event->output[(context->output_offset)], element_size, &context->ctx->DWARF_REGISTER(reg)); + if (err != 0) { + log_debug("error when reading data while reading register value to output: %ld", err); + } + context->output_offset += element_size; + return err; +} + +// read_stack_to_output reads `element_size` bytes from the traced program's stack at offset `stack_offset` +// into a u64 which is then written to the output buffer. 
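base_event.h and event.h above fix the record layout that user space receives on the ring buffer: a 36-byte probe UUID, 32-bit pid and uid, ten 8-byte program counters, then the raw output buffer. A hedged Go sketch of decoding that fixed header; the field order and sizes come straight from the headers, while the 4 bytes of padding before the program counters (implied by the 8-byte alignment attribute) and the little-endian byte order are assumptions.

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// baseEvent mirrors struct base_event: 36-byte probe id, pid, uid and ten
// program counters, with assumed padding so the __u64 array is 8-byte aligned.
type baseEvent struct {
	ProbeID         [36]byte
	PID             uint32
	UID             uint32
	_               [4]byte // assumed alignment padding
	ProgramCounters [10]uint64
}

func main() {
	// A fake raw record as it might arrive from the ring buffer: fixed header
	// followed by the output buffer filled by the location expressions.
	raw := make([]byte, 128+16)
	copy(raw, "123e4567-e89b-12d3-a456-426614174000") // 36-character probe UUID

	var ev baseEvent
	if err := binary.Read(bytes.NewReader(raw[:128]), binary.LittleEndian, &ev); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("probe=%s pid=%d pcs[0]=%#x payload=%d bytes\n",
		bytes.TrimRight(ev.ProbeID[:], "\x00"), ev.PID, ev.ProgramCounters[0], len(raw)-128)
}

Because PARAM_BUFFER_SIZE is fixed at code-generation time, the split between the header and the output payload is known in advance on the consuming side.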
+static __always_inline int read_stack_value_to_output(expression_context_t *context, __u64 stack_offset, __u32 element_size) +{ + long err; + err = bpf_probe_read_kernel(&context->event->output[(context->output_offset)], element_size, &context->ctx->DWARF_STACK_REGISTER+stack_offset); + if (err != 0) { + log_debug("error when reading data while reading stack value to output: %ld", err); + } + context->output_offset += element_size; + return err; +} + +// pop writes to output `num_elements` elements, each of size `element_size, from the top of the stack. +static __always_inline int pop(expression_context_t *context, __u64 num_elements, __u32 element_size) +{ + long return_err; + long err; + __u64 valueHolder; + int i; + __u8 num_elements_byte = (__u8)num_elements; + for(i = 0; i < num_elements_byte; i++) { + bpf_map_pop_elem(¶m_stack, &valueHolder); + context->stack_counter -= 1; + log_debug("Popping to output: %llu", valueHolder); + err = bpf_probe_read_kernel(&context->event->output[(context->output_offset)+i], element_size, &valueHolder); + if (err != 0) { + log_debug("error when reading data while popping from bpf stack: %ld", err); + return_err = err; + } + context->output_offset += element_size; + } + return return_err; +} + +// dereference pops the 8-byte address from the top of the BPF parameter stack and dereferences +// it, reading a value of size `element_size` from it, and pushes that value (encoded as a u64) +// back to the BPF parameter stack. +// It should only be used for types of 8 bytes or less (see `dereference_large`). +static __always_inline int dereference(expression_context_t *context, __u32 element_size) +{ + long err; + __u64 addressHolder = 0; + err = bpf_map_pop_elem(¶m_stack, &addressHolder); + if (err != 0) { + log_debug("Error popping: %ld", err); + } else { + context->stack_counter -= 1; + } + log_debug("Going to dereference 0x%llx", addressHolder); + + __u64 valueHolder = 0; + err = bpf_probe_read_user(&valueHolder, element_size, (void*)addressHolder); + if (err != 0) { + log_debug("error when reading data while dereferencing: %ld", err); + } + // a mask is used to zero out bytes not used by a smaller type encoded into a __u64 + __u64 mask = (element_size == 8) ? ~0ULL : (1ULL << (8 * element_size)) - 1; + __u64 encodedValueHolder = valueHolder & mask; + + bpf_map_push_elem(¶m_stack, &encodedValueHolder, 0); + context->stack_counter += 1; + return err; +} + +// dereference_to_output pops the 8-byte address from the top of the BPF parameter stack and +// dereferences it, reading a value of size `element_size` from it, and writes that value +// directly to the output buffer. +// It should only be used for types of 8 bytes or less (see `dereference_large_to_output`). +static __always_inline int dereference_to_output(expression_context_t *context, __u32 element_size) +{ + long return_err; + long err; + __u64 addressHolder = 0; + bpf_map_pop_elem(¶m_stack, &addressHolder); + context->stack_counter -= 1; + + __u64 valueHolder = 0; + + log_debug("Going to deref to output: 0x%llx", addressHolder); + err = bpf_probe_read_user(&valueHolder, element_size, (void*)addressHolder); + if (err != 0) { + return_err = err; + log_debug("error when reading data while dereferencing to output: %ld", err); + } + // a mask is used to zero out bytes not used by a smaller type encoded into a __u64 + __u64 mask = (element_size == 8) ? 
~0ULL : (1ULL << (8 * element_size)) - 1; + __u64 encodedValueHolder = valueHolder & mask; + + log_debug("Writing %llu to output (dereferenced)", encodedValueHolder); + err = bpf_probe_read_kernel(&context->event->output[(context->output_offset)], element_size, &encodedValueHolder); + if (err != 0) { + return_err = err; + log_debug("error when reading data while dereferencing into output: %ld", err); + } + context->output_offset += element_size; + return return_err; +} + +// dereference_large pops the 8-byte address from the top of the BPF parameter stack and dereferences +// it, reading a value of size `element_size` from it, and pushes that value, encoded in 8-byte chunks +// to the BPF parameter stack. This is safe to use for types larger than 8-bytes. +// back to the BPF parameter stack. +static __always_inline int dereference_large(expression_context_t *context, __u32 element_size, __u8 num_chunks) +{ + long return_err; + long err; + __u64 addressHolder = 0; + bpf_map_pop_elem(¶m_stack, &addressHolder); + context->stack_counter -= 1; + + int i; + __u32 chunk_size; + for (i = 0; i < num_chunks; i++) { + chunk_size = (i == num_chunks - 1 && element_size % 8 != 0) ? (element_size % 8) : 8; + err = bpf_probe_read_user(&context->temp_storage[i], element_size, (void*)(addressHolder + (i * 8))); + if (err != 0) { + return_err = err; + log_debug("error when reading data dereferencing large: %ld", err); + } + } + + // Mask the last chunk if element_size is not a multiple of 8 + if (element_size % 8 != 0) { + __u64 mask = (1ULL << (8 * (element_size % 8))) - 1; + context->temp_storage[num_chunks - 1] &= mask; + } + + for (int i = 0; i < num_chunks; i++) { + bpf_map_push_elem(¶m_stack, &context->temp_storage[i], 0); + context->stack_counter += 1; + } + + // zero out shared array + err = bpf_probe_read_kernel(context->temp_storage, element_size*num_chunks, context->zero_string); + if (err != 0) { + return_err = err; + log_debug("error when reading data zeroing out shared memory while dereferencing large: %ld", err); + } + return return_err; +} + +// dereference_large pops the 8-byte address from the top of the BPF parameter stack and dereferences +// it, reading a value of size `element_size` from it, and writes that value to the output buffer. +// This is safe to use for types larger than 8-bytes. +static __always_inline int dereference_large_to_output(expression_context_t *context, __u32 element_size) +{ + long err; + __u64 addressHolder = 0; + bpf_map_pop_elem(¶m_stack, &addressHolder); + context->stack_counter -= 1; + err = bpf_probe_read_user(&context->event->output[(context->output_offset)], element_size, (void*)(addressHolder)); + if (err != 0) { + log_debug("error when reading data: %ld", err); + } + context->output_offset += element_size; + return err; +} + +// apply_offset adds `offset` to the 8-byte address on the top of the BPF parameter stack. +static __always_inline int apply_offset(expression_context_t *context, size_t offset) +{ + __u64 addressHolder = 0; + bpf_map_pop_elem(¶m_stack, &addressHolder); + context->stack_counter -= 1; + + addressHolder += offset; + bpf_map_push_elem(¶m_stack, &addressHolder, 0); + context->stack_counter += 1; + return 0; +} + +// dereference_dynamic_to_output reads an 8-byte length from the top of the BPF parameter stack, followed by +// an 8-byte address. It applies the maximum `bytes_limit` to the length, then dereferences the address to +// the output buffer. 
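The dereference helpers above pack values of 8 bytes or less into a single __u64 by masking off the unused high bytes, and split larger values into 8-byte chunks where only the final partial chunk is masked. A small Go sketch of just that masking and chunking arithmetic, independent of the BPF maps involved:

package main

import "fmt"

// maskFor reproduces the mask used by dereference and dereference_to_output:
// all ones for 8-byte values, otherwise only the low element_size bytes survive.
func maskFor(elementSize uint) uint64 {
	if elementSize >= 8 {
		return ^uint64(0)
	}
	return (uint64(1) << (8 * elementSize)) - 1
}

// chunkSizes reproduces the per-chunk sizes walked by dereference_large:
// full 8-byte chunks, with a shorter final chunk when the size is not a multiple of 8.
func chunkSizes(elementSize uint) []uint {
	numChunks := (elementSize + 7) / 8
	sizes := make([]uint, 0, numChunks)
	for i := uint(0); i < numChunks; i++ {
		if i == numChunks-1 && elementSize%8 != 0 {
			sizes = append(sizes, elementSize%8)
		} else {
			sizes = append(sizes, 8)
		}
	}
	return sizes
}

func main() {
	fmt.Printf("mask for a 2-byte value: %#x\n", maskFor(2))    // 0xffff
	fmt.Println("chunks for a 20-byte struct:", chunkSizes(20)) // [8 8 4]
}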
+static __always_inline int dereference_dynamic_to_output(expression_context_t *context, __u16 bytes_limit) +{ + long err = 0; + __u64 lengthToRead = 0; + bpf_map_pop_elem(¶m_stack, &lengthToRead); + context->stack_counter -= 1; + + __u64 addressHolder = 0; + bpf_map_pop_elem(¶m_stack, &addressHolder); + context->stack_counter -= 1; + + __u32 collection_size; + collection_size = (__u16)lengthToRead; + if (collection_size > bytes_limit) { + collection_size = bytes_limit; + } + err = bpf_probe_read_user(&context->event->output[(context->output_offset)], collection_size, (void*)addressHolder); + if (err != 0) { + log_debug("error when doing dynamic dereference: %ld", err); + } + + if (collection_size > bytes_limit) { + collection_size = bytes_limit; + } + context->output_offset += collection_size; + return err; +} + +// set_limit_entry is used to set a limit for a specific collection (such as a slice). It reads the true length of the +// collection from the top of the BPF parameter stack, applies the passed `limit` to it, and updates the `collection_limit` +// map entry associated with `collection_identifier` to this limit. The `collection_identifier` is a user space generated +// and track identifier for each collection which can be referenced in BPF code. +static __always_inline int set_limit_entry(expression_context_t *context, __u16 limit, char collection_identifier[6]) +{ + // Read the 2 byte length from top of the stack, then set collectionLimit to the minimum of the two + __u64 length; + bpf_map_pop_elem(¶m_stack, &length); + context->stack_counter -= 1; + + __u16 lengthShort = (__u16)length; + if (lengthShort > limit) { + lengthShort = limit; + } + + long err = 0; + err = bpf_map_update_elem(&collection_limits, collection_identifier, &lengthShort, BPF_ANY); + if (err != 0) { + log_debug("error updating collection limit for %s to %hu: %ld", collection_identifier, lengthShort, err); + } + + log_debug("set limit entry for %s to %hu", collection_identifier, lengthShort); + return 0; +} + +// copy duplicates the u64 element on the top of the BPF parameter stack. +static __always_inline int copy(expression_context_t *context) +{ + __u64 holder; + bpf_map_peek_elem(¶m_stack, &holder); + bpf_map_push_elem(¶m_stack, &holder, 0); + context->stack_counter += 1; + return 0; +} + +// read_str_to_output reads a Go string to the output buffer, limited in length by `limit`. +// In Go, strings are internally implemented as structs with two fields. The fields are length, +// and a pointer to a character array. This expression expects the address of the string struct +// itself to be on the top of the stack. 
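As the comment above describes, a Go string value is a two-word header: a pointer to the character data plus a length, which is why read_str_to_output (defined next) reads the data pointer at offset 0 and the low 32 bits of the length at offset +8 before copying at most `limit` bytes. A hedged Go sketch that inspects the same layout from inside a Go process; it relies on the de facto 64-bit runtime representation of strings, which is also what the BPF reader assumes:

package main

import (
	"fmt"
	"unsafe"
)

// stringHeader mirrors the de facto 64-bit runtime layout of a Go string:
// a pointer to the bytes followed by an integer length.
type stringHeader struct {
	data unsafe.Pointer
	len  int
}

func main() {
	s := "dynamic instrumentation"

	// Reinterpret the string value as its two-word header; this is the same
	// layout assumption the generated BPF reader relies on.
	hdr := (*stringHeader)(unsafe.Pointer(&s))

	fmt.Printf("data pointer at offset 0: %p\n", hdr.data)
	fmt.Printf("length at offset +8:      %d\n", hdr.len)
	fmt.Printf("first byte via pointer:   %c\n", *(*byte)(hdr.data))
}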
+static __always_inline int read_str_to_output(expression_context_t *context, __u16 limit) +{ + long err; + __u64 stringStructAddressHolder = 0; + err = bpf_map_pop_elem(¶m_stack, &stringStructAddressHolder); + if (err != 0) { + log_debug("error popping string struct addr: %ld", err); + return err; + } + context->stack_counter -= 1; + + char* characterPointer = 0; + err = bpf_probe_read_user(&characterPointer, sizeof(characterPointer), (void*)(stringStructAddressHolder)); + log_debug("Reading from 0x%p", characterPointer); + + __u32 length; + err = bpf_probe_read_user(&length, sizeof(length), (void*)(stringStructAddressHolder+8)); + if (err != 0) { + log_debug("error reading string length: %ld", err); + return err; + } + if (length > limit) { + length = limit; + } + err = bpf_probe_read_user(&context->event->output[(context->output_offset)], length, (char*)characterPointer); + if (err != 0) { + log_debug("error reading string: %ld", err); + } + context->output_offset += length; + log_debug("Read %u bytes (limit = %hu)", length, limit); + + return err; +} +#endif diff --git a/pkg/dynamicinstrumentation/codegen/c/macros.h b/pkg/dynamicinstrumentation/codegen/c/macros.h new file mode 100644 index 0000000000000..dc29c35e14599 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/c/macros.h @@ -0,0 +1,9 @@ +#ifndef DI_MACROS_H +#define DI_MACROS_H + +#define MAX_STRING_SIZE {{ .InstrumentationInfo.InstrumentationOptions.StringMaxSize }} +#define PARAM_BUFFER_SIZE {{ .InstrumentationInfo.InstrumentationOptions.ArgumentsMaxSize }} +#define STACK_DEPTH_LIMIT 10 +#define MAX_SLICE_LENGTH {{ .InstrumentationInfo.InstrumentationOptions.SliceMaxLength }} + +#endif diff --git a/pkg/dynamicinstrumentation/codegen/c/maps.h b/pkg/dynamicinstrumentation/codegen/c/maps.h new file mode 100644 index 0000000000000..67f5a2a921eab --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/c/maps.h @@ -0,0 +1,31 @@ +#ifndef DI_MAPS_H +#define DI_MAPS_H + +#include "map-defs.h" + +// The events map is the ringbuffer used for communicating events from +// bpf to user space. +struct { + __uint(type, BPF_MAP_TYPE_RINGBUF); + __uint(max_entries, 1 << 24); +} events SEC(".maps"); + +// The zeroval map is used to have pre-zero'd data which bpf code can +// use to zero out event buffers (similar to memset, but verifier friendly). +BPF_ARRAY_MAP(zeroval, char[PARAM_BUFFER_SIZE], 1); + +// The temp_storage_array map is used as a temporary location in memory +// not on the bpf stack that location expressions can use for temporarily +// caching data while they operate on it without worrying about exceeding +// the 512 byte bpf stack limit. +BPF_PERCPU_ARRAY_MAP(temp_storage_array, __u64[4000], 1); + +// The collection_limits map is used for setting the known length limit +// of collections such as slices so that they can later be referenced +// when reading the values in the collection. +BPF_HASH_MAP(collection_limits, char[6], __u16, 1024); + +// The param_stack map is used as a stack for the location expressions +// to operate on values and addresses. 
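The param_stack map declared just below is the scratch stack that the generated location expressions operate on: read expressions push raw 8-byte words, dereference and pop expressions consume them, and the probe drains whatever is left (tracked by stack_counter in expression_context) so the next invocation starts from an empty stack. A simplified Go model of that discipline; the type and method names are invented for illustration:

package main

import "fmt"

// exprStack models the role of the param_stack BPF map plus the
// stack_counter bookkeeping kept in expression_context.
type exprStack struct {
	values  []uint64
	counter int
}

func (s *exprStack) push(v uint64) { s.values = append(s.values, v); s.counter++ }

func (s *exprStack) pop() (uint64, bool) {
	if len(s.values) == 0 {
		return 0, false
	}
	v := s.values[len(s.values)-1]
	s.values = s.values[:len(s.values)-1]
	s.counter--
	return v, true
}

// drain mirrors the cleanup loop at the end of the probe: pop whatever the
// expressions left behind so the next invocation starts from an empty stack.
func (s *exprStack) drain() {
	for i := s.counter; i > 0; i-- {
		if _, ok := s.pop(); !ok {
			break
		}
	}
}

func main() {
	var st exprStack
	st.push(0xc000123456) // e.g. an address read from a register
	st.push(42)           // e.g. a dereferenced length
	st.drain()
	fmt.Println("entries left after drain:", len(st.values)) // 0
}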
+BPF_STACK_MAP(param_stack, __u64, 2048); +#endif diff --git a/pkg/dynamicinstrumentation/codegen/c/types.h b/pkg/dynamicinstrumentation/codegen/c/types.h deleted file mode 100644 index f170b91fe7541..0000000000000 --- a/pkg/dynamicinstrumentation/codegen/c/types.h +++ /dev/null @@ -1,14 +0,0 @@ -#ifndef DI_TYPES_H -#define DI_TYPES_H - -#include "ktypes.h" - -// NOTE: Be careful when adding fields, alignment should always be to 8 bytes -struct base_event { - char probe_id[304]; - __u32 pid; - __u32 uid; - __u64 program_counters[10]; -}__attribute__((aligned(8))); - -#endif diff --git a/pkg/dynamicinstrumentation/codegen/codegen.go b/pkg/dynamicinstrumentation/codegen/codegen.go index 9c286513287fa..b02269f1b0ebc 100644 --- a/pkg/dynamicinstrumentation/codegen/codegen.go +++ b/pkg/dynamicinstrumentation/codegen/codegen.go @@ -14,12 +14,10 @@ import ( "fmt" "io" "reflect" - "strings" "text/template" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/util/log" ) // GenerateBPFParamsCode generates the source code associated with the probe and data @@ -29,17 +27,14 @@ func GenerateBPFParamsCode(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) out := bytes.NewBuffer(parameterBytes) if probe.InstrumentationInfo.InstrumentationOptions.CaptureParameters { - params := applyCaptureDepth(procInfo.TypeMap.Functions[probe.FuncName], probe.InstrumentationInfo.InstrumentationOptions.MaxReferenceDepth) - applyFieldCountLimit(params) + params := procInfo.TypeMap.Functions[probe.FuncName] for i := range params { - flattenedParams := flattenParameters([]ditypes.Parameter{params[i]}) - + flattenedParams := flattenParameters([]*ditypes.Parameter{params[i]}) err := generateHeadersText(flattenedParams, out) if err != nil { return err } - - err = generateParametersText(flattenedParams, out) + err = generateParametersTextViaLocationExpressions(flattenedParams, out) if err != nil { return err } @@ -55,12 +50,9 @@ func GenerateBPFParamsCode(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) func resolveHeaderTemplate(param *ditypes.Parameter) (*template.Template, error) { switch param.Kind { case uint(reflect.String): - if param.Location.InReg { - return template.New("string_reg_header_template").Parse(stringRegisterHeaderTemplateText) - } - return template.New("string_stack_header_template").Parse(stringStackHeaderTemplateText) + return template.New("string_header_template").Parse(stringHeaderTemplateText) case uint(reflect.Slice): - if param.Location.InReg { + if param.Location != nil && param.Location.InReg { return template.New("slice_reg_header_template").Parse(sliceRegisterHeaderTemplateText) } return template.New("slice_stack_header_template").Parse(sliceStackHeaderTemplateText) @@ -69,7 +61,7 @@ func resolveHeaderTemplate(param *ditypes.Parameter) (*template.Template, error) } } -func generateHeadersText(params []ditypes.Parameter, out io.Writer) error { +func generateHeadersText(params []*ditypes.Parameter, out io.Writer) error { for i := range params { err := generateHeaderText(params[i], out) if err != nil { @@ -79,150 +71,147 @@ func generateHeadersText(params []ditypes.Parameter, out io.Writer) error { return nil } -func generateHeaderText(param ditypes.Parameter, out io.Writer) error { +func generateHeaderText(param *ditypes.Parameter, out io.Writer) error { if reflect.Kind(param.Kind) == reflect.Slice { - return generateSliceHeader(¶m, out) + return generateSliceHeader(param, out) } 
else if reflect.Kind(param.Kind) == reflect.String { - return generateStringHeader(¶m, out) - } else { //nolint:revive // TODO - tmplt, err := resolveHeaderTemplate(¶m) - if err != nil { - return err - } - err = tmplt.Execute(out, param) - if err != nil { - return err - } - if len(param.ParameterPieces) != 0 { - return generateHeadersText(param.ParameterPieces, out) - } + return generateStringHeader(param, out) } - return nil -} - -func generateParametersText(params []ditypes.Parameter, out io.Writer) error { - for i := range params { - err := generateParameterText(¶ms[i], out) - if err != nil { - return err - } - } - return nil -} - -func generateParameterText(param *ditypes.Parameter, out io.Writer) error { - - if param.Kind == uint(reflect.Array) || - param.Kind == uint(reflect.Struct) || - param.Kind == uint(reflect.Pointer) { - // - Arrays/structs don't have actual values, we just want to generate - // a header for them for the sake of event parsing. - // - Pointers do have actual values, but they're captured when the - // underlying value is also captured. - return nil - } - - template, err := resolveParameterTemplate(param) + template, err := resolveHeaderTemplate(param) if err != nil { return err } - param.Type = cleanupTypeName(param.Type) err = template.Execute(out, param) if err != nil { - return fmt.Errorf("could not execute template for generating read of parameter: %w", err) + return err + } + if len(param.ParameterPieces) != 0 { + return generateHeadersText(param.ParameterPieces, out) } - return nil } -func resolveParameterTemplate(param *ditypes.Parameter) (*template.Template, error) { - notSupported := param.NotCaptureReason == ditypes.Unsupported - cutForFieldLimit := param.NotCaptureReason == ditypes.FieldLimitReached - - if notSupported { - return template.New("unsupported_type_template").Parse(unsupportedTypeTemplateText) - } else if cutForFieldLimit { - return template.New("cut_field_limit_template").Parse(cutForFieldLimitTemplateText) - } - - if param.Location.InReg { - return resolveRegisterParameterTemplate(param) +func generateParametersTextViaLocationExpressions(params []*ditypes.Parameter, out io.Writer) error { + for i := range params { + collectedExpressions := collectLocationExpressions(params[i]) + for _, locationExpression := range collectedExpressions { + template, err := resolveLocationExpressionTemplate(locationExpression) + if err != nil { + return err + } + err = template.Execute(out, locationExpression) + if err != nil { + return fmt.Errorf("could not execute template for generating location expression: %w", err) + } + } } - return resolveStackParameterTemplate(param) + return nil } -func resolveRegisterParameterTemplate(param *ditypes.Parameter) (*template.Template, error) { - needsDereference := param.Location.NeedsDereference - stringType := param.Kind == uint(reflect.String) - sliceType := param.Kind == uint(reflect.Slice) +// collectLocationExpressions goes through the parameter tree (param.ParameterPieces) via +// breadth first traversal, collecting the LocationExpression's from each parameter and appending them +// to a collective slice. 
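With parameters flattened and their LocationExpressions collected breadth-first as described above, code generation reduces to a loop that resolves one text/template per opcode and executes it with the expression as data. A minimal, self-contained Go sketch of that pattern; the template body and field names here are invented for illustration, since the real templates (readRegisterTemplateText and friends) live elsewhere in the package:

package main

import (
	"os"
	"text/template"
)

// locationExpression is a trimmed stand-in for ditypes.LocationExpression.
type locationExpression struct {
	Opcode string
	Arg1   uint
	Arg2   uint
}

// A hypothetical template in the spirit of readRegisterTemplateText: it expands
// to a call to the read_register helper from expressions.h, preceded by a comment.
const readRegisterSketch = "/* {{.Opcode}} */ read_register(&context, {{.Arg1}}, {{.Arg2}});\n"

func main() {
	tmpl := template.Must(template.New("read_register_location_expression").Parse(readRegisterSketch))

	// One generated line per collected expression, executed in order.
	exprs := []locationExpression{
		{Opcode: "ReadUserRegister", Arg1: 0, Arg2: 8},
		{Opcode: "ReadUserRegister", Arg1: 3, Arg2: 4},
	}
	for _, e := range exprs {
		if err := tmpl.Execute(os.Stdout, e); err != nil {
			panic(err)
		}
	}
}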
+func collectLocationExpressions(param *ditypes.Parameter) []ditypes.LocationExpression { + collectedExpressions := []ditypes.LocationExpression{} + queue := []*ditypes.Parameter{param} + var top *ditypes.Parameter + + for { + if len(queue) == 0 { + break + } + top = queue[0] + queue = queue[1:] - if needsDereference { - // Register Pointer - return template.New("pointer_register_template").Parse(pointerRegisterTemplateText) - } else if stringType { - // Register String - return template.New("string_register_template").Parse(stringRegisterTemplateText) - } else if sliceType { - // Register Slice - return template.New("slice_register_template").Parse(sliceRegisterTemplateText) - } else if !needsDereference { - // Register Normal Value - return template.New("register_template").Parse(normalValueRegisterTemplateText) + if top == nil { + continue + } + queue = append(queue, top.ParameterPieces...) + if len(top.LocationExpressions) > 0 { + collectedExpressions = append(top.LocationExpressions, collectedExpressions...) + top.LocationExpressions = []ditypes.LocationExpression{} + } } - return nil, errors.New("no template created: invalid or unsupported type") + return collectedExpressions } -func resolveStackParameterTemplate(param *ditypes.Parameter) (*template.Template, error) { - needsDereference := param.Location.NeedsDereference - stringType := param.Kind == uint(reflect.String) - sliceType := param.Kind == uint(reflect.Slice) - - if needsDereference { - // Stack Pointer - return template.New("pointer_stack_template").Parse(pointerStackTemplateText) - } else if stringType { - // Stack String - return template.New("string_stack_template").Parse(stringStackTemplateText) - } else if sliceType { - // Stack Slice - return template.New("slice_stack_template").Parse(sliceStackTemplateText) - } else if !needsDereference { - // Stack Normal Value - return template.New("stack_template").Parse(normalValueStackTemplateText) +func resolveLocationExpressionTemplate(locationExpression ditypes.LocationExpression) (*template.Template, error) { + switch locationExpression.Opcode { + case ditypes.OpReadUserRegister: + return template.New("read_register_location_expression").Parse(readRegisterTemplateText) + case ditypes.OpReadUserStack: + return template.New("read_stack_location_expression").Parse(readStackTemplateText) + case ditypes.OpReadUserRegisterToOutput: + return template.New("read_register_to_output_location_expression").Parse(readRegisterValueToOutputTemplateText) + case ditypes.OpReadUserStackToOutput: + return template.New("read_stack_to_output_location_expression").Parse(readStackValueToOutputTemplateText) + case ditypes.OpDereference: + return template.New("dereference_location_expression").Parse(dereferenceTemplateText) + case ditypes.OpDereferenceToOutput: + return template.New("dereference_to_output_location_expression").Parse(dereferenceToOutputTemplateText) + case ditypes.OpDereferenceLarge: + return template.New("dereference_large_location_expression").Parse(dereferenceLargeTemplateText) + case ditypes.OpDereferenceLargeToOutput: + return template.New("dereference_large_to_output_location_expression").Parse(dereferenceLargeToOutputTemplateText) + case ditypes.OpDereferenceDynamic: + return template.New("dereference_dynamic_location_expression").Parse(dereferenceDynamicTemplateText) + case ditypes.OpDereferenceDynamicToOutput: + return template.New("dereference_dynamic_to_output_location_expression").Parse(dereferenceDynamicToOutputTemplateText) + case ditypes.OpReadStringToOutput: + return 
template.New("read_string_to_output").Parse(readStringToOutputTemplateText) + case ditypes.OpApplyOffset: + return template.New("apply_offset_location_expression").Parse(applyOffsetTemplateText) + case ditypes.OpPop: + return template.New("pop_location_expression").Parse(popTemplateText) + case ditypes.OpCopy: + return template.New("copy_location_expression").Parse(copyTemplateText) + case ditypes.OpLabel: + return template.New("label").Parse(labelTemplateText) + case ditypes.OpSetGlobalLimit: + return template.New("set_limit_entry").Parse(setLimitEntryText) + case ditypes.OpJumpIfGreaterThanLimit: + return template.New("jump_if_greater_than_limit").Parse(jumpIfGreaterThanLimitText) + case ditypes.OpPrintStatement: + return template.New("print_statement").Parse(printStatementText) + case ditypes.OpComment: + return template.New("comment").Parse(commentText) + default: + return nil, errors.New("invalid location expression opcode") } - return nil, errors.New("no template created: invalid or unsupported type") -} - -func cleanupTypeName(s string) string { - return strings.TrimPrefix(s, "*") } func generateSliceHeader(slice *ditypes.Parameter, out io.Writer) error { if slice == nil { return errors.New("nil slice parameter when generating header code") } - if len(slice.ParameterPieces) != 2 { - return errors.New("invalid slice parameter when generating header code") + if len(slice.ParameterPieces) != 3 { + return fmt.Errorf("invalid slice parameter when generating header code: %d fields", len(slice.ParameterPieces)) + } + + // Slices are defined with an "array" pointer as piece 0, which is a pointer to the actual + // type, which is defined as piece 0 under that. + if len(slice.ParameterPieces) != 3 && + len(slice.ParameterPieces[0].ParameterPieces) != 1 { + return errors.New("malformed slice type") } typeHeaderBytes := []byte{} typeHeaderBuf := bytes.NewBuffer(typeHeaderBytes) - err := generateHeaderText(slice.ParameterPieces[0], typeHeaderBuf) + lenHeaderBytes := []byte{} + lenHeaderBuf := bytes.NewBuffer(lenHeaderBytes) + lenHeaderBuf.Write([]byte("// Capture length of slice:")) + err := generateHeaderText(slice.ParameterPieces[0].ParameterPieces[0], typeHeaderBuf) if err != nil { return err } - - lengthHeaderBytes := []byte{} - lengthHeaderBuf := bytes.NewBuffer(lengthHeaderBytes) - err = generateSliceLengthHeader(slice.ParameterPieces[1], lengthHeaderBuf) + err = generateParametersTextViaLocationExpressions([]*ditypes.Parameter{slice.ParameterPieces[1]}, lenHeaderBuf) if err != nil { return err } - + slice.ParameterPieces[1].LocationExpressions = []ditypes.LocationExpression{} w := sliceHeaderWrapper{ Parameter: slice, - SliceTypeHeaderText: typeHeaderBuf.String(), - SliceLengthText: lengthHeaderBuf.String(), + SliceTypeHeaderText: lenHeaderBuf.String() + typeHeaderBuf.String(), } sliceTemplate, err := resolveHeaderTemplate(slice) @@ -245,70 +234,25 @@ func generateStringHeader(stringParam *ditypes.Parameter, out io.Writer) error { if len(stringParam.ParameterPieces) != 2 { return fmt.Errorf("invalid string parameter when generating header code (pieces len %d)", len(stringParam.ParameterPieces)) } - - x := []byte{} - buf := bytes.NewBuffer(x) - err := generateStringLengthHeader(stringParam.ParameterPieces[1], buf) + stringHeaderTemplate, err := resolveHeaderTemplate(stringParam) if err != nil { return err } - - stringHeaderWrapper := stringHeaderWrapper{ - Parameter: stringParam, - StringLengthText: buf.String(), - } - - stringTemplate, err := resolveHeaderTemplate(stringParam) - if err != 
nil { - return err - } - - err = stringTemplate.Execute(out, stringHeaderWrapper) + err = stringHeaderTemplate.Execute(out, stringParam) if err != nil { return fmt.Errorf("could not execute template for generating string header: %w", err) } - return nil -} - -func generateStringLengthHeader(stringLengthParamPiece ditypes.Parameter, buf *bytes.Buffer) error { - var ( - tmplte *template.Template - err error - ) - if stringLengthParamPiece.Location.InReg { - tmplte, err = template.New("string_register_length_header").Parse(stringLengthRegisterTemplateText) - } else { - tmplte, err = template.New("string_stack_length_header").Parse(stringLengthStackTemplateText) - } + err = generateParametersTextViaLocationExpressions([]*ditypes.Parameter{stringParam.ParameterPieces[1]}, out) if err != nil { return err } - return tmplte.Execute(buf, stringLengthParamPiece) -} - -func generateSliceLengthHeader(sliceLengthParamPiece ditypes.Parameter, buf *bytes.Buffer) error { - var ( - tmplte *template.Template - err error - ) - if sliceLengthParamPiece.Location.InReg { - tmplte, err = template.New("slice_register_length_header").Parse(sliceLengthRegisterTemplateText) - } else { - tmplte, err = template.New("slice_stack_length_header").Parse(sliceLengthStackTemplateText) + if stringParam.ParameterPieces[1] != nil { + stringParam.ParameterPieces[1].LocationExpressions = []ditypes.LocationExpression{} } - if err != nil { - return err - } - return tmplte.Execute(buf, sliceLengthParamPiece) + return nil } type sliceHeaderWrapper struct { Parameter *ditypes.Parameter - SliceLengthText string SliceTypeHeaderText string } - -type stringHeaderWrapper struct { - Parameter *ditypes.Parameter - StringLengthText string -} diff --git a/pkg/dynamicinstrumentation/codegen/expression_templates.go b/pkg/dynamicinstrumentation/codegen/expression_templates.go new file mode 100644 index 0000000000000..8096803b96085 --- /dev/null +++ b/pkg/dynamicinstrumentation/codegen/expression_templates.go @@ -0,0 +1,114 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package codegen + +var readRegisterTemplateText = ` +// Arg1 = register +// Arg2 = size of element +read_register(&context, {{.Arg1}}, {{.Arg2}}); +` + +var readStackTemplateText = ` +// Arg1 = stack offset +// Arg2 = size of element +read_stack(&context, {{.Arg1}}, {{.Arg2}}); +` + +var readRegisterValueToOutputTemplateText = ` +// Arg1 = register +// Arg2 = size of element +read_register_value_to_output(&context, {{.Arg1}}, {{.Arg2}}); +` + +var readStackValueToOutputTemplateText = ` +// Arg1 = stack offset +// Arg2 = size of element +read_stack_value_to_output(&context, {{.Arg1}}, {{.Arg2}}); +` + +var popTemplateText = ` +// Arg1 = number of elements (u64) to pop +// Arg2 = size of each element +pop(&context, {{.Arg1}}, {{.Arg2}}); +` + +var dereferenceTemplateText = ` +// Arg1 = size in bytes of value we're reading from the 8 byte address at the top of the stack +dereference(&context, {{.Arg1}}); +` + +var dereferenceToOutputTemplateText = ` +// Arg1 = size in bytes of value we're reading from the 8 byte address at the top of the stack +dereference_to_output(&context, {{.Arg1}}); +` + +var dereferenceLargeTemplateText = ` +// Arg1 = size in bytes of value we're reading from the 8 byte address at the top of the stack +// Arg2 = number of chunks (should be ({{.Arg1}} + 7) / 8) +dereference_large(&context, {{.Arg1}}, {{.Arg2}}); +` + +var dereferenceLargeToOutputTemplateText = ` +// Arg1 = size in bytes of value we're reading from the 8 byte address at the top of the stack +dereference_large_to_output(&context, {{.Arg1}}); +` + +var applyOffsetTemplateText = ` +// Arg1 = uint value (offset) we're adding to the 8-byte address on top of the stack +apply_offset(&context, {{.Arg1}}); +` + +var dereferenceDynamicTemplateText = ` +// Arg1 = maximum limit on bytes read +// Arg2 = number of chunks (should be (max + 7)/8) +// Arg3 = size of each element +dereference_dynamic(&context, {{.Arg1}}, {{.Arg2}}, {{.Arg3}}); +` + +var dereferenceDynamicToOutputTemplateText = ` +// Arg1 = maximum limit on bytes read +dereference_dynamic_to_output(&context, {{.Arg1}}); +` + +var readStringToOutputTemplateText = ` +// Arg1 = maximum limit on string length +read_str_to_output(&context, {{.Arg1}}); +` + +var copyTemplateText = ` +copy(&context); +` + +var setLimitEntryText = ` +// Arg1 = Maximum limit +set_limit_entry(&context, {{.Arg1}}, "{{.CollectionIdentifier}}"); +` + +var jumpIfGreaterThanLimitText = ` +collectionLimit = bpf_map_lookup_elem(&collection_limits, "{{.CollectionIdentifier}}"); +if (!collectionLimit) { + log_debug("couldn't find collection limit for {{.CollectionIdentifier}}"); + collectionLimit = &collectionMax; +} +if ({{.Arg1}} == *collectionLimit) { + log_debug("collection limit for {{.CollectionIdentifier}} exceeded: %d", *collectionLimit); + goto {{.Label}}; +} +` + +var labelTemplateText = ` +{{.Label}}: +` + +var commentText = ` +// {{.Label}} +` + +var printStatementText = ` +log_debug("{{.Label}}", "{{.CollectionIdentifier}}"); +` diff --git a/pkg/dynamicinstrumentation/codegen/output_offsets.go b/pkg/dynamicinstrumentation/codegen/output_offsets.go index bdad1337ffa73..6e5ffe7e10cfe 100644 --- a/pkg/dynamicinstrumentation/codegen/output_offsets.go +++ b/pkg/dynamicinstrumentation/codegen/output_offsets.go @@ -11,55 +11,15 @@ import ( "math/rand" "reflect" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" ) -type paramDepthCounter struct { - depth int - param *ditypes.Parameter -} - 
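The dispatch in resolveLocationExpressionTemplate above pairs each opcode with one of these snippets, so rendering a compiled program is just a loop over the expressions. A minimal illustrative sketch, not part of this patch: it assumes it lives next to the codegen templates and that each template pulls {{.Arg1}}, {{.Arg2}}, {{.Label}} and {{.CollectionIdentifier}} straight off the ditypes.LocationExpression value it is executed with.

// renderLocationExpressions is an illustrative helper, not code from this change.
func renderLocationExpressions(exprs []ditypes.LocationExpression, out io.Writer) error {
	for _, expr := range exprs {
		// Pick the eBPF C snippet that corresponds to this opcode.
		tmpl, err := resolveLocationExpressionTemplate(expr)
		if err != nil {
			return err
		}
		// The template reads its arguments directly from the expression value.
		if err := tmpl.Execute(out, expr); err != nil {
			return err
		}
	}
	return nil
}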
-func applyCaptureDepth(params []ditypes.Parameter, maxDepth int) []ditypes.Parameter { - log.Tracef("Applying capture depth: %d", maxDepth) - queue := []paramDepthCounter{} - +func flattenParameters(params []*ditypes.Parameter) []*ditypes.Parameter { + flattenedParams := []*ditypes.Parameter{} for i := range params { - queue = append(queue, paramDepthCounter{ - depth: 0, - param: &params[i], - }) - } - - for len(queue) != 0 { - front := queue[0] - queue = queue[1:] - - if front.depth == maxDepth { - // max capture depth reached, remove parameters below this level. - front.param.ParameterPieces = []ditypes.Parameter{} - if front.param.Kind == uint(reflect.Struct) { - // struct size reflects the number of fields, - // setting to 0 tells the user space parsing not to - // expect anything else. - front.param.TotalSize = 0 - } - } else { - for i := range front.param.ParameterPieces { - queue = append(queue, paramDepthCounter{ - depth: front.depth + 1, - param: &front.param.ParameterPieces[i], - }) - } + if params[i] == nil { + continue } - } - return params -} - -func flattenParameters(params []ditypes.Parameter) []ditypes.Parameter { - flattenedParams := []ditypes.Parameter{} - for i := range params { kind := reflect.Kind(params[i].Kind) if kind == reflect.Slice || kind == reflect.String { // Slices don't get flattened as we need the underlying type. @@ -69,9 +29,9 @@ func flattenParameters(params []ditypes.Parameter) []ditypes.Parameter { flattenedParams = append(flattenedParams, params[i]) } else if hasHeader(kind) { paramHeader := params[i] - paramHeader.ParameterPieces = nil flattenedParams = append(flattenedParams, paramHeader) flattenedParams = append(flattenedParams, flattenParameters(params[i].ParameterPieces)...) + paramHeader.ParameterPieces = nil } else if len(params[i].ParameterPieces) > 0 { flattenedParams = append(flattenedParams, flattenParameters(params[i].ParameterPieces)...)
} else { @@ -86,44 +46,6 @@ func flattenParameters(params []ditypes.Parameter) []ditypes.Parameter { return flattenedParams } -func applyFieldCountLimit(params []ditypes.Parameter) { - queue := []*ditypes.Parameter{} - for i := range params { - queue = append(queue, &params[len(params)-1-i]) - } - var ( - current *ditypes.Parameter - max int - ) - for len(queue) != 0 { - current = queue[0] - queue = queue[1:] - - max = len(current.ParameterPieces) - if len(current.ParameterPieces) > ditypes.MaxFieldCount { - max = ditypes.MaxFieldCount - for j := max; j < len(current.ParameterPieces); j++ { - excludeForFieldCount(&current.ParameterPieces[j]) - } - } - for n := 0; n < max; n++ { - queue = append(queue, &current.ParameterPieces[n]) - } - } -} - -func excludeForFieldCount(root *ditypes.Parameter) { - // Exclude all in this tree - if root == nil { - return - } - root.NotCaptureReason = ditypes.FieldLimitReached - root.Kind = ditypes.KindCutFieldLimit - for i := range root.ParameterPieces { - excludeForFieldCount(&root.ParameterPieces[i]) - } -} - func hasHeader(kind reflect.Kind) bool { return kind == reflect.Struct || kind == reflect.Array || diff --git a/pkg/dynamicinstrumentation/codegen/templates.go b/pkg/dynamicinstrumentation/codegen/templates.go index 72fdcade8f44a..87f08d3c9593a 100644 --- a/pkg/dynamicinstrumentation/codegen/templates.go +++ b/pkg/dynamicinstrumentation/codegen/templates.go @@ -11,24 +11,18 @@ var headerTemplateText = ` // Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} // Write the kind and size to output buffer param_type = {{.Kind}}; -bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); +bpf_probe_read_kernel(&event->output[context.output_offset], sizeof(param_type), &param_type); param_size = {{.TotalSize}}; -bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size); -outputOffset += 3; +bpf_probe_read_kernel(&event->output[context.output_offset+1], sizeof(param_size), &param_size); +context.output_offset += 3; ` - -// The length and type of slices aren't known until parsing, so they require -// special headers to read in the length dynamically var sliceRegisterHeaderTemplateText = ` // Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}} // Write the slice kind to output buffer param_type = {{.Parameter.Kind}}; -bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); - -{{.SliceLengthText}} +bpf_probe_read_kernel(&event->output[context.output_offset], sizeof(param_type), &param_type); -bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size); -outputOffset += 3; +context.output_offset += 1; __u16 indexSlice{{.Parameter.ID}}; slice_length = param_size; @@ -44,185 +38,21 @@ for (indexSlice{{.Parameter.ID}} = 0; indexSlice{{.Parameter.ID}} < MAX_SLICE_LE } ` -var sliceLengthRegisterTemplateText = ` -bpf_probe_read(&param_size, sizeof(param_size), &ctx->DWARF_REGISTER({{.Location.Register}})); -` - -// The length and type of slices aren't known until parsing, so they require -// special headers to read in the length dynamically var sliceStackHeaderTemplateText = ` // Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}} // Write the slice kind to output buffer param_type = {{.Parameter.Kind}}; -bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); - -{{.SliceLengthText}} - -bpf_probe_read(&event->output[outputOffset+1], sizeof(param_size), &param_size); -outputOffset += 3; - -__u16 
indexSlice{{.Parameter.ID}}; -slice_length = param_size; -if (slice_length > MAX_SLICE_LENGTH) { - slice_length = MAX_SLICE_LENGTH; -} +bpf_probe_read_kernel(&event->output[context.output_offset], sizeof(param_type), &param_type); -for (indexSlice{{.Parameter.ID}} = 0; indexSlice{{.Parameter.ID}} < MAX_SLICE_LENGTH; indexSlice{{.Parameter.ID}}++) { - if (indexSlice{{.Parameter.ID}} >= slice_length) { - break; - } - {{.SliceTypeHeaderText}} -} -` +context.output_offset += 1; -var sliceLengthStackTemplateText = ` -bpf_probe_read(&param_size, sizeof(param_size), &ctx->DWARF_STACK_REGISTER+{{.Parameter.Location.StackOffset}}]); +{{.SliceTypeHeaderText}} ` -// The length of strings aren't known until parsing, so they require -// special headers to read in the length dynamically -var stringRegisterHeaderTemplateText = ` -// Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}} -// Write the string kind to output buffer -param_type = {{.Parameter.Kind}}; -bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); - -// Read string length and write it to output buffer - -{{.StringLengthText}} - -// Limit string length -__u16 string_size_{{.Parameter.ID}} = param_size; -if (string_size_{{.Parameter.ID}} > MAX_STRING_SIZE) { - string_size_{{.Parameter.ID}} = MAX_STRING_SIZE; -} -bpf_probe_read(&event->output[outputOffset+1], sizeof(string_size_{{.Parameter.ID}}), &string_size_{{.Parameter.ID}}); -outputOffset += 3; -` - -var stringLengthRegisterTemplateText = ` -bpf_probe_read(&param_size, sizeof(param_size), &ctx->DWARF_REGISTER({{.Location.Register}})); -` - -// The length of strings aren't known until parsing, so they require -// special headers to read in the length dynamically -var stringStackHeaderTemplateText = ` -// Name={{.Parameter.Name}} ID={{.Parameter.ID}} TotalSize={{.Parameter.TotalSize}} Kind={{.Parameter.Kind}} -// Write the string kind to output buffer -param_type = {{.Parameter.Kind}}; -bpf_probe_read(&event->output[outputOffset], sizeof(param_type), &param_type); - -// Read string length and write it to output buffer -{{.StringLengthText}} - -// Limit string length -__u16 string_size_{{.Parameter.ID}} = param_size; -if (string_size_{{.Parameter.ID}} > MAX_STRING_SIZE) { - string_size_{{.Parameter.ID}} = MAX_STRING_SIZE; -} -bpf_probe_read(&event->output[outputOffset+1], sizeof(string_size_{{.Parameter.ID}}), &string_size_{{.Parameter.ID}}); -outputOffset += 3; -` - -var stringLengthStackTemplateText = ` -bpf_probe_read(&param_size, sizeof(param_size), (char*)((ctx->DWARF_STACK_REGISTER)+{{.Location.StackOffset}})); -` - -var sliceRegisterTemplateText = ` -// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} -// Read contents of slice -bpf_probe_read(&event->output[outputOffset], MAX_SLICE_SIZE, (void*)ctx->DWARF_REGISTER({{.Location.Register}})); -outputOffset += MAX_SLICE_SIZE; -` - -var sliceStackTemplateText = ` -// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} -// Read contents of slice -bpf_probe_read(&event->output[outputOffset], MAX_SLICE_SIZE, (void*)(ctx->DWARF_STACK_REGISTER+{{.Location.StackOffset}}); -outputOffset += MAX_SLICE_SIZE;` - -var stringRegisterTemplateText = ` -// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} -// Read string length and write it to output buffer - -// We limit string length variable again in case the verifier forgot about it (which often happens) -__u16 string_size_{{.ID}}_new; -string_size_{{.ID}}_new = string_size_{{.ID}}; -if 
(string_size_{{.ID}}_new > MAX_STRING_SIZE) { - string_size_{{.ID}}_new = MAX_STRING_SIZE; -} - -// Read contents of string -bpf_probe_read(&event->output[outputOffset], string_size_{{.ID}}_new, (void*)ctx->DWARF_REGISTER({{.Location.Register}})); -outputOffset += string_size_{{.ID}}_new; -` - -var stringStackTemplateText = ` -// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} - -// We limit string length variable again in case the verifier forgot about it (which often happens) -__u16 string_size_{{.ID}}_new; -string_size_{{.ID}}_new = string_size_{{.ID}}; -if (string_size_{{.ID}}_new > MAX_STRING_SIZE) { - string_size_{{.ID}}_new = MAX_STRING_SIZE; -} -// Read contents of string -bpf_probe_read(&ret_addr, sizeof(__u64), (void*)(ctx->DWARF_STACK_REGISTER+{{.Location.StackOffset}})); -bpf_probe_read(&event->output[outputOffset], string_size_{{.ID}}_new, (void*)(ret_addr)); -outputOffset += string_size_{{.ID}}_new; -` - -var pointerRegisterTemplateText = ` -// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} -// Read the pointer value (address of underlying value) -void *ptrTo{{.ID}}; -bpf_probe_read(&ptrTo{{.ID}}, sizeof(ptrTo{{.ID}}), &ctx->DWARF_REGISTER({{.Location.Register}})); - -// Write the underlying value to output -bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, ptrTo{{.ID}}+{{.Location.PointerOffset}}); -outputOffset += {{.TotalSize}}; - -// Write the pointer address to output -ptrTo{{.ID}} += {{.Location.PointerOffset}}; -bpf_probe_read(&event->output[outputOffset], sizeof(ptrTo{{.ID}}), &ptrTo{{.ID}}); -` - -var pointerStackTemplateText = ` -// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} -// Read the pointer value (address of underlying value) -void *ptrTo{{.ID}}; -bpf_probe_read(&ptrTo{{.ID}}, sizeof(ptrTo{{.ID}}), (char*)((ctx->DWARF_STACK_REGISTER)+{{.Location.StackOffset}}+8)); - -// Write the underlying value to output -bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, ptrTo{{.ID}}+{{.Location.PointerOffset}}); -outputOffset += {{.TotalSize}}; - -// Write the pointer address to output -ptrTo{{.ID}} += {{.Location.PointerOffset}}; -bpf_probe_read(&event->output[outputOffset], sizeof(ptrTo{{.ID}}), &ptrTo{{.ID}}); -` - -var normalValueRegisterTemplateText = ` +var stringHeaderTemplateText = ` // Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} -bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, &ctx->DWARF_REGISTER({{.Location.Register}})); -outputOffset += {{.TotalSize}}; -` - -var normalValueStackTemplateText = ` -// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} -// Read value for {{.Name}} -bpf_probe_read(&event->output[outputOffset], {{.TotalSize}}, (char*)((ctx->DWARF_STACK_REGISTER)+{{.Location.StackOffset}})); -outputOffset += {{.TotalSize}}; -` - -// Unsupported types just get a single `255` value to signify as a placeholder -// that an unsupported type goes here. Size is where we keep the actual type. 
-var unsupportedTypeTemplateText = ` -// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} -// No capture, unsupported type -` - -var cutForFieldLimitTemplateText = ` -// Name={{.Name}} ID={{.ID}} TotalSize={{.TotalSize}} Kind={{.Kind}} -// No capture, cut for field limit +// Write the string kind to output buffer +param_type = {{.Kind}}; +bpf_probe_read_kernel(&event->output[context.output_offset], sizeof(param_type), &param_type); +context.output_offset += 1; ` diff --git a/pkg/dynamicinstrumentation/di.go b/pkg/dynamicinstrumentation/di.go index 3f39870bafccd..57babad4e61a6 100644 --- a/pkg/dynamicinstrumentation/di.go +++ b/pkg/dynamicinstrumentation/di.go @@ -10,7 +10,6 @@ package dynamicinstrumentation import ( - "encoding/json" "fmt" "io" @@ -51,14 +50,16 @@ func newGoDIStats() GoDIStats { } } -type OfflineOptions struct { //nolint:revive // TODO +// OfflineOptions configures the Offline options for the running Dynamic Instrumentation process +type OfflineOptions struct { Offline bool ProbesFilePath string SnapshotOutput string DiagnosticOutput string } -type ReaderWriterOptions struct { //nolint:revive // TODO +// ReaderWriterOptions configures the ReaderWriter options for the running Dynamic Instrumentation process +type ReaderWriterOptions struct { CustomReaderWriters bool SnapshotWriter io.Writer DiagnosticWriter io.Writer @@ -80,9 +81,7 @@ func RunDynamicInstrumentation(opts *DIOptions) (*GoDI, error) { if err != nil { return nil, err } - stopFunctions := []func(){ - diagnostics.StopGlobalDiagnostics, - } + stopFunctions := []func(){} if opts.ReaderWriterOptions.CustomReaderWriters { cm, err := diconfig.NewReaderConfigManager() if err != nil { @@ -155,28 +154,6 @@ func RunDynamicInstrumentation(opts *DIOptions) (*GoDI, error) { return goDI, nil } -func (goDI *GoDI) printSnapshot(event *ditypes.DIEvent) { //nolint:unused // TODO - if event == nil { - return - } - procInfo := goDI.ConfigManager.GetProcInfos()[event.PID] - diLog := uploader.NewDILog(procInfo, event) - - var bs []byte - var err error - - if diLog != nil { - bs, err = json.MarshalIndent(diLog, "", " ") - } else { - bs, err = json.MarshalIndent(event, "", " ") - } - - if err != nil { - log.Info(err) - } - log.Debug(string(bs)) -} - func (goDI *GoDI) uploadSnapshot(event *ditypes.DIEvent) { // goDI.printSnapshot(event) procInfo := goDI.ConfigManager.GetProcInfos()[event.PID] diff --git a/pkg/dynamicinstrumentation/diagnostics/diagnostics.go b/pkg/dynamicinstrumentation/diagnostics/diagnostics.go index 8355f33783efa..fae84ff61aa77 100644 --- a/pkg/dynamicinstrumentation/diagnostics/diagnostics.go +++ b/pkg/dynamicinstrumentation/diagnostics/diagnostics.go @@ -78,7 +78,8 @@ func (m *DiagnosticManager) update(id probeInstanceID, d *ditypes.DiagnosticUplo } } -func StopGlobalDiagnostics() { //nolint:revive // TODO +// StopGlobalDiagnostics stops diagnostics from running and closes the updates channel +func StopGlobalDiagnostics() { close(Diagnostics.Updates) } diff --git a/pkg/dynamicinstrumentation/diconfig/binary_inspection.go b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go index 02885a2b6772a..a1ce4e901d0d0 100644 --- a/pkg/dynamicinstrumentation/diconfig/binary_inspection.go +++ b/pkg/dynamicinstrumentation/diconfig/binary_inspection.go @@ -80,6 +80,7 @@ func AnalyzeBinary(procInfo *ditypes.ProcessInfo) error { // Use the result from InspectWithDWARF to populate the locations of parameters for functionName, functionMetadata := range r.Functions { putLocationsInParams(functionMetadata.Parameters, 
r.StructOffsets, procInfo.TypeMap.Functions, functionName) + populateLocationExpressionsForFunction(r.Functions, procInfo, functionName) correctStructSizes(procInfo.TypeMap.Functions[functionName]) } @@ -88,15 +89,15 @@ func AnalyzeBinary(procInfo *ditypes.ProcessInfo) error { // collectFieldIDs returns all struct fields if there are any amongst types of parameters // including if there's structs that are nested deep within complex types -func collectFieldIDs(param ditypes.Parameter) []bininspect.FieldIdentifier { +func collectFieldIDs(param *ditypes.Parameter) []bininspect.FieldIdentifier { fieldIDs := []bininspect.FieldIdentifier{} - stack := append([]ditypes.Parameter{param}, param.ParameterPieces...) + stack := append([]*ditypes.Parameter{param}, param.ParameterPieces...) for len(stack) != 0 { current := stack[len(stack)-1] stack = stack[:len(stack)-1] - if !kindIsSupported(reflect.Kind(current.Kind)) { + if current == nil || !kindIsSupported(reflect.Kind(current.Kind)) { continue } if len(current.ParameterPieces) != 0 { @@ -125,10 +126,45 @@ func collectFieldIDs(param ditypes.Parameter) []bininspect.FieldIdentifier { return fieldIDs } +func populateLocationExpressionsForFunction( + metadata map[string]bininspect.FunctionMetadata, + procInfo *ditypes.ProcessInfo, + functionName string, +) { + log.Tracef("Populating location expressions for %s", functionName) + functions := procInfo.TypeMap.Functions + parameters := functions[functionName] + probes := procInfo.GetProbes() + funcNamesToLimits := map[string]*ditypes.InstrumentationInfo{} + for i := range probes { + funcNamesToLimits[probes[i].FuncName] = probes[i].InstrumentationInfo + } + + funcMetadata, ok := metadata[functionName] + if !ok { + log.Warnf("no function metadata for function %s", functionName) + return + } + limitInfo, ok := funcNamesToLimits[functionName] + if !ok || limitInfo == nil { + log.Warnf("no limit info available for function %s", functionName) + return + } + for i := range parameters { + if i >= len(funcMetadata.Parameters) { + log.Warnf("parameter metadata does not line up with parameter itself (not found in metadata: %v)", parameters[i]) + break + } + GenerateLocationExpression(limitInfo, parameters[i]) + } +} + +// putLocationsInParams collects parameter locations from metadata which is retrieved +// from the bininspect package, and assigns it in the Parameter representation. func putLocationsInParams( paramMetadatas []bininspect.ParameterMetadata, fieldLocations map[bininspect.FieldIdentifier]uint64, - funcMap map[string][]ditypes.Parameter, + funcMap map[string][]*ditypes.Parameter, funcName string) { params := funcMap[funcName] @@ -146,18 +182,24 @@ func putLocationsInParams( } assignLocationsInOrder(params, locations) - correctTypeSpecificLocations(params, fieldLocations) - + for i := range params { + correctStructLocations(params[i], fieldLocations) + } funcMap[funcName] = params } -func assignLocationsInOrder(params []ditypes.Parameter, locations []ditypes.Location) { +// assignLocationsInOrder takes a slice of locations and a slice of parameters and assigns +// the locations in the intended order according to how they were retrieved from DWARF. +// The locations convey where in memory the parameter will be at function entry, such +// as specific registers or stack offsets. Logic such as assigning locations to individual +// array elements or types that are pointed to is handled. 
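To make the traversal order of assignLocationsInOrder (directly below) concrete, here is a hypothetical sketch, not part of this patch, for a signature like func target(a int, s string): DWARF reports one location for a and two for the string (data pointer, then length), and the walker hands them out to the flattened leaves in that same order. The field and type names come from this patch; the wrapper function and its literal values are invented for illustration.

// exampleAssignLocations shows only the input/output shape; it assumes it sits
// in the diconfig package next to assignLocationsInOrder.
func exampleAssignLocations() []*ditypes.Parameter {
	params := []*ditypes.Parameter{
		{Name: "a", Kind: uint(reflect.Int), TotalSize: 8},
		{Name: "s", Kind: uint(reflect.String), TotalSize: 16, ParameterPieces: []*ditypes.Parameter{
			{Name: "str", Kind: uint(reflect.Pointer), TotalSize: 8}, // string data pointer
			{Name: "len", Kind: uint(reflect.Int), TotalSize: 8},     // string length
		}},
	}
	locations := []ditypes.Location{
		{InReg: true, Register: 0}, // a
		{InReg: true, Register: 1}, // s: data pointer
		{InReg: true, Register: 2}, // s: length
	}
	assignLocationsInOrder(params, locations)
	return params // each leaf now carries the Location it was handed
}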
+func assignLocationsInOrder(params []*ditypes.Parameter, locations []ditypes.Location) { stack := []*ditypes.Parameter{} locationCounter := 0 // Start by pushing addresses of all parameters to stack for i := range params { - stack = append(stack, &params[len(params)-1-i]) + stack = append(stack, params[len(params)-1-i]) } for { @@ -168,124 +210,40 @@ func assignLocationsInOrder(params []ditypes.Parameter, locations []ditypes.Loca stack = stack[:len(stack)-1] if len(current.ParameterPieces) != 0 && current.Kind != uint(reflect.Array) && - current.Kind != uint(reflect.Pointer) && - current.Kind != uint(reflect.Slice) && - current.Kind != uint(reflect.String) { - + current.Kind != uint(reflect.Pointer) { for i := range current.ParameterPieces { - stack = append(stack, &current.ParameterPieces[len(current.ParameterPieces)-1-i]) + stack = append(stack, current.ParameterPieces[len(current.ParameterPieces)-1-i]) } } else { // Location fields are directly assigned instead of setting the whole // location field to preserve other fields locationToAssign := locations[locationCounter] + if current.Location == nil { + current.Location = &ditypes.Location{} + } current.Location.InReg = locationToAssign.InReg current.Location.Register = locationToAssign.Register current.Location.StackOffset = locationToAssign.StackOffset - - if reflect.Kind(current.Kind) == reflect.String { - // Strings actually have two locations (pointer, length) - // but are shortened to a single one for parsing. The location - // of the length is stored as a piece of the overall string - // which contains the location of the string's address. - if len(locations) <= locationCounter+1 || - len(current.ParameterPieces) != 2 { - return - } - stringLengthLocation := locations[locationCounter+1] - current.ParameterPieces[1].Location.InReg = stringLengthLocation.InReg - current.ParameterPieces[1].Location.Register = stringLengthLocation.Register - current.ParameterPieces[1].Location.StackOffset = stringLengthLocation.StackOffset - locationCounter++ - } else if reflect.Kind(current.Kind) == reflect.Slice { - // Slices actually have three locations (array, length, capacity) - // but are shortened to a single one for parsing. The location - // of the length is stored as a piece of the overall slice - // which contains the location of the slice's address. - // The capacity slice field is ignored. 
- if len(locations) <= locationCounter+1 { - return - } - sliceLength := ditypes.Parameter{} - sliceLengthLocation := locations[locationCounter+1] - sliceLength.Location.InReg = sliceLengthLocation.InReg - sliceLength.Location.Register = sliceLengthLocation.Register - sliceLength.Location.StackOffset = sliceLengthLocation.StackOffset - current.ParameterPieces = append(current.ParameterPieces, sliceLength) - locationCounter += 2 - } locationCounter++ } } } -func correctTypeSpecificLocations(params []ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { - for i := range params { - if params[i].Kind == uint(reflect.Array) { - correctArrayLocations(&params[i], fieldLocations) - } else if params[i].Kind == uint(reflect.Pointer) { - correctPointerLocations(&params[i], fieldLocations) - } else if params[i].Kind == uint(reflect.Struct) || params[i].Kind == uint(reflect.String) { - correctStructLocations(&params[i], fieldLocations) - } - } -} - -// correctStructLocations sets pointer and stack offsets for struct fields from -// bininspect results +// correctStructLocations finds structs in the passed parameter tree (`structParam`) and sets the FieldOffset +// field on individual fields, which conveys the offset of the field within the struct when the struct is stored +// on the stack or heap. func correctStructLocations(structParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { for i := range structParam.ParameterPieces { - fieldID := bininspect.FieldIdentifier{ - StructName: structParam.Type, - FieldName: structParam.ParameterPieces[i].Name, - } - offset, ok := fieldLocations[fieldID] - if !ok { - log.Infof("no field location available for %s.%s\n", fieldID.StructName, fieldID.FieldName) - structParam.ParameterPieces[i].NotCaptureReason = ditypes.NoFieldLocation + if structParam.ParameterPieces[i] == nil { continue } - fieldLocationsHaveAlreadyBeenDirectlyAssigned := isLocationSet(structParam.ParameterPieces[i].Location) - if fieldLocationsHaveAlreadyBeenDirectlyAssigned { - // The location would be set if it was directly assigned to (i.e. has its own register instead of needing - // to dereference a pointer or get the element from a slice) - structParam.ParameterPieces[i].Location = structParam.Location - structParam.ParameterPieces[i].Location.StackOffset = int64(offset) + structParam.Location.StackOffset + fieldID := bininspect.FieldIdentifier{ + StructName: structParam.Type, + FieldName: structParam.ParameterPieces[i].Name, } - - structParam.ParameterPieces[i].Location.PointerOffset = offset - structParam.ParameterPieces[i].Location.StackOffset = structParam.ParameterPieces[0].Location.StackOffset + int64(offset) - - correctTypeSpecificLocations([]ditypes.Parameter{structParam.ParameterPieces[i]}, fieldLocations) - } -} - -func isLocationSet(l ditypes.Location) bool { - return reflect.DeepEqual(l, ditypes.Location{}) -} - -// correctPointerLocations takes a parameters location and copies it to the underlying -// type that's pointed to. 
It sets `NeedsDereference` to true -// then calls the top level function on each element of the array to ensure all -// element's have corrected locations -func correctPointerLocations(pointerParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { - // Pointers should have exactly one entry in ParameterPieces that correspond to the underlying type - if len(pointerParam.ParameterPieces) != 1 { - return - } - pointerParam.ParameterPieces[0].Location = pointerParam.Location - pointerParam.ParameterPieces[0].Location.NeedsDereference = true - correctTypeSpecificLocations([]ditypes.Parameter{pointerParam.ParameterPieces[0]}, fieldLocations) -} - -// correctArrayLocations takes a parameter's location, and distribute it to each element -// by using `stack offset + (size*index)` then calls the top level function on each element -// of the array to ensure all element's have corrected locations -func correctArrayLocations(arrayParam *ditypes.Parameter, fieldLocations map[bininspect.FieldIdentifier]uint64) { - initialOffset := arrayParam.Location.StackOffset - for i := range arrayParam.ParameterPieces { - arrayParam.ParameterPieces[i].Location.StackOffset = initialOffset + (arrayParam.ParameterPieces[i].TotalSize * int64(i)) - correctTypeSpecificLocations([]ditypes.Parameter{arrayParam.ParameterPieces[i]}, fieldLocations) + offset := fieldLocations[fieldID] + structParam.ParameterPieces[i].FieldOffset = offset + correctStructLocations(structParam.ParameterPieces[i], fieldLocations) } } diff --git a/pkg/dynamicinstrumentation/diconfig/binary_inspection_test.go b/pkg/dynamicinstrumentation/diconfig/binary_inspection_test.go index 32d31e49c2188..cc2d6dc5da170 100644 --- a/pkg/dynamicinstrumentation/diconfig/binary_inspection_test.go +++ b/pkg/dynamicinstrumentation/diconfig/binary_inspection_test.go @@ -15,49 +15,66 @@ import ( "strings" "testing" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil" - "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" - "github.com/DataDog/datadog-agent/pkg/util/safeelf" - - "github.com/kr/pretty" + "github.com/stretchr/testify/require" ) -func TestBinaryInspection(t *testing.T) { - - testFunctions := []string{ - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_single_string", - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_nonembedded_struct", - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_struct", - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_uint_slice", - } - - curDir, err := pwd() - if err != nil { - t.Error(err) +func TestAnalyzeBinary(t *testing.T) { + + testCases := []struct { + FuncName string + ExpectedParameters []*ditypes.Parameter + }{ + { + FuncName: "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_single_int", + ExpectedParameters: []*ditypes.Parameter{ + { + Name: "x", + ID: "", + Type: "int", + TotalSize: 8, + Kind: 0x2, + Location: &ditypes.Location{InReg: true, StackOffset: 0, Register: 0, NeedsDereference: false, PointerOffset: 0x0}, + LocationExpressions: nil, + FieldOffset: 0x0, + NotCaptureReason: 0x0, + ParameterPieces: nil, + }, + }, + }, } - binPath, err := testutil.BuildGoBinaryWrapper(curDir, "../testutil/sample/sample_service") - if err != nil { - t.Error(err) - } + for i := range testCases { + t.Run(testCases[i].FuncName, func(t *testing.T) { 
- f, err := safeelf.Open(binPath) - if err != nil { - t.Error(err) - } + curDir, err := pwd() + if err != nil { + t.Error(err) + } - result, err := bininspect.InspectWithDWARF(f, testFunctions, nil) - if err != nil { - t.Error(">", err) - } + binPath, err := testutil.BuildGoBinaryWrapper(curDir, "../testutil/sample/sample_service") + if err != nil { + t.Error(err) + } - for _, funcMetadata := range result.Functions { - for paramName, paramMeta := range funcMetadata.Parameters { - for _, piece := range paramMeta.Pieces { - pretty.Log(paramName, piece) + procInfo := ditypes.ProcessInfo{ + BinaryPath: binPath, + ProbesByID: ditypes.ProbesByID{ + testCases[i].FuncName: &ditypes.Probe{ + ServiceName: "sample", + FuncName: testCases[i].FuncName, + }, + }, } - } + err = AnalyzeBinary(&procInfo) + if err != nil { + t.Error(err) + } + require.Equal(t, testCases[i].ExpectedParameters, procInfo.TypeMap.Functions[testCases[i].FuncName]) + }) } + } // pwd returns the current directory of the caller. diff --git a/pkg/dynamicinstrumentation/diconfig/config_manager.go b/pkg/dynamicinstrumentation/diconfig/config_manager.go index b939b8827d6cb..df74ba82b77c1 100644 --- a/pkg/dynamicinstrumentation/diconfig/config_manager.go +++ b/pkg/dynamicinstrumentation/diconfig/config_manager.go @@ -127,7 +127,7 @@ func (cm *RCConfigManager) installConfigProbe(procInfo *ditypes.ProcessInfo) err svcConfigProbe := *configProbe svcConfigProbe.ServiceName = procInfo.ServiceName procInfo.ProbesByID[configProbe.ID] = &svcConfigProbe - + log.Infof("Installing config probe for service: %s", svcConfigProbe.ServiceName) err = AnalyzeBinary(procInfo) if err != nil { return fmt.Errorf("could not analyze binary for config probe: %w", err) @@ -138,7 +138,7 @@ func (cm *RCConfigManager) installConfigProbe(procInfo *ditypes.ProcessInfo) err return fmt.Errorf("could not generate bpf code for config probe: %w", err) } - err = ebpf.CompileBPFProgram(procInfo, configProbe) + err = ebpf.CompileBPFProgram(configProbe) if err != nil { return fmt.Errorf("could not compile bpf code for config probe: %w", err) } @@ -165,6 +165,9 @@ func (cm *RCConfigManager) installConfigProbe(procInfo *ditypes.ProcessInfo) err func (cm *RCConfigManager) readConfigs(r *ringbuf.Reader, procInfo *ditypes.ProcessInfo) { log.Tracef("Waiting for configs for service: %s", procInfo.ServiceName) + configRateLimiter := ratelimiter.NewMultiProbeRateLimiter(0.0) + configRateLimiter.SetRate(ditypes.ConfigBPFProbeID, 0) + for { record, err := r.Read() if err != nil { @@ -172,13 +175,14 @@ func (cm *RCConfigManager) readConfigs(r *ringbuf.Reader, procInfo *ditypes.Proc continue } - configEventParams, err := eventparser.ParseParams(record.RawSample) + configEvent, err := eventparser.ParseEvent(record.RawSample, configRateLimiter) if err != nil { log.Errorf("error parsing configuration for PID %d: %v", procInfo.PID, err) continue } + configEventParams := configEvent.Argdata if len(configEventParams) != 3 { - log.Errorf("error parsing configuration for PID %d: not enough arguments", procInfo.PID) + log.Errorf("error parsing configuration for PID: %d: not enough arguments", procInfo.PID) continue } @@ -238,7 +242,7 @@ func (cm *RCConfigManager) readConfigs(r *ringbuf.Reader, procInfo *ditypes.Proc } func applyConfigUpdate(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) { - log.Tracef("Applying config update: %v", probe) + log.Tracef("Applying config update: %v\n", probe) err := AnalyzeBinary(procInfo) if err != nil { log.Errorf("couldn't inspect binary: %v\n", err) @@ 
-258,7 +262,7 @@ generateCompileAttach: return } - err = ebpf.CompileBPFProgram(procInfo, probe) + err = ebpf.CompileBPFProgram(probe) if err != nil { // TODO: Emit diagnostic? log.Info("Couldn't compile BPF object", err) @@ -289,8 +293,8 @@ func newConfigProbe() *ditypes.Probe { FuncName: "gopkg.in/DataDog/dd-trace-go.v1/ddtrace/tracer.passProbeConfiguration", InstrumentationInfo: &ditypes.InstrumentationInfo{ InstrumentationOptions: &ditypes.InstrumentationOptions{ - ArgumentsMaxSize: 100000, - StringMaxSize: 30000, + ArgumentsMaxSize: 50000, + StringMaxSize: 10000, MaxFieldCount: int(ditypes.MaxFieldCount), MaxReferenceDepth: 8, CaptureParameters: true, diff --git a/pkg/dynamicinstrumentation/diconfig/dwarf.go b/pkg/dynamicinstrumentation/diconfig/dwarf.go index 85b9ba2ba4176..01091a4d059c3 100644 --- a/pkg/dynamicinstrumentation/diconfig/dwarf.go +++ b/pkg/dynamicinstrumentation/diconfig/dwarf.go @@ -26,15 +26,11 @@ func getTypeMap(dwarfData *dwarf.Data, targetFunctions map[string]bool) (*ditype return loadFunctionDefinitions(dwarfData, targetFunctions) } -var dwarfMap = make(map[string]*dwarf.Data) - type seenTypeCounter struct { parameter *ditypes.Parameter count uint8 } -var seenTypes = make(map[string]*seenTypeCounter) - func loadFunctionDefinitions(dwarfData *dwarf.Data, targetFunctions map[string]bool) (*ditypes.TypeMap, error) { entryReader := dwarfData.Reader() typeReader := dwarfData.Reader() @@ -42,17 +38,19 @@ func loadFunctionDefinitions(dwarfData *dwarf.Data, targetFunctions map[string]b var funcName string var result = ditypes.TypeMap{ - Functions: make(map[string][]ditypes.Parameter), - InlinedFunctions: make(map[uint64][]*dwarf.Entry), + Functions: make(map[string][]*ditypes.Parameter), } var ( name string + isReturn bool typeFields *ditypes.Parameter ) entryLoop: for { + seenTypes := make(map[string]*seenTypeCounter) + entry, err := entryReader.Next() if err == io.EOF || entry == nil { break @@ -83,25 +81,6 @@ entryLoop: } } - if entry.Tag == dwarf.TagInlinedSubroutine { - // This is a inlined function - for i := range entry.Field { - // Find it's high program counter (where it exits in the parent routine) - if entry.Field[i].Attr == dwarf.AttrHighpc { - - // The field for HighPC can be a constant or address, which are int64 and uint64 respectively - if entry.Field[i].Class == dwarf.ClassConstant { - result.InlinedFunctions[uint64(entry.Field[i].Val.(int64))] = - append([]*dwarf.Entry{entry}, result.InlinedFunctions[uint64(entry.Field[i].Val.(int64))]...) - } else if entry.Field[i].Class == dwarf.ClassAddress { - result.InlinedFunctions[entry.Field[i].Val.(uint64)] = - append([]*dwarf.Entry{entry}, result.InlinedFunctions[entry.Field[i].Val.(uint64)]...) 
- } - } - } - continue entryLoop - } - if entry.Tag == dwarf.TagSubprogram { for _, field := range entry.Field { @@ -117,7 +96,8 @@ entryLoop: if !targetFunctions[funcName] { continue entryLoop } - result.Functions[funcName] = make([]ditypes.Parameter, 0) + params := make([]*ditypes.Parameter, 0) + result.Functions[funcName] = params readingAFunction = true continue entryLoop } @@ -144,6 +124,10 @@ entryLoop: name = entry.Field[i].Val.(string) } + if entry.Field[i].Attr == dwarf.AttrVarParam { + isReturn = entry.Field[i].Val.(bool) + } + // Collect information about the type of this ditypes.Parameter if entry.Field[i].Attr == dwarf.AttrType { @@ -153,7 +137,7 @@ entryLoop: return nil, err } - typeFields, err = expandTypeData(typeEntry.Offset, dwarfData) + typeFields, err = expandTypeData(typeEntry.Offset, dwarfData, seenTypes) if err != nil { return nil, fmt.Errorf("error while parsing debug information: %w", err) } @@ -161,12 +145,11 @@ entryLoop: } } - if typeFields != nil { + if typeFields != nil && !isReturn /* we ignore return values for now */ { // We've collected information about this ditypes.Parameter, append it to the slice of ditypes.Parameters for this function typeFields.Name = name - result.Functions[funcName] = append(result.Functions[funcName], *typeFields) + result.Functions[funcName] = append(result.Functions[funcName], typeFields) } - seenTypes = make(map[string]*seenTypeCounter) // reset seen types map for next parameter } // Sort program counter slice for lookup when resolving pcs->functions @@ -181,23 +164,19 @@ entryLoop: } func loadDWARF(binaryPath string) (*dwarf.Data, error) { - if dwarfData, ok := dwarfMap[binaryPath]; ok { - return dwarfData, nil - } elfFile, err := safeelf.Open(binaryPath) if err != nil { return nil, fmt.Errorf("couldn't open elf binary: %w", err) } - + defer elfFile.Close() dwarfData, err := elfFile.DWARF() if err != nil { return nil, fmt.Errorf("couldn't retrieve debug info from elf: %w", err) } - dwarfMap[binaryPath] = dwarfData return dwarfData, nil } -func expandTypeData(offset dwarf.Offset, dwarfData *dwarf.Data) (*ditypes.Parameter, error) { +func expandTypeData(offset dwarf.Offset, dwarfData *dwarf.Data, seenTypes map[string]*seenTypeCounter) (*ditypes.Parameter, error) { typeReader := dwarfData.Reader() typeReader.Seek(offset) @@ -227,8 +206,8 @@ func expandTypeData(offset dwarf.Offset, dwarfData *dwarf.Data) (*ditypes.Parame v, typeParsedAlready := seenTypes[typeHeader.Type] if typeParsedAlready { v.count++ - if v.count >= ditypes.MaxReferenceDepth { - return v.parameter, nil + if v.count > ditypes.MaxReferenceDepth { + return &ditypes.Parameter{}, nil } } else { seenTypes[typeHeader.Type] = &seenTypeCounter{ @@ -237,26 +216,20 @@ func expandTypeData(offset dwarf.Offset, dwarfData *dwarf.Data) (*ditypes.Parame } } - if typeKind == uint(reflect.Slice) { - sliceElements, err := getSliceField(typeEntry.Offset, dwarfData) - if err != nil { - return nil, fmt.Errorf("could not collect fields of slice type: %w", err) - } - typeHeader = sliceElements[0] - } else if typeEntry.Tag == dwarf.TagStructType { - structFields, err := getStructFields(typeEntry.Offset, dwarfData) + if typeEntry.Tag == dwarf.TagStructType || typeKind == uint(reflect.Slice) || typeKind == uint(reflect.String) { + structFields, err := getStructFields(typeEntry.Offset, dwarfData, seenTypes) if err != nil { return nil, fmt.Errorf("could not collect fields of struct type of ditypes.Parameter: %w", err) } typeHeader.ParameterPieces = structFields } else if typeEntry.Tag == 
dwarf.TagArrayType { - arrayElements, err := getIndividualArrayElements(typeEntry.Offset, dwarfData) + arrayElements, err := getIndividualArrayElements(typeEntry.Offset, dwarfData, seenTypes) if err != nil { return nil, fmt.Errorf("could not get length of array: %w", err) } typeHeader.ParameterPieces = arrayElements } else if typeEntry.Tag == dwarf.TagPointerType { - pointerElements, err := getPointerLayers(typeEntry.Offset, dwarfData) + pointerElements, err := getPointerLayers(typeEntry.Offset, dwarfData, seenTypes) if err != nil { return nil, fmt.Errorf("could not find pointer type: %w", err) } @@ -266,50 +239,7 @@ func expandTypeData(offset dwarf.Offset, dwarfData *dwarf.Data) (*ditypes.Parame return &typeHeader, nil } -// getSliceField returns the representation of a slice as a []ditypes.Parameter. The returned -// slice will have only one element. -// -// Slices are represented internally in go as a struct with 3 fields. The pointer to the -// the underlying array, the array length, and the array capacity. -func getSliceField(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { - typeReader := dwarfData.Reader() - - typeReader.Seek(offset) - typeEntry, err := typeReader.Next() - if err != nil { - return nil, fmt.Errorf("could not get slice type entry: %w", err) - } - - elementTypeName, elementTypeSize, elementTypeKind := getTypeEntryBasicInfo(typeEntry) - sliceParameter := ditypes.Parameter{ - Type: elementTypeName, - TotalSize: elementTypeSize, - Kind: elementTypeKind, - } - - arrayEntry, err := typeReader.Next() - if err != nil { - return nil, fmt.Errorf("could not get slice type entry: %w", err) - } - - for i := range arrayEntry.Field { - if arrayEntry.Field[i].Attr == dwarf.AttrType { - typeReader.Seek(arrayEntry.Field[i].Val.(dwarf.Offset)) - typeEntry, err := typeReader.Next() - if err != nil { - return nil, err - } - underlyingType, err := expandTypeData(typeEntry.Offset, dwarfData) - if err != nil { - return nil, err - } - sliceParameter.ParameterPieces = append(sliceParameter.ParameterPieces, underlyingType.ParameterPieces[0]) - } - } - return []ditypes.Parameter{sliceParameter}, nil -} - -func getIndividualArrayElements(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { +func getIndividualArrayElements(offset dwarf.Offset, dwarfData *dwarf.Data, seenTypes map[string]*seenTypeCounter) ([]*ditypes.Parameter, error) { savedArrayEntryOffset := offset typeReader := dwarfData.Reader() @@ -339,7 +269,7 @@ func getIndividualArrayElements(offset dwarf.Offset, dwarfData *dwarf.Data) ([]d return nil, err } - elementFields, err = expandTypeData(arrayElementTypeEntry.Offset, dwarfData) + elementFields, err = expandTypeData(arrayElementTypeEntry.Offset, dwarfData, seenTypes) if err != nil { return nil, err } @@ -366,7 +296,7 @@ func getIndividualArrayElements(offset dwarf.Offset, dwarfData *dwarf.Data) ([]d } } - arrayElements := []ditypes.Parameter{} + arrayElements := []*ditypes.Parameter{} for h := 0; h < int(arrayLength); h++ { newParam := ditypes.Parameter{} copyTree(&newParam.ParameterPieces, &elementFields.ParameterPieces) @@ -374,17 +304,17 @@ func getIndividualArrayElements(offset dwarf.Offset, dwarfData *dwarf.Data) ([]d newParam.Type = elementTypeName newParam.Kind = elementTypeKind newParam.TotalSize = elementTypeSize - arrayElements = append(arrayElements, newParam) + arrayElements = append(arrayElements, &newParam) } return arrayElements, nil } -func getStructFields(offset dwarf.Offset, dwarfData *dwarf.Data) 
([]ditypes.Parameter, error) { +func getStructFields(offset dwarf.Offset, dwarfData *dwarf.Data, seenTypes map[string]*seenTypeCounter) ([]*ditypes.Parameter, error) { inOrderReader := dwarfData.Reader() typeReader := dwarfData.Reader() - structFields := []ditypes.Parameter{} + structFields := []*ditypes.Parameter{} fieldEntry := &dwarf.Entry{} // Start at the entry of the definition of the struct @@ -399,14 +329,14 @@ func getStructFields(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Para for { fieldEntry, err = inOrderReader.Next() if err != nil { - return []ditypes.Parameter{}, err + return []*ditypes.Parameter{}, err } if entryIsEmpty(fieldEntry) || fieldEntry.Tag != dwarf.TagMember { break } - newStructField := ditypes.Parameter{} + newStructField := &ditypes.Parameter{} for i := range fieldEntry.Field { @@ -420,30 +350,30 @@ func getStructFields(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Para typeReader.Seek(fieldEntry.Field[i].Val.(dwarf.Offset)) typeEntry, err := typeReader.Next() if err != nil { - return []ditypes.Parameter{}, err + return []*ditypes.Parameter{}, err } if !entryTypeIsSupported(typeEntry) { unsupportedType := resolveUnsupportedEntry(typeEntry) - structFields = append(structFields, *unsupportedType) + structFields = append(structFields, unsupportedType) continue } if typeEntry.Tag == dwarf.TagTypedef { typeEntry, err = resolveTypedefToRealType(typeEntry, typeReader) if err != nil { - return []ditypes.Parameter{}, err + return []*ditypes.Parameter{}, err } } newStructField.Type, newStructField.TotalSize, newStructField.Kind = getTypeEntryBasicInfo(typeEntry) if typeEntry.Tag != dwarf.TagBaseType { - field, err := expandTypeData(typeEntry.Offset, dwarfData) - if err != nil { - return []ditypes.Parameter{}, err + field, err := expandTypeData(typeEntry.Offset, dwarfData, seenTypes) + if err != nil || field == nil { + return []*ditypes.Parameter{}, err } field.Name = newStructField.Name - structFields = append(structFields, *field) + structFields = append(structFields, field) } else { structFields = append(structFields, newStructField) } @@ -453,7 +383,7 @@ func getStructFields(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Para return structFields, nil } -func getPointerLayers(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Parameter, error) { +func getPointerLayers(offset dwarf.Offset, dwarfData *dwarf.Data, seenTypes map[string]*seenTypeCounter) ([]*ditypes.Parameter, error) { typeReader := dwarfData.Reader() typeReader.Seek(offset) pointerEntry, err := typeReader.Next() @@ -470,16 +400,16 @@ func getPointerLayers(offset dwarf.Offset, dwarfData *dwarf.Data) ([]ditypes.Par return nil, err } - underlyingType, err = expandTypeData(typeEntry.Offset, dwarfData) + underlyingType, err = expandTypeData(typeEntry.Offset, dwarfData, seenTypes) if err != nil { return nil, err } } } if underlyingType == nil { - return []ditypes.Parameter{}, nil + return []*ditypes.Parameter{}, nil } - return []ditypes.Parameter{*underlyingType}, nil + return []*ditypes.Parameter{underlyingType}, nil } // Can use `Children` field, but there's also always a NULL/empty entry at the end of entry trees. 
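For context on why the DWARF walk above now routes strings and slices through getStructFields: the Go runtime lays both out as small headers, which matches the two- and three-element ParameterPieces shape the rest of this patch expects. A rough illustrative mirror of those layouts (not code from this change, and not the official runtime definitions):

// Unofficial mirrors of Go's runtime string and slice headers, shown only to
// make the piece ordering used throughout this patch concrete.
type stringHeader struct {
	Data unsafe.Pointer // piece 0: pointer to the backing bytes
	Len  int            // piece 1: length
}

type sliceHeader struct {
	Data unsafe.Pointer // piece 0: pointer to the backing array
	Len  int            // piece 1: length
	Cap  int            // piece 2: capacity
}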
@@ -556,32 +486,37 @@ func resolveTypedefToRealType(outerType *dwarf.Entry, reader *dwarf.Reader) (*dw return outerType, nil } -func correctStructSizes(params []ditypes.Parameter) { +func correctStructSizes(params []*ditypes.Parameter) { for i := range params { - correctStructSize(&params[i]) + correctStructSize(params[i]) } } // correctStructSize sets the size of structs to the number of fields in the struct func correctStructSize(param *ditypes.Parameter) { - if len(param.ParameterPieces) == 0 { + if param == nil || len(param.ParameterPieces) == 0 { return } if param.Kind == uint(reflect.Struct) || param.Kind == uint(reflect.Array) { param.TotalSize = int64(len(param.ParameterPieces)) } for i := range param.ParameterPieces { - correctStructSize(&param.ParameterPieces[i]) + correctStructSize(param.ParameterPieces[i]) } } -func copyTree(dst, src *[]ditypes.Parameter) { +func copyTree(dst, src *[]*ditypes.Parameter) { if dst == nil || src == nil || len(*src) == 0 { return } - *dst = make([]ditypes.Parameter, len(*src)) + *dst = make([]*ditypes.Parameter, len(*src)) copy(*dst, *src) for i := range *src { + // elements can be nil if there was a nil element originally in src + // that was copied to dst + if (*dst)[i] == nil || (*src)[i] == nil { + continue + } copyTree(&((*dst)[i].ParameterPieces), &((*src)[i].ParameterPieces)) } } diff --git a/pkg/dynamicinstrumentation/diconfig/file_config_manager.go b/pkg/dynamicinstrumentation/diconfig/file_config_manager.go index f39537adbbd80..67e8a93de9a50 100644 --- a/pkg/dynamicinstrumentation/diconfig/file_config_manager.go +++ b/pkg/dynamicinstrumentation/diconfig/file_config_manager.go @@ -14,7 +14,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -func NewFileConfigManager(configFile string) (*ReaderConfigManager, func(), error) { //nolint:revive // TODO +// NewFileConfigManager creates a new FileConfigManager +func NewFileConfigManager(configFile string) (*ReaderConfigManager, func(), error) { stopChan := make(chan bool) stop := func() { stopChan <- true @@ -35,7 +36,10 @@ func NewFileConfigManager(configFile string) (*ReaderConfigManager, func(), erro for { select { case rawBytes := <-updateChan: - cm.ConfigWriter.Write(rawBytes) //nolint:errcheck // TODO + _, err := cm.ConfigWriter.Write(rawBytes) + if err != nil { + log.Errorf("Error writing config file %s: %s", configFile, err) + } case <-stopChan: log.Info("stopping file config manager") fw.Stop() diff --git a/pkg/dynamicinstrumentation/diconfig/location_expression.go b/pkg/dynamicinstrumentation/diconfig/location_expression.go new file mode 100644 index 0000000000000..001f015912805 --- /dev/null +++ b/pkg/dynamicinstrumentation/diconfig/location_expression.go @@ -0,0 +1,341 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf + +package diconfig + +import ( + "fmt" + "reflect" + "strings" + + "math/rand" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// GenerateLocationExpression takes metadata about a parameter, including its type and location, and generates a list of +// LocationExpressions that can be used to read the parameter from the target process. +// +// It walks the tree of the parameter and its pieces, generating LocationExpressions for each piece. 
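As a concrete illustration of the output, the string branch of GenerateLocationExpression (further below) emits roughly the following small program when both string fields are directly register-assigned. The constructor names are the ones this patch adds to ditypes; the wrapper function and its parameters are hypothetical, shown only to make the shape of the result visible.

// exampleStringProgram is illustrative only and not part of this change.
func exampleStringProgram(data, length *ditypes.Parameter, maxLen uint) []ditypes.LocationExpression {
	return []ditypes.LocationExpression{
		ditypes.DirectReadLocationExpression(data),   // push the string's data pointer
		ditypes.DirectReadLocationExpression(length), // push the string's length
		// copy at most maxLen bytes of the string straight into the event output
		ditypes.DereferenceDynamicToOutputLocationExpression(maxLen),
	}
}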
+// +//nolint:revive +func GenerateLocationExpression(limitsInfo *ditypes.InstrumentationInfo, param *ditypes.Parameter) { + triePaths, expressionTargets := generateLocationVisitsMap(param) + + getParamFromTriePaths := func(pathElement string) *ditypes.Parameter { + for n := range triePaths { + if triePaths[n].TypePath == pathElement { + return triePaths[n].Parameter + } + } + return nil + } + + // Go through each target type/field which needs to be captured + for i := range expressionTargets { + pathToInstrumentationTarget, instrumentationTarget := expressionTargets[i].TypePath, expressionTargets[i].Parameter + + targetExpressions := []ditypes.LocationExpression{} + pathElements := []string{pathToInstrumentationTarget} + // pathElements gets populated with every individual stretch of the path to the instrumentation target + for { + lastElementIndex := strings.LastIndex(pathToInstrumentationTarget, "@") + if lastElementIndex == -1 { + break + } + pathToInstrumentationTarget = pathToInstrumentationTarget[:lastElementIndex] + pathElements = append([]string{pathToInstrumentationTarget}, pathElements...) + } + + // Go through each path element of the instrumentation target + for pathElementIndex := range pathElements { + var elementParam *ditypes.Parameter = getParamFromTriePaths(pathElements[pathElementIndex]) + if elementParam == nil { + log.Infof("Path not found to target: %s", pathElements[pathElementIndex]) + continue + } + // Check if this instrumentation target is directly assigned + if elementParam.Location != nil { + // Type is directly assigned + if elementParam.Kind == uint(reflect.Array) { + if elementParam.TotalSize == 0 && len(elementParam.ParameterPieces) == 0 { + continue + } + GenerateLocationExpression(limitsInfo, elementParam.ParameterPieces[0]) + expressionsToUseForEachArrayElement := collectAllLocationExpressions(elementParam.ParameterPieces[0], true) + targetExpressions = append(targetExpressions, + // Read process stack address to the stack + ditypes.ReadRegisterLocationExpression(ditypes.StackRegister, 8), + ditypes.ApplyOffsetLocationExpression(uint(elementParam.Location.StackOffset)), + ) + //FIXME: Do we need to limit lengths of arrays?? + for i := 0; i < len(elementParam.ParameterPieces); i++ { + targetExpressions = append(targetExpressions, + ditypes.CopyLocationExpression(), + ditypes.ApplyOffsetLocationExpression(uint(i*(int(elementParam.ParameterPieces[0].TotalSize)))), + ) + targetExpressions = append(targetExpressions, expressionsToUseForEachArrayElement...) 
+ } + } else if elementParam.Kind == uint(reflect.Pointer) { + targetExpressions = append(targetExpressions, + ditypes.DirectReadLocationExpression(elementParam), + ) + } else { + targetExpressions = append(targetExpressions, + ditypes.DirectReadLocationExpression(elementParam), + ditypes.PopLocationExpression(1, uint(elementParam.TotalSize)), + ) + } + continue + } else { /* end directly assigned types */ + // This is not directly assigned, expect the address for it on the stack + if elementParam.Kind == uint(reflect.Pointer) { + targetExpressions = append(targetExpressions, + ditypes.DereferenceLocationExpression(uint(elementParam.TotalSize)), + ) + } else if elementParam.Kind == uint(reflect.Struct) { + // Structs don't provide context on location, or have values themselves + // but we know that if there's a struct, the next element will have to have + // the offset applied + if len(pathElements) > pathElementIndex+1 { + // Apply the appropriate offset for the next element (the struct field) + structField := getParamFromTriePaths(pathElements[pathElementIndex+1]) + targetExpressions = append(targetExpressions, + ditypes.CopyLocationExpression(), + ditypes.ApplyOffsetLocationExpression(uint(structField.FieldOffset)), + ) + } + continue + } else if elementParam.Kind == uint(reflect.String) { + if len(instrumentationTarget.ParameterPieces) != 2 { + continue + } + stringCharArray := instrumentationTarget.ParameterPieces[0] + stringLength := instrumentationTarget.ParameterPieces[1] + if stringCharArray == nil || stringLength == nil { + continue + } + + if stringLength.Location != nil { + stringLength.LocationExpressions = append(stringLength.LocationExpressions, + ditypes.DirectReadLocationExpression(stringLength), + ditypes.PopLocationExpression(1, 2), + ) + } else { + stringLength.LocationExpressions = append(stringLength.LocationExpressions, + ditypes.ApplyOffsetLocationExpression(uint(stringLength.FieldOffset)), + ditypes.DereferenceToOutputLocationExpression(2), + ) + } + + if stringCharArray.Location != nil && stringLength.Location != nil { + // Fields of the string are directly assigned + targetExpressions = append(targetExpressions, + // Read string dynamically: + ditypes.DirectReadLocationExpression(stringCharArray), + ditypes.DirectReadLocationExpression(stringLength), + ditypes.DereferenceDynamicToOutputLocationExpression(uint(limitsInfo.InstrumentationOptions.StringMaxSize)), + ) + } else { + // Expect address of the string struct itself on the location expression stack + targetExpressions = append(targetExpressions, + ditypes.ReadStringToOutputLocationExpression(uint16(limitsInfo.InstrumentationOptions.StringMaxSize)), + ) + } + continue + /* end parsing strings */ + } else if elementParam.Kind == uint(reflect.Slice) { + if len(elementParam.ParameterPieces) != 3 { + continue + } + sliceIdentifier := randomLabel() + slicePointer := elementParam.ParameterPieces[0] + sliceLength := elementParam.ParameterPieces[1] + sliceLength.LocationExpressions = append(sliceLength.LocationExpressions, + ditypes.PrintStatement("%s", "Reading the length of slice"), + ) + if sliceLength.Location != nil { + sliceLength.LocationExpressions = append(sliceLength.LocationExpressions, + ditypes.DirectReadLocationExpression(sliceLength), + ditypes.PopLocationExpression(1, 2), + ) + } else { + sliceLength.LocationExpressions = append(sliceLength.LocationExpressions, + ditypes.ApplyOffsetLocationExpression(uint(sliceLength.FieldOffset)), + ditypes.DereferenceToOutputLocationExpression(2), + ) + } + if 
len(slicePointer.ParameterPieces) == 0 { + continue + } + + // Generate and collect the location expressions for collecting an individual + // element of this slice + sliceElementType := slicePointer.ParameterPieces[0] + + if slicePointer.Location != nil && sliceLength.Location != nil { + // Fields of the slice are directly assigned + targetExpressions = append(targetExpressions, + ditypes.PrintStatement("%s", "Reading the length of slice and setting limit (directly read)"), + ditypes.DirectReadLocationExpression(sliceLength), + ditypes.SetLimitEntry(sliceIdentifier, uint(ditypes.SliceMaxLength)), + ) + for i := 0; i < ditypes.SliceMaxLength; i++ { + GenerateLocationExpression(limitsInfo, sliceElementType) + expressionsToUseForEachSliceElement := collectAllLocationExpressions(sliceElementType, true) + labelName := randomLabel() + targetExpressions = append(targetExpressions, + ditypes.PrintStatement("%s", "Reading slice element "+fmt.Sprintf("%d", i)), + ditypes.JumpToLabelIfEqualToLimit(uint(i), sliceIdentifier, labelName), + ditypes.DirectReadLocationExpression(slicePointer), + ditypes.ApplyOffsetLocationExpression(uint(sliceElementType.TotalSize)*uint(i)), + ) + targetExpressions = append(targetExpressions, expressionsToUseForEachSliceElement...) + targetExpressions = append(targetExpressions, ditypes.InsertLabel(labelName)) + } + } else { + // Expect address of the slice struct on stack, use offsets accordingly + targetExpressions = append(targetExpressions, + ditypes.PrintStatement("%s", "Reading the length of slice and setting limit (indirect read)"), + ditypes.CopyLocationExpression(), // Setup stack so it has two pointers to slice struct + ditypes.ApplyOffsetLocationExpression(8), // Change the top pointer to the address of the length field + ditypes.DereferenceLocationExpression(8), // Dereference to place length on top of the stack + ditypes.SetLimitEntry(sliceIdentifier, uint(ditypes.SliceMaxLength)), + ) + // Expect address of slice struct on top of the stack, check limit and copy/apply offset accordingly + for i := 0; i < ditypes.SliceMaxLength; i++ { + GenerateLocationExpression(limitsInfo, sliceElementType) + expressionsToUseForEachSliceElement := collectAllLocationExpressions(sliceElementType, true) + labelName := randomLabel() + targetExpressions = append(targetExpressions, + ditypes.PrintStatement("%s", "Reading slice element "+fmt.Sprintf("%d", i)), + ditypes.JumpToLabelIfEqualToLimit(uint(i), sliceIdentifier, labelName), + ditypes.CopyLocationExpression(), + ditypes.DereferenceLocationExpression(8), + ditypes.ApplyOffsetLocationExpression(uint(i*(int(sliceElementType.TotalSize)))), + ) + targetExpressions = append(targetExpressions, expressionsToUseForEachSliceElement...) + targetExpressions = append(targetExpressions, ditypes.InsertLabel(labelName)) + } + } + continue + /* end parsing slices */ + } else if elementParam.Kind == uint(reflect.Array) { + // Expect the address of the array itself on the stack + if elementParam.TotalSize == 0 && len(elementParam.ParameterPieces) == 0 { + continue + } + //FIXME: Do we need to limit lengths of arrays?? 
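+					// Indirectly assigned arrays expect the array's address on the expression
+					// stack already; after guarding against a missing element type, the same
+					// Copy + ApplyOffset(i*elementSize) replay is used for every index.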
+ if elementParam.ParameterPieces[0] == nil { + continue + } + GenerateLocationExpression(limitsInfo, elementParam.ParameterPieces[0]) + expressionsToUseForEachArrayElement := collectAllLocationExpressions(elementParam.ParameterPieces[0], true) + for i := 0; i < len(elementParam.ParameterPieces); i++ { + targetExpressions = append(targetExpressions, + ditypes.CopyLocationExpression(), + ditypes.ApplyOffsetLocationExpression(uint(int(elementParam.ParameterPieces[0].TotalSize)*i)), + ) + targetExpressions = append(targetExpressions, expressionsToUseForEachArrayElement...) + } + } else { + // Basic type, indirectly assigned + targetExpressions = append(targetExpressions, + ditypes.DereferenceToOutputLocationExpression(uint(elementParam.TotalSize))) + } + } /* end indirectly assigned types */ + } + expressionTargets[i].Parameter.LocationExpressions = targetExpressions + } +} + +func collectAllLocationExpressions(parameter *ditypes.Parameter, remove bool) []ditypes.LocationExpression { + if parameter == nil { + return []ditypes.LocationExpression{} + } + expressions := parameter.LocationExpressions + for i := range parameter.ParameterPieces { + expressions = append(expressions, collectAllLocationExpressions(parameter.ParameterPieces[i], remove)...) + } + if remove { + parameter.LocationExpressions = []ditypes.LocationExpression{} + } + return expressions +} + +//nolint:all +func printLocationExpressions(expressions []ditypes.LocationExpression) { + for i := range expressions { + fmt.Printf("%s %d %d %d %s %s\n", + expressions[i].Opcode.String(), + expressions[i].Arg1, + expressions[i].Arg2, + expressions[i].Arg3, + expressions[i].Label, + expressions[i].CollectionIdentifier, + ) + } +} + +type expressionParamTuple struct { + TypePath string + Parameter *ditypes.Parameter +} + +// generateLocationVisitsMap follows the tree of parameters (parameter.ParameterPieces), and +// collects string values of all the paths to nodes that need expressions (`needsExpressions`), +// as well as all combinations of elements that can be achieved by walking the tree (`trieKeys`). 
+func generateLocationVisitsMap(parameter *ditypes.Parameter) (trieKeys, needsExpressions []expressionParamTuple) { + trieKeys = []expressionParamTuple{} + needsExpressions = []expressionParamTuple{} + + var visit func(param *ditypes.Parameter, path string) + visit = func(param *ditypes.Parameter, path string) { + if param == nil { + return + } + trieKeys = append(trieKeys, expressionParamTuple{path + param.Type, param}) + + if (len(param.ParameterPieces) == 0 || + isBasicType(param.Kind) || + param.Kind == uint(reflect.Array) || + param.Kind == uint(reflect.Slice)) && + param.Kind != uint(reflect.Struct) && + param.Kind != uint(reflect.Pointer) { + needsExpressions = append(needsExpressions, expressionParamTuple{path + param.Type, param}) + return + } + + for i := range param.ParameterPieces { + newPath := path + param.Type + "@" + visit(param.ParameterPieces[i], newPath) + } + } + visit(parameter, "") + return trieKeys, needsExpressions +} + +func isBasicType(kind uint) bool { + switch reflect.Kind(kind) { + case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, + reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String: + return true + default: + return false + } +} + +func randomLabel() string { + length := 6 + randomString := make([]byte, length) + for i := 0; i < length; i++ { + randomString[i] = byte(65 + rand.Intn(25)) + } + return string(randomString) +} diff --git a/pkg/dynamicinstrumentation/diconfig/location_expression_test.go b/pkg/dynamicinstrumentation/diconfig/location_expression_test.go new file mode 100644 index 0000000000000..fff581de716a3 --- /dev/null +++ b/pkg/dynamicinstrumentation/diconfig/location_expression_test.go @@ -0,0 +1,54 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf + +package diconfig + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" +) + +func TestLocationExpressionGeneration(t *testing.T) { + testCases := []struct { + Name string + Parameter *ditypes.Parameter + Limits *ditypes.InstrumentationInfo + ExpectedOutput []ditypes.LocationExpression + }{ + { + Name: "DirectlyAssignedRegisterUint", + Parameter: &ditypes.Parameter{ + Type: "uint", + Kind: uint(reflect.Uint), + TotalSize: 8, + Location: &ditypes.Location{ + InReg: true, + Register: 1, + PointerOffset: 9999, // should not be used + StackOffset: 8888, // should not be used + NeedsDereference: true, // should not be used + }, + }, + ExpectedOutput: []ditypes.LocationExpression{ + ditypes.ReadRegisterLocationExpression(1, 8), + ditypes.PopLocationExpression(1, 8), + }, + }, + } + + for _, testcase := range testCases { + t.Run(testcase.Name, func(t *testing.T) { + GenerateLocationExpression(testcase.Limits, testcase.Parameter) + resultExpressions := collectAllLocationExpressions(testcase.Parameter, true) + require.Equal(t, testcase.ExpectedOutput, resultExpressions) + }) + } +} diff --git a/pkg/dynamicinstrumentation/diconfig/mem_config_manager.go b/pkg/dynamicinstrumentation/diconfig/mem_config_manager.go index 16301afddd2d5..381080eab517f 100644 --- a/pkg/dynamicinstrumentation/diconfig/mem_config_manager.go +++ b/pkg/dynamicinstrumentation/diconfig/mem_config_manager.go @@ -31,10 +31,10 @@ type ReaderConfigManager struct { state ditypes.DIProcs } -type readerConfigCallback func(configsByService) //nolint:unused // TODO type configsByService = map[ditypes.ServiceName]map[ditypes.ProbeID]rcConfig -func NewReaderConfigManager() (*ReaderConfigManager, error) { //nolint:revive // TODO +// NewReaderConfigManager creates a new ReaderConfigManager +func NewReaderConfigManager() (*ReaderConfigManager, error) { cm := &ReaderConfigManager{ callback: applyConfigUpdate, state: ditypes.NewDIProcs(), @@ -55,11 +55,13 @@ func NewReaderConfigManager() (*ReaderConfigManager, error) { //nolint:revive // return cm, nil } -func (cm *ReaderConfigManager) GetProcInfos() ditypes.DIProcs { //nolint:revive // TODO +// GetProcInfos returns the process info state +func (cm *ReaderConfigManager) GetProcInfos() ditypes.DIProcs { return cm.state } -func (cm *ReaderConfigManager) Stop() { //nolint:revive // TODO +// Stop causes the ReaderConfigManager to stop processing data +func (cm *ReaderConfigManager) Stop() { cm.ConfigWriter.Stop() cm.procTracker.Stop() } @@ -131,7 +133,8 @@ func (cm *ReaderConfigManager) updateServiceConfigs(configs configsByService) { } } -type ConfigWriter struct { //nolint:revive // TODO +// ConfigWriter handles writing configuration data +type ConfigWriter struct { io.Writer updateChannel chan ([]byte) Processes map[ditypes.PID]*ditypes.ProcessInfo @@ -139,9 +142,11 @@ type ConfigWriter struct { //nolint:revive // TODO stopChannel chan (bool) } -type ConfigWriterCallback func(configsByService) //nolint:revive // TODO +// ConfigWriterCallback provides a callback interface for ConfigWriter +type ConfigWriterCallback func(configsByService) -func NewConfigWriter(onConfigUpdate ConfigWriterCallback) *ConfigWriter { //nolint:revive // TODO +// NewConfigWriter creates a new ConfigWriter +func NewConfigWriter(onConfigUpdate ConfigWriterCallback) *ConfigWriter { return &ConfigWriter{ updateChannel: make(chan []byte, 1), configCallback: onConfigUpdate, @@ -154,7 +159,8 @@ func (r 
*ConfigWriter) Write(p []byte) (n int, e error) { return 0, nil } -func (r *ConfigWriter) Start() error { //nolint:revive // TODO +// Start initiates the ConfigWriter to start processing data +func (r *ConfigWriter) Start() error { go func() { configUpdateLoop: for { @@ -175,18 +181,19 @@ func (r *ConfigWriter) Start() error { //nolint:revive // TODO return nil } -func (cu *ConfigWriter) Stop() { //nolint:revive // TODO - cu.stopChannel <- true +// Stop causes the ConfigWriter to stop processing data +func (r *ConfigWriter) Stop() { + r.stopChannel <- true } // UpdateProcesses is the callback interface that ConfigWriter uses to consume the map of ProcessInfo's // such that it's used whenever there's an update to the state of known service processes on the machine. // It simply overwrites the previous state of known service processes with the new one -func (cu *ConfigWriter) UpdateProcesses(procs ditypes.DIProcs) { //nolint:revive // TODO +func (r *ConfigWriter) UpdateProcesses(procs ditypes.DIProcs) { current := procs - old := cu.Processes + old := r.Processes if !reflect.DeepEqual(current, old) { - cu.Processes = current + r.Processes = current } } @@ -208,6 +215,7 @@ func (rc *rcConfig) toProbe(service string) *ditypes.Probe { CaptureParameters: ditypes.CaptureParameters, ArgumentsMaxSize: ditypes.ArgumentsMaxSize, StringMaxSize: ditypes.StringMaxSize, + SliceMaxLength: ditypes.SliceMaxLength, MaxReferenceDepth: rc.Capture.MaxReferenceDepth, }, }, diff --git a/pkg/dynamicinstrumentation/ditypes/analysis.go b/pkg/dynamicinstrumentation/ditypes/analysis.go index 0aa4a698e5782..c311ff7f74bec 100644 --- a/pkg/dynamicinstrumentation/ditypes/analysis.go +++ b/pkg/dynamicinstrumentation/ditypes/analysis.go @@ -12,15 +12,10 @@ import ( "fmt" ) -// TypeMap contains all the information about functions and their parameters including -// functions that have been inlined in the binary +// TypeMap contains all the information about functions and their parameters type TypeMap struct { // Functions maps fully-qualified function names to a slice of its parameters - Functions map[string][]Parameter - - // InlinedFunctions maps program counters to a slice of dwarf entries used - // when resolving stack traces that include inlined functions - InlinedFunctions map[uint64][]*dwarf.Entry + Functions map[string][]*Parameter // FunctionsByPC places DWARF subprogram (function) entries in order by // its low program counter which is necessary for resolving stack traces @@ -34,14 +29,16 @@ type TypeMap struct { // Parameter represents a function parameter as read from DWARF info type Parameter struct { - Name string - ID string - Type string - TotalSize int64 - Kind uint - Location Location - NotCaptureReason NotCaptureReason - ParameterPieces []Parameter + Name string // Name is populated by the local name of the parameter + ID string // ID is randomly generated for each parameter to avoid + Type string // Type is a string representation of the type name + TotalSize int64 // TotalSize is the size of the parameter type + Kind uint // Kind is a constant representation of the type, see reflect.Kind + Location *Location // Location represents where the parameter will be in memory when passed to the target function + LocationExpressions []LocationExpression // LocationExpressions are the needed instructions for extracting the parameter value from memory + FieldOffset uint64 // FieldOffset is the offset of Parameter field within a struct, if it is a struct field + NotCaptureReason NotCaptureReason // NotCaptureReason 
conveys to the user why the parameter was not captured + ParameterPieces []*Parameter // ParameterPieces are the sub-fields, such as struct fields or array elements } func (p Parameter) String() string { @@ -78,6 +75,300 @@ func (s SpecialKind) String() string { } } +func (l LocationExpression) String() string { + return fmt.Sprintf("%s (%d, %d, %d)", l.Opcode.String(), l.Arg1, l.Arg2, l.Arg3) +} + +// LocationExpressionOpcode uniquely identifies each location expression operation +type LocationExpressionOpcode uint + +const ( + // OpInvalid represents an invalid operation + OpInvalid LocationExpressionOpcode = iota + // OpComment represents a comment operation + OpComment + // OpPrintStatement represents a print statement operation + OpPrintStatement + // OpReadUserRegister represents an operation to read a user register + OpReadUserRegister + // OpReadUserStack represents an operation to read the user stack + OpReadUserStack + // OpReadUserRegisterToOutput represents an operation to read a user register and output the value + OpReadUserRegisterToOutput + // OpReadUserStackToOutput represents an operation to read the user stack and output the value + OpReadUserStackToOutput + // OpDereference represents an operation to dereference a pointer + OpDereference + // OpDereferenceToOutput represents an operation to dereference a pointer and output the value + OpDereferenceToOutput + // OpDereferenceLarge represents an operation to dereference a large pointer + OpDereferenceLarge + // OpDereferenceLargeToOutput represents an operation to dereference a large pointer and output the value + OpDereferenceLargeToOutput + // OpDereferenceDynamic represents an operation to dynamically dereference a pointer + OpDereferenceDynamic + // OpDereferenceDynamicToOutput represents an operation to dynamically dereference a pointer and output the value + OpDereferenceDynamicToOutput + // OpReadStringToOutput represents an operation to read a string and output the value + OpReadStringToOutput + // OpApplyOffset represents an operation to apply an offset + OpApplyOffset + // OpPop represents an operation to pop a value from the stack + OpPop + // OpCopy represents an operation to copy a value + OpCopy + // OpLabel represents a label operation + OpLabel + // OpSetGlobalLimit represents an operation to set a global limit + OpSetGlobalLimit + // OpJumpIfGreaterThanLimit represents an operation to jump if a value is greater than a limit + OpJumpIfGreaterThanLimit +) + +func (op LocationExpressionOpcode) String() string { + switch op { + case OpInvalid: + return "Invalid" + case OpComment: + return "Comment" + case OpPrintStatement: + return "PrintStatement" + case OpReadUserRegister: + return "ReadUserRegister" + case OpReadUserStack: + return "ReadUserStack" + case OpReadUserRegisterToOutput: + return "ReadUserRegisterToOutput" + case OpReadUserStackToOutput: + return "ReadUserStackToOutput" + case OpDereference: + return "Dereference" + case OpDereferenceToOutput: + return "DereferenceToOutput" + case OpDereferenceLarge: + return "DereferenceLarge" + case OpDereferenceLargeToOutput: + return "DereferenceLargeToOutput" + case OpDereferenceDynamic: + return "DereferenceDynamic" + case OpDereferenceDynamicToOutput: + return "DereferenceDynamicToOutput" + case OpReadStringToOutput: + return "ReadStringToOutput" + case OpApplyOffset: + return "ApplyOffset" + case OpPop: + return "Pop" + case OpCopy: + return "Copy" + case OpLabel: + return "Label" + case OpSetGlobalLimit: + return "SetGlobalLimit" + case 
OpJumpIfGreaterThanLimit: + return "JumpIfGreaterThanLimit" + default: + return fmt.Sprintf("LocationExpressionOpcode(%d)", int(op)) + } +} + +// CopyLocationExpression express creates an expression which +// duplicates the u64 element on the top of the BPF parameter stack. +func CopyLocationExpression() LocationExpression { + return LocationExpression{Opcode: OpCopy} +} + +// DirectReadLocationExpression creates an expression which +// directly reads a value from either a specific register or stack offset +// and writes it to the bpf param stack +func DirectReadLocationExpression(p *Parameter) LocationExpression { + if p == nil || p.Location == nil { + return LocationExpression{Opcode: OpInvalid} + } + if p.Location.InReg { + return ReadRegisterLocationExpression(uint(p.Location.Register), uint(p.TotalSize)) + } + return ReadStackLocationExpression(uint(p.Location.StackOffset), uint(p.TotalSize)) +} + +// DirectReadToOutputLocationExpression creates an expression which +// directly reads a value from either a specific register or stack offset +// and writes it to the output buffer +func DirectReadToOutputLocationExpression(p *Parameter) LocationExpression { + if p == nil || p.Location == nil { + return LocationExpression{Opcode: OpInvalid} + } + if p.Location.InReg { + return ReadRegisterToOutputLocationExpression(uint(p.Location.Register), uint(p.TotalSize)) + } + return ReadStackToOutputLocationExpression(uint(p.Location.StackOffset), uint(p.TotalSize)) +} + +// ReadRegisterLocationExpression creates an expression which +// reads `size` bytes from register `reg` into a u64 which is then pushed to +// the top of the BPF parameter stack. +// Arg1 = register +// Arg2 = size of element +func ReadRegisterLocationExpression(register, size uint) LocationExpression { + return LocationExpression{Opcode: OpReadUserRegister, Arg1: register, Arg2: size} +} + +// ReadStackLocationExpression creates an expression which +// reads `size` bytes from the traced program's stack at offset `stack_offset` +// into a u64 which is then pushed to the top of the BPF parameter stack. +// Arg1 = stack offset +// Arg2 = size of element +func ReadStackLocationExpression(offset, size uint) LocationExpression { + return LocationExpression{Opcode: OpReadUserStack, Arg1: offset, Arg2: size} +} + +// ReadRegisterToOutputLocationExpression creates an expression which +// reads `size` bytes from register `reg` into a u64 which is then written to +// the output buffer. +// Arg1 = register +// Arg2 = size of element +func ReadRegisterToOutputLocationExpression(register, size uint) LocationExpression { + return LocationExpression{Opcode: OpReadUserRegisterToOutput, Arg1: register, Arg2: size} +} + +// ReadStackToOutputLocationExpression creates an expression which +// reads `size` bytes from the traced program's stack at offset `stack_offset` +// into a u64 which is then written to the output buffer +// Arg1 = stack offset +// Arg2 = size of element +func ReadStackToOutputLocationExpression(offset, size uint) LocationExpression { + return LocationExpression{Opcode: OpReadUserStackToOutput, Arg1: offset, Arg2: size} +} + +// DereferenceLocationExpression creates an expression which +// pops the 8-byte address from the top of the BPF parameter stack and dereferences +// it, reading a value of size `valueSize` from it, and pushes that value (encoded as a u64) +// back to the BPF parameter stack. 
+// It should only be used for types of 8 bytes or less +// Arg1 = size of value we're reading from the 8 byte address at the top of the stack +func DereferenceLocationExpression(valueSize uint) LocationExpression { + if valueSize > 8 { + return LocationExpression{Opcode: OpDereferenceLarge, Arg1: valueSize, Arg2: (valueSize + 7) / 8} + } + return LocationExpression{Opcode: OpDereference, Arg1: valueSize} +} + +// DereferenceToOutputLocationExpression creates an expression which +// pops the 8-byte address from the top of the BPF parameter stack and +// dereferences it, reading a value of size `valueSize` from it, and writes that value +// directly to the output buffer. +// It should only be used for types of 8 bytes or less +// Arg1 = size of value we're reading from the 8 byte address at the top of the stack +func DereferenceToOutputLocationExpression(valueSize uint) LocationExpression { + if valueSize > 8 { + return LocationExpression{Opcode: OpDereferenceLargeToOutput, Arg1: valueSize, Arg2: (valueSize + 7) / 8} + } + return LocationExpression{Opcode: OpDereferenceToOutput, Arg1: valueSize} +} + +// DereferenceLargeLocationExpression creates an expression which +// pops the 8-byte address from the top of the BPF parameter stack and dereferences +// it, reading a value of size `typeSize` from it, and pushes that value, encoded in 8-byte chunks +// to the BPF parameter stack. This is safe to use for types larger than 8-bytes. +// back to the BPF parameter stack. +// Arg1 = size in bytes of value we're reading from the 8 byte address at the top of the stack +// Arg2 = number of chunks (should be ({{.Arg1}} + 7) / 8) +func DereferenceLargeLocationExpression(typeSize uint) LocationExpression { + return LocationExpression{Opcode: OpDereferenceLarge, Arg1: typeSize, Arg2: (typeSize + 7) / 8} +} + +// DereferenceLargeToOutputLocationExpression creates an expression which +// pops the 8-byte address from the top of the BPF parameter stack and dereferences +// it, reading a value of size `typeSize` from it, and writes that value to the output buffer. +// This is safe to use for types larger than 8-bytes. +// Arg1 = size in bytes of value we're reading from the 8 byte address at the top of the stack +// Arg2 = number of chunks (should be ({{.Arg1}} + 7) / 8) +func DereferenceLargeToOutputLocationExpression(typeSize uint) LocationExpression { + return LocationExpression{Opcode: OpDereferenceLargeToOutput, Arg1: typeSize, Arg2: (typeSize + 7) / 8} +} + +// DereferenceDynamicToOutputLocationExpression creates an expression which +// reads an 8-byte length from the top of the BPF parameter stack, followed by +// an 8-byte address. It applies the maximum `readLimit` to the length, then dereferences the address to +// the output buffer. +// Maximum limit (Arg1) should be set to the size of each element * max collection length +// Arg1 = maximum limit on bytes read +func DereferenceDynamicToOutputLocationExpression(readLimit uint) LocationExpression { + return LocationExpression{Opcode: OpDereferenceDynamicToOutput, Arg1: readLimit} +} + +// ReadStringToOutputLocationExpression creates an expression which +// reads a Go string to the output buffer, limited in length by `limit`. +// In Go, strings are internally implemented as structs with two fields. The fields are length, +// and a pointer to a character array. This expression expects the address of the string struct +// itself to be on the top of the stack. 
+// Arg1 = string length limit +func ReadStringToOutputLocationExpression(limit uint16) LocationExpression { + return LocationExpression{Opcode: OpReadStringToOutput, Arg1: uint(limit)} +} + +// ApplyOffsetLocationExpression creates an expression which +// adds `offset` to the 8-byte address on the top of the bpf parameter stack. +// Arg1 = uint value (offset) we're adding to the 8-byte address on top of the stack +func ApplyOffsetLocationExpression(offset uint) LocationExpression { + return LocationExpression{Opcode: OpApplyOffset, Arg1: offset} +} + +// PopLocationExpression creates an expression which +// writes to output `num_elements` elements, each of size `elementSize, from the top of the stack. +// Arg1 = number of elements to pop +// Arg2 = size of each element +func PopLocationExpression(numElements, elementSize uint) LocationExpression { + return LocationExpression{Opcode: OpPop, Arg1: numElements, Arg2: elementSize} +} + +// InsertLabel inserts a label in the bpf program +// No args, just set label +func InsertLabel(label string) LocationExpression { + return LocationExpression{Opcode: OpLabel, Label: label} +} + +// SetLimitEntry associates a collection identifier with the passed limit +// Arg1 = limit to set +// CollectionIdentifier = the collection that we're limiting +func SetLimitEntry(collectionIdentifier string, limit uint) LocationExpression { + return LocationExpression{Opcode: OpSetGlobalLimit, CollectionIdentifier: collectionIdentifier, Arg1: limit} +} + +// JumpToLabelIfEqualToLimit jumps to a specified label if the limit associated with the collection (by identifier) +// is equal to the passed value +// Arg1 = value to compare to global limit variable +// CollectionIdentifier = the collection that we're limiting +// Label = label to jump to if the value is equal to the global limit variable +func JumpToLabelIfEqualToLimit(val uint, collectionIdentifier, label string) LocationExpression { + return LocationExpression{Opcode: OpJumpIfGreaterThanLimit, CollectionIdentifier: collectionIdentifier, Arg1: val, Label: label} +} + +// InsertComment inserts a comment into the bpf program +// Label = comment +func InsertComment(comment string) LocationExpression { + return LocationExpression{Opcode: OpComment, Label: comment} +} + +// PrintStatement inserts a print statement into the bpf program +// Label = format +// CollectionIdentifier = arguments +// Example usage: PrintStatement("%d", "variableName") +func PrintStatement(format, arguments string) LocationExpression { + return LocationExpression{Opcode: OpPrintStatement, Label: format, CollectionIdentifier: arguments} +} + +// LocationExpression is an operation which will be executed in bpf with the purpose +// of capturing parameters from a running Go program +type LocationExpression struct { + Opcode LocationExpressionOpcode + Arg1 uint + Arg2 uint + Arg3 uint + CollectionIdentifier string + Label string +} + // Location represents where a particular datatype is found on probe entry type Location struct { InReg bool diff --git a/pkg/dynamicinstrumentation/ditypes/arch_amd64.go b/pkg/dynamicinstrumentation/ditypes/arch_amd64.go new file mode 100644 index 0000000000000..a80b8e187ed27 --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/arch_amd64.go @@ -0,0 +1,13 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux_bpf && amd64 + +package ditypes + +// StackRegister is the register containing the address of the +// program stack. On x86 DWARF maps the register number 7 to +// the stack pointer. +const StackRegister = 7 diff --git a/pkg/dynamicinstrumentation/ditypes/arch_arm64.go b/pkg/dynamicinstrumentation/ditypes/arch_arm64.go new file mode 100644 index 0000000000000..ac80bd65864fd --- /dev/null +++ b/pkg/dynamicinstrumentation/ditypes/arch_arm64.go @@ -0,0 +1,13 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux_bpf && arm64 + +package ditypes + +// StackRegister is the register containing the address of the +// program stack. On ARM64 DWARF maps the register number 29 to +// the stack pointer. +const StackRegister = 29 diff --git a/pkg/dynamicinstrumentation/ditypes/config.go b/pkg/dynamicinstrumentation/ditypes/config.go index ba9667e069552..3614a6754e30f 100644 --- a/pkg/dynamicinstrumentation/ditypes/config.go +++ b/pkg/dynamicinstrumentation/ditypes/config.go @@ -29,11 +29,10 @@ const ConfigBPFProbeID = "config" // ConfigBPFProbeID is the ID used for the con var ( CaptureParameters = true // CaptureParameters is the default value for if probes should capture parameter values ArgumentsMaxSize = 10000 // ArgumentsMaxSize is the default size in bytes of the output buffer used for param values - StringMaxSize = 512 // StringMaxSize is the default size in bytes of a single string + StringMaxSize = 512 // StringMaxSize is the length limit MaxReferenceDepth uint8 = 4 // MaxReferenceDepth is the default depth that DI will traverse datatypes for capturing values MaxFieldCount = 20 // MaxFieldCount is the default limit for how many fields DI will capture in a single data type - SliceMaxSize = 1800 // SliceMaxSize is the default limit in bytes of a slice - SliceMaxLength = 100 // SliceMaxLength is the default limit in number of elements of a slice + SliceMaxLength = 10 // SliceMaxLength is the default limit in number of elements of a slice ) // ProbeID is the unique identifier for probes @@ -260,7 +259,6 @@ type InstrumentationOptions struct { StringMaxSize int MaxReferenceDepth int MaxFieldCount int - SliceMaxSize int SliceMaxLength int } diff --git a/pkg/dynamicinstrumentation/ditypes/ebpf.go b/pkg/dynamicinstrumentation/ditypes/ebpf.go index 40bc53fdf9d28..7b4121e79ec1d 100644 --- a/pkg/dynamicinstrumentation/ditypes/ebpf.go +++ b/pkg/dynamicinstrumentation/ditypes/ebpf.go @@ -8,7 +8,7 @@ package ditypes /* -#include "../codegen/c/types.h" +#include "../codegen/c/base_event.h" */ import "C" diff --git a/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go b/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go index d76d6c39c2105..ab6efaabe5e33 100644 --- a/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go +++ b/pkg/dynamicinstrumentation/ditypes/ebpf_linux.go @@ -4,10 +4,10 @@ package ditypes type BaseEvent struct { - Probe_id [304]byte + Probe_id [36]byte Pid uint32 Uid uint32 Program_counters [10]uint64 } -const SizeofBaseEvent = 0x188 +const SizeofBaseEvent = 0x80 diff --git a/pkg/dynamicinstrumentation/ebpf/ebpf.go b/pkg/dynamicinstrumentation/ebpf/ebpf.go index ed2c7578f685b..8703634ad69cc 100644 --- a/pkg/dynamicinstrumentation/ebpf/ebpf.go +++ b/pkg/dynamicinstrumentation/ebpf/ebpf.go @@ -14,7 +14,6 @@ import ( "fmt" "io" "text/template" - "time" 
"github.com/cilium/ebpf" "github.com/cilium/ebpf/link" @@ -126,8 +125,8 @@ func AttachBPFUprobe(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error return nil } -// CompileBPFProgram compiles the code for a single probe associated with the process given by procInfo -func CompileBPFProgram(procInfo *ditypes.ProcessInfo, probe *ditypes.Probe) error { //nolint:revive // TODO +// CompileBPFProgram compiles the code for a single probe +func CompileBPFProgram(probe *ditypes.Probe) error { f := func(in io.Reader, out io.Writer) error { fileContents, err := io.ReadAll(in) if err != nil { @@ -169,7 +168,3 @@ func getCFlags(config *ddebpf.Config) []string { } return cflags } - -const ( - compilationStepTimeout = 60 * time.Second //nolint:unused // TODO -) diff --git a/pkg/dynamicinstrumentation/eventparser/event_parser.go b/pkg/dynamicinstrumentation/eventparser/event_parser.go index 3b34303fa137b..7e336cc989b7a 100644 --- a/pkg/dynamicinstrumentation/eventparser/event_parser.go +++ b/pkg/dynamicinstrumentation/eventparser/event_parser.go @@ -21,74 +21,51 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -// MaxBufferSize is the maximum size of the output buffer from bpf which is read by this package -const MaxBufferSize = 10000 - var ( - byteOrder = binary.LittleEndian + byteOrder = binary.NativeEndian ) // ParseEvent takes the raw buffer from bpf and parses it into an event. It also potentially // applies a rate limit -func ParseEvent(record []byte, ratelimiters *ratelimiter.MultiProbeRateLimiter) *ditypes.DIEvent { +func ParseEvent(record []byte, ratelimiters *ratelimiter.MultiProbeRateLimiter) (*ditypes.DIEvent, error) { event := ditypes.DIEvent{} if len(record) < ditypes.SizeofBaseEvent { - log.Tracef("malformed event record (length %d)", len(record)) - return nil + return nil, fmt.Errorf("malformed event record (length %d)", len(record)) } baseEvent := *(*ditypes.BaseEvent)(unsafe.Pointer(&record[0])) event.ProbeID = unix.ByteSliceToString(baseEvent.Probe_id[:]) allowed, droppedEvents, successfulEvents := ratelimiters.AllowOneEvent(event.ProbeID) if !allowed { - log.Tracef("event dropped by rate limit. 
Probe %s\t(%d dropped events out of %d)\n", + return nil, fmt.Errorf("event dropped by rate limit, probe %s (%d dropped events out of %d)", event.ProbeID, droppedEvents, droppedEvents+successfulEvents) - return nil } event.PID = baseEvent.Pid event.UID = baseEvent.Uid event.StackPCs = baseEvent.Program_counters[:] event.Argdata = readParams(record[ditypes.SizeofBaseEvent:]) - return &event -} - -// ParseParams extracts just the parsed parameters from the full event record -func ParseParams(record []byte) ([]*ditypes.Param, error) { - if len(record) < 392 { - return nil, fmt.Errorf("malformed event record (length %d)", len(record)) - } - return readParams(record[392:]), nil + return &event, nil } func readParams(values []byte) []*ditypes.Param { + if len(values) >= 100 { + log.Tracef("DI event bytes (0:100): %v", values[0:100]) + } outputParams := []*ditypes.Param{} - for i := 0; i < MaxBufferSize; { - if i+3 >= len(values) { - break - } + for i := 0; i+3 < len(values); { paramTypeDefinition := parseTypeDefinition(values[i:]) if paramTypeDefinition == nil { break } - sizeOfTypeDefinition := countBufferUsedByTypeDefinition(paramTypeDefinition) i += sizeOfTypeDefinition val, numBytesRead := parseParamValue(paramTypeDefinition, values[i:]) if val == nil { return outputParams } - if reflect.Kind(val.Kind) == reflect.Slice { - // In BPF we read the slice by reading the maximum size of a slice - // that we allow, instead of just the size of the slice (which we - // know at runtime). This is to satisfy the verifier. When parsing - // here, we read just the actual slice content, but have to move the - // buffer index ahead by the amount of space used by the max read. - i += ditypes.SliceMaxSize - } else { - i += numBytesRead - } + i += numBytesRead outputParams = append(outputParams, val) } return outputParams @@ -99,44 +76,49 @@ func readParams(values []byte) []*ditypes.Param { // from the byte buffer. It returns the resulting parameter and an indication of // how many bytes were read from the buffer func parseParamValue(definition *ditypes.Param, buffer []byte) (*ditypes.Param, int) { + var bufferIndex int // Start by creating a stack with each layer of the definition // which will correspond with the layers of the values read from buffer. - // This is done using a temporary stack. + // This is done using a temporary stack to reverse the order. tempStack := newParamStack() definitionStack := newParamStack() tempStack.push(definition) for !tempStack.isEmpty() { current := tempStack.pop() - definitionStack.push(copyParam(current)) - for i := 0; i < len(current.Fields); i++ { - tempStack.push(current.Fields[i]) + copiedParam := copyParam(current) + definitionStack.push(copiedParam) + for n := 0; n < len(current.Fields); n++ { + tempStack.push(current.Fields[n]) } } - var i int + valueStack := newParamStack() - for i = 0; i+3 < len(buffer); { + for bufferIndex <= len(buffer) { paramDefinition := definitionStack.pop() if paramDefinition == nil { break } - if !isTypeWithHeader(paramDefinition.Kind) { - if i+int(paramDefinition.Size) >= len(buffer) { + nextIndex := bufferIndex + int(paramDefinition.Size) + if reflect.Kind(paramDefinition.Kind) == reflect.String { + if nextIndex > len(buffer) { + break + } + paramDefinition.ValueStr = string(buffer[bufferIndex:nextIndex]) + bufferIndex += int(paramDefinition.Size) + valueStack.push(paramDefinition) + } else if !isTypeWithHeader(paramDefinition.Kind) { + if nextIndex > len(buffer) { break } // This is a regular value (no sub-fields). 
// We parse the value of it from the buffer and push it to the value stack - paramDefinition.ValueStr = parseIndividualValue(paramDefinition.Kind, buffer[i:i+int(paramDefinition.Size)]) - i += int(paramDefinition.Size) + paramDefinition.ValueStr = parseIndividualValue(paramDefinition.Kind, buffer[bufferIndex:nextIndex]) + bufferIndex += int(paramDefinition.Size) valueStack.push(paramDefinition) } else if reflect.Kind(paramDefinition.Kind) == reflect.Pointer { - if i+int(paramDefinition.Size) >= len(buffer) { + if nextIndex > len(buffer) { break } - // Pointers are unique in that they have their own value, and sub-fields. - // We parse the value of it from the buffer, place it in the value for - // the pointer itself, then pop the next value and place it as a sub-field. - paramDefinition.ValueStr = parseIndividualValue(paramDefinition.Kind, buffer[i:i+int(paramDefinition.Size)]) - i += int(paramDefinition.Size) paramDefinition.Fields = append(paramDefinition.Fields, valueStack.pop()) valueStack.push(paramDefinition) } else { @@ -151,7 +133,18 @@ func parseParamValue(definition *ditypes.Param, buffer []byte) (*ditypes.Param, valueStack.push(paramDefinition) } } - return valueStack.pop(), i + return valueStack.pop(), bufferIndex +} + +func deepCopyParam(dst, src *ditypes.Param) { + dst.Type = src.Type + dst.Kind = src.Kind + dst.Size = src.Size + dst.Fields = make([]*ditypes.Param, len(src.Fields)) + for i, field := range src.Fields { + dst.Fields[i] = &ditypes.Param{} + deepCopyParam(dst.Fields[i], field) + } } func copyParam(p *ditypes.Param) *ditypes.Param { @@ -183,15 +176,23 @@ func parseTypeDefinition(b []byte) *ditypes.Param { if len(b) < 3 { return nil } + + kind := b[i] newParam := &ditypes.Param{ - Kind: b[i], - Size: binary.LittleEndian.Uint16(b[i+1 : i+3]), - Type: parseKindToString(b[i]), + Kind: kind, + Size: byteOrder.Uint16(b[i+1 : i+3]), + Type: parseKindToString(kind), } - if newParam.Kind == 0 && newParam.Size == 0 { + if newParam.Kind == 0 { break } i += 3 + if newParam.Size == 0 { + if reflect.Kind(newParam.Kind) == reflect.Struct { + goto stackCheck + } + break + } if isTypeWithHeader(newParam.Kind) { stack.push(newParam) continue @@ -203,12 +204,27 @@ func parseTypeDefinition(b []byte) *ditypes.Param { } top := stack.peek() top.Fields = append(top.Fields, newParam) + + if reflect.Kind(top.Kind) == reflect.Slice { + // top.Size is the length of the slice. + // We copy+append the type of the slice so we have the correct + // number of slice elements to parse values into. + if top.Size == 0 { + top.Fields = []*ditypes.Param{} + } else if top.Size > 1 { + for q := 1; q < int(top.Size); q++ { + sliceElementTypeCopy := &ditypes.Param{} + deepCopyParam(sliceElementTypeCopy, top.Fields[0]) + top.Fields = append(top.Fields, sliceElementTypeCopy) + } + } + } + if len(top.Fields) == int(top.Size) || (reflect.Kind(top.Kind) == reflect.Pointer && len(top.Fields) == 1) { newParam = stack.pop() goto stackCheck } - } return nil } @@ -224,15 +240,23 @@ func countBufferUsedByTypeDefinition(root *ditypes.Param) int { front := queue[0] queue = queue[1:] counter += 3 - queue = append(queue, front.Fields...) + + if reflect.Kind(front.Kind) == reflect.Slice && len(front.Fields) > 0 { + // The fields of slice elements are amended after the fact to account + // for the runtime discovered length. However, only one definition of + // the slice element's type is present in the buffer. + queue = append(queue, front.Fields[0]) + } else { + queue = append(queue, front.Fields...) 
+ } } return counter } func isTypeWithHeader(pieceType byte) bool { return reflect.Kind(pieceType) == reflect.Struct || - reflect.Kind(pieceType) == reflect.Slice || reflect.Kind(pieceType) == reflect.Array || + reflect.Kind(pieceType) == reflect.Slice || reflect.Kind(pieceType) == reflect.Pointer } diff --git a/pkg/dynamicinstrumentation/eventparser/event_parser_test.go b/pkg/dynamicinstrumentation/eventparser/event_parser_test.go index 94496b5cd2d0f..cb20a19cd9ad1 100644 --- a/pkg/dynamicinstrumentation/eventparser/event_parser_test.go +++ b/pkg/dynamicinstrumentation/eventparser/event_parser_test.go @@ -8,11 +8,12 @@ package eventparser import ( - "fmt" "reflect" "testing" "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" + "github.com/kr/pretty" + "github.com/stretchr/testify/assert" ) func TestCountBufferUsedByTypeDefinition(t *testing.T) { @@ -109,6 +110,28 @@ func TestParseParamValue(t *testing.T) { }, }, }, + { + name: "same sized string", + inputBuffer: []byte{ + 65, 65, 65, + }, + inputDefinition: &ditypes.Param{ + Type: "string", Size: 0x3, Kind: 0x18, + }, + expectedValue: &ditypes.Param{ + ValueStr: "AAA", Type: "string", Size: 0x3, Kind: 0x18, + }, + }, + { + name: "empty string", + inputBuffer: []byte{}, + inputDefinition: &ditypes.Param{ + Type: "string", Size: 0x0, Kind: 0x18, + }, + expectedValue: &ditypes.Param{ + ValueStr: "", Type: "string", Size: 0x0, Kind: 0x18, + }, + }, } for _, tt := range tests { @@ -135,10 +158,6 @@ func TestReadParams(t *testing.T) { 8, 1, 0, // Struct field 1 is a uint8 (size 1) 9, 2, 0, // Struct field 2 is a uint16 (size 2) 8, 1, 0, // Struct field 3 is a uint8 (size 1) - 25, 3, 0, // Slice elements are each a struct with 3 fields - 8, 1, 0, // Struct field 1 is a uint8 (size 1) - 9, 2, 0, // Struct field 2 is a uint16 (size 2) - 8, 1, 0, // Struct field 3 is a uint8 (size 1) 1, 2, 0, 3, // Content of slice element 1 (not relevant for this function) 4, 5, 0, 6, // Content of slice element 2 (not relevant for this function) // Padding @@ -167,11 +186,7 @@ func TestReadParams(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { output := readParams(tt.inputBuffer) - if !reflect.DeepEqual(output, tt.expectedResult) { - fmt.Printf("Got: %v\n", output) - fmt.Printf("Expected: %v\n", tt.expectedResult) - t.Errorf("Didn't read correctly!") - } + assert.Equal(t, output, tt.expectedResult) }) } } @@ -234,13 +249,6 @@ func TestParseTypeDefinition(t *testing.T) { 25, 2, 0, // Struct field 4 is a struct with 2 fields 8, 1, 0, // Nested struct field 1 is a uint8 (size 1) 8, 1, 0, // Nested struct field 2 is a uint8 (size 1) - 25, 4, 0, // Slice elements are each a struct with 2 fields - 8, 1, 0, // Struct field 1 is a uint8 (size 1) - 8, 1, 0, // Struct field 2 is a uint8 (size 1) - 8, 1, 0, // Struct field 3 is a uint8 (size 1) - 25, 2, 0, // Struct field 4 is a struct with 2 fields - 8, 1, 0, // Nested struct field 1 is a uint8 (size 1) - 8, 1, 0, // Nested struct field 2 is a uint8 (size 1) 1, 2, 3, // Content of slice element 1 (top-level uint8, then 2 second tier uint8s) 4, 5, 6, // Content of slice element 2 (top-level uint8, then 2 second tier uint8s) // Padding @@ -288,11 +296,174 @@ func TestParseTypeDefinition(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { typeDefinition := parseTypeDefinition(tt.inputBuffer) - if !reflect.DeepEqual(typeDefinition, tt.expectedResult) { - fmt.Printf("%v\n", typeDefinition) - fmt.Printf("%v\n", tt.expectedResult) - t.Errorf("Not 
equal!") + if !paramsAreEqual(typeDefinition, tt.expectedResult) { + t.Errorf("params are not equal\nExpected: %s\nReceived: %s\n", pretty.Sprint(tt.expectedResult), pretty.Sprint(typeDefinition)) } }) } } + +func paramsAreEqual(p1, p2 *ditypes.Param) bool { + if p1 == nil && p2 == nil { + return true + } + if p1 == nil || p2 == nil { + return false + } + if p1.ValueStr != p2.ValueStr || p1.Type != p2.Type || p1.Size != p2.Size || p1.Kind != p2.Kind { + return false + } + if len(p1.Fields) != len(p2.Fields) { + return false + } + for i := range p1.Fields { + if !paramsAreEqual(p1.Fields[i], p2.Fields[i]) { + return false + } + } + return true +} + +func TestParseParams(t *testing.T) { + type testCase struct { + Name string + Buffer []byte + ExpectedOutput []*ditypes.Param + } + + testCases := []testCase{ + { + Name: "uint slice ok", + Buffer: []byte{23, 3, 0, 7, 8, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + ExpectedOutput: []*ditypes.Param{ + { + Type: "slice", + Size: 3, + Kind: byte(reflect.Slice), + Fields: []*ditypes.Param{ + { + Kind: byte(reflect.Uint), + ValueStr: "1", + Type: "uint", + Size: 8, + }, + { + Kind: byte(reflect.Uint), + ValueStr: "2", + Type: "uint", + Size: 8, + }, + { + Kind: byte(reflect.Uint), + ValueStr: "3", + Type: "uint", + Size: 8, + }, + }, + }, + }, + }, + { + Name: "uint pointer ok", + Buffer: []byte{22, 8, 0, 7, 8, 0, 123, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + ExpectedOutput: []*ditypes.Param{ + { + Type: "ptr", + Size: 8, + Kind: byte(reflect.Pointer), + Fields: []*ditypes.Param{ + { + Kind: byte(reflect.Uint), + ValueStr: "123", + Type: "uint", + Size: 8, + }, + }, + }, + }, + }, + { + Name: "struct pointer ok", + Buffer: []byte{22, 8, 0, 25, 3, 0, 1, 1, 0, 2, 8, 0, 4, 2, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + ExpectedOutput: []*ditypes.Param{ + { + Type: "ptr", + Size: 8, + Kind: byte(reflect.Pointer), + Fields: []*ditypes.Param{ + { + Type: "struct", + Size: 3, + Kind: byte(reflect.Struct), + Fields: []*ditypes.Param{ + { + Kind: byte(reflect.Bool), + ValueStr: "true", + Type: "bool", + Size: 1, + }, + { + Kind: byte(reflect.Int), + ValueStr: "1", + Type: "int", + Size: 8, + }, + { + Kind: byte(reflect.Int16), + ValueStr: "2", + Type: "int16", + Size: 2, + }, + }, + }, + }, + }, + }, + }, + { + Name: "struct pointer nil", + Buffer: []byte{22, 8, 0, 25, 3, 0, 1, 1, 0, 2, 8, 0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + ExpectedOutput: 
[]*ditypes.Param{ + { + Type: "ptr", + Size: 8, + Kind: byte(reflect.Pointer), + Fields: []*ditypes.Param{ + { + Type: "struct", + Size: 3, + Kind: byte(reflect.Struct), + Fields: []*ditypes.Param{ + { + Kind: byte(reflect.Bool), + ValueStr: "false", + Type: "bool", + Size: 1, + }, + { + Kind: byte(reflect.Int), + ValueStr: "0", + Type: "int", + Size: 8, + }, + { + Kind: byte(reflect.Int16), + ValueStr: "0", + Type: "int16", + Size: 2, + }, + }, + }, + }, + }, + }, + }, + } + + for i := range testCases { + t.Run(testCases[i].Name, func(t *testing.T) { + result := readParams(testCases[i].Buffer) + assert.Equal(t, testCases[i].ExpectedOutput, result) + }) + } +} diff --git a/pkg/dynamicinstrumentation/eventparser/param_stack.go b/pkg/dynamicinstrumentation/eventparser/param_stack.go index b2359951ca25a..bf2f7c5259cc9 100644 --- a/pkg/dynamicinstrumentation/eventparser/param_stack.go +++ b/pkg/dynamicinstrumentation/eventparser/param_stack.go @@ -21,7 +21,7 @@ func newParamStack() *paramStack { } func (s *paramStack) isEmpty() bool { - return len(s.arr) == 0 + return s == nil || len(s.arr) == 0 } func (s *paramStack) pop() *ditypes.Param { diff --git a/pkg/dynamicinstrumentation/module/module.go b/pkg/dynamicinstrumentation/module/module.go index 27a20ea18b390..0b8ce1e4636b2 100644 --- a/pkg/dynamicinstrumentation/module/module.go +++ b/pkg/dynamicinstrumentation/module/module.go @@ -24,7 +24,7 @@ type Module struct { } // NewModule creates a new dynamic instrumentation system probe module -func NewModule(config *Config) (*Module, error) { //nolint:revive // TODO +func NewModule(_ *Config) (*Module, error) { godi, err := di.RunDynamicInstrumentation(&di.DIOptions{ RateLimitPerProbePerSecond: 1.0, OfflineOptions: di.OfflineOptions{ @@ -32,7 +32,8 @@ func NewModule(config *Config) (*Module, error) { //nolint:revive // TODO ProbesFilePath: coreconfig.SystemProbe().GetString("dynamic_instrumentation.probes_file_path"), SnapshotOutput: coreconfig.SystemProbe().GetString("dynamic_instrumentation.snapshot_output_file_path"), DiagnosticOutput: coreconfig.SystemProbe().GetString("dynamic_instrumentation.diagnostics_output_file_path"), - }}) + }, + }) if err != nil { return nil, err } @@ -65,7 +66,7 @@ func (m *Module) GetStats() map[string]interface{} { // Register creates a health check endpoint for the dynamic instrumentation module func (m *Module) Register(httpMux *module.Router) error { httpMux.HandleFunc("/check", utils.WithConcurrencyLimit(utils.DefaultMaxConcurrentRequests, - func(w http.ResponseWriter, req *http.Request) { //nolint:revive // TODO + func(w http.ResponseWriter, _ *http.Request) { stats := []string{} utils.WriteAsJSON(w, stats) })) diff --git a/pkg/dynamicinstrumentation/proctracker/proctracker.go b/pkg/dynamicinstrumentation/proctracker/proctracker.go index fd5c6f750488c..2489ee6904aee 100644 --- a/pkg/dynamicinstrumentation/proctracker/proctracker.go +++ b/pkg/dynamicinstrumentation/proctracker/proctracker.go @@ -26,7 +26,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" "github.com/DataDog/datadog-agent/pkg/network/go/binversion" "github.com/DataDog/datadog-agent/pkg/process/monitor" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -186,7 +186,7 @@ func (pt *ProcessTracker) registerProcess(binID binaryID, pid pid, 
mTime syscall } func getServiceName(pid uint32) string { - envVars, _, err := utils.EnvVars([]string{"DD"}, pid, model.MaxArgsEnvsSize) + envVars, _, err := utils.EnvVars([]string{"DD"}, pid, sharedconsts.MaxArgsEnvsSize) if err != nil { return "" } diff --git a/pkg/dynamicinstrumentation/ringbufconsumer.go b/pkg/dynamicinstrumentation/ringbufconsumer.go index efcf578932009..9cadc02da1d15 100644 --- a/pkg/dynamicinstrumentation/ringbufconsumer.go +++ b/pkg/dynamicinstrumentation/ringbufconsumer.go @@ -49,7 +49,11 @@ func (goDI *GoDI) startRingbufferConsumer(rate float64) (func(), error) { continue } - event := eventparser.ParseEvent(record.RawSample, rateLimiters) + event, err := eventparser.ParseEvent(record.RawSample, rateLimiters) + if err != nil { + log.Trace(err) + continue + } if event == nil { continue } diff --git a/pkg/dynamicinstrumentation/testutil/e2e_test.go b/pkg/dynamicinstrumentation/testutil/e2e_test.go index 9783fba6c7ba4..0dc0b7ddcbe07 100644 --- a/pkg/dynamicinstrumentation/testutil/e2e_test.go +++ b/pkg/dynamicinstrumentation/testutil/e2e_test.go @@ -19,6 +19,8 @@ import ( "text/template" "time" + "github.com/kr/pretty" + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation" "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/diconfig" "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/ditypes" @@ -26,11 +28,19 @@ import ( "github.com/cilium/ebpf" "github.com/cilium/ebpf/features" "github.com/cilium/ebpf/rlimit" - "github.com/kr/pretty" "github.com/stretchr/testify/require" ) +type testResult struct { + testName string + matches []bool + expectation ditypes.CapturedValueMap + unexpectedResults []ditypes.CapturedValueMap +} + +var results = make(map[string]*testResult) + func TestGoDI(t *testing.T) { flake.Mark(t) if err := rlimit.RemoveMemlock(); err != nil { @@ -107,9 +117,14 @@ func TestGoDI(t *testing.T) { buf = bytes.NewBuffer(b) functionWithoutPackagePrefix, _ := strings.CutPrefix(function, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.") t.Log("Instrumenting ", functionWithoutPackagePrefix) + results[function] = &testResult{ + testName: functionWithoutPackagePrefix, + expectation: expectedCaptureValue, + matches: []bool{}, + unexpectedResults: []ditypes.CapturedValueMap{}, + } err = cfgTemplate.Execute(buf, configDataType{functionWithoutPackagePrefix}) require.NoError(t, err) - eventOutputWriter.doCompare = false eventOutputWriter.expectedResult = expectedCaptureValue // Read the configuration via the config manager @@ -122,11 +137,22 @@ func TestGoDI(t *testing.T) { time.Sleep(time.Second * 2) doCapture = false } + + for i := range results { + for _, ok := range results[i].matches { + if !ok { + t.Errorf("Failed test for: %s\nReceived event: %v\nExpected: %v", + results[i].testName, + pretty.Sprint(results[i].unexpectedResults), + pretty.Sprint(results[i].expectation)) + break + } + } + } } type eventOutputTestWriter struct { t *testing.T - doCompare bool expectedResult map[string]*ditypes.CapturedValue } @@ -141,12 +167,20 @@ func (e *eventOutputTestWriter) Write(p []byte) (n int, err error) { e.t.Error("failed to unmarshal snapshot", err) } - funcName := snapshot.Debugger.ProbeInSnapshot.Type + "." 
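
ParseEvent now returns an error alongside the event, and the ring buffer consumer logs and skips records it cannot parse instead of handing nil events downstream. A hedged sketch of that consume-and-skip loop; parseEvent and the raw byte records are simplified stand-ins for eventparser.ParseEvent and the real ringbuf records:

```go
package main

import (
	"errors"
	"fmt"
)

type event struct{ probeID string }

// parseEvent stands in for eventparser.ParseEvent: it may return an error for a
// malformed record, or (nil, nil) for a record that is valid but filtered out
// (for example by rate limiting).
func parseEvent(raw []byte) (*event, error) {
	if len(raw) == 0 {
		return nil, errors.New("empty record")
	}
	if raw[0] == 0 {
		return nil, nil // filtered, not an error
	}
	return &event{probeID: fmt.Sprintf("probe-%d", raw[0])}, nil
}

func consume(records [][]byte) []*event {
	var out []*event
	for _, raw := range records {
		ev, err := parseEvent(raw)
		if err != nil {
			// The real consumer logs this at trace level; either way the loop continues.
			fmt.Println("skipping record:", err)
			continue
		}
		if ev == nil {
			continue
		}
		out = append(out, ev)
	}
	return out
}

func main() {
	events := consume([][]byte{nil, {0}, {7}})
	fmt.Println(len(events)) // 1
}
```
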
+ snapshot.Debugger.ProbeInSnapshot.Method + funcName := snapshot.Debugger.ProbeInSnapshot.Method actual := snapshot.Debugger.Captures.Entry.Arguments scrubPointerValues(actual) + b, ok := results[funcName] + if !ok { + e.t.Errorf("received event from unexpected probe: %s", funcName) + return + } if !reflect.DeepEqual(e.expectedResult, actual) { - e.t.Error("Unexpected ", funcName, pretty.Sprint(actual)) - e.t.Log("Expected: ", pretty.Sprint(e.expectedResult)) + b.matches = append(b.matches, false) + b.unexpectedResults = append(b.unexpectedResults, actual) + e.t.Error("received unexpected value") + } else { + b.matches = append(b.matches, true) } return len(p), nil @@ -197,7 +231,7 @@ var configTemplateText = ` ], "captureSnapshot": false, "capture": { - "maxReferenceDepth": 6 + "maxReferenceDepth": 5 }, "sampling": { "snapshotsPerSecond": 5000 diff --git a/pkg/dynamicinstrumentation/testutil/fixtures.go b/pkg/dynamicinstrumentation/testutil/fixtures.go index fd8fbbc50243e..d25e558b0da9f 100644 --- a/pkg/dynamicinstrumentation/testutil/fixtures.go +++ b/pkg/dynamicinstrumentation/testutil/fixtures.go @@ -40,91 +40,43 @@ var basicCaptures = fixtures{ // "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_single_float64": {"x": capturedValue("float", "-1.646464")}, } -var multiParamCaptures = fixtures{ //nolint:unused // TODO - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_multiple_simple_params": { - "a": capturedValue("bool", "false"), - "b": capturedValue("uint8", "42"), - "c": capturedValue("int32", "122"), - "d": capturedValue("uint", "1337"), - "e": capturedValue("string", "xyz"), - }, - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_multiple_composite_params": { - "a": {Type: "array", Fields: fieldMap{ - "a_0": capturedValue("string", "one"), - "a_1": capturedValue("string", "two"), - "a_2": capturedValue("string", "three"), - }}, - "b": {Type: "struct", Fields: fieldMap{ - "aBool": capturedValue("bool", "false"), - "aString": capturedValue("string", ""), - "aNumber": capturedValue("int", "0"), - "nested": {Type: "struct", Fields: fieldMap{ - "anotherInt": capturedValue("int", "0"), - "anotherString": capturedValue("string", ""), - }}, - }}, - "c": {Type: "slice", Fields: fieldMap{ - "c_0": capturedValue("uint", "24"), - "c_1": capturedValue("uint", "42"), - }}, - "d": {Type: "map", Fields: fieldMap{ - "foo": capturedValue("string", "bar"), - }}, - "e": {Type: "slice", Fields: fieldMap{ - "e_0": {Type: "struct", Fields: fieldMap{ - "anotherInt": capturedValue("int", "42"), - "anotherString": capturedValue("string", "ftwo"), - }}, - "e_1": {Type: "struct", Fields: fieldMap{ - "anotherInt": capturedValue("int", "24"), - "anotherString": capturedValue("string", "tfour"), - }}, - }}, - }, - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_combined_byte": { - "w": capturedValue("uint8", "2"), - "x": capturedValue("uint8", "3"), - "y": capturedValue("uint8", "3.0"), - }, -} - var stringCaptures = fixtures{ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_single_string": {"x": capturedValue("string", "abc")}, } var arrayCaptures = fixtures{ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_byte_array": {"x": {Type: "array", Fields: fieldMap{ - "[2]uint8[0]": capturedValue("uint8", "1"), - "[2]uint8[1]": capturedValue("uint8", "1"), + "arg_0": capturedValue("uint8", "1"), + "arg_1": 
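
The e2e test now records every received capture per probe in a testResult (matches plus unexpectedResults) and reports mismatches once, after all captures finish, rather than failing inline on each write. A small sketch of that collect-then-report pattern, using map[string]string as a simplified stand-in for ditypes.CapturedValueMap:

```go
package main

import (
	"fmt"
	"reflect"
)

// testResult mirrors the shape used in the e2e test: each received capture is
// compared against the expectation and the outcome is recorded for later reporting.
type testResult struct {
	testName          string
	expectation       map[string]string
	matches           []bool
	unexpectedResults []map[string]string
}

func (r *testResult) record(actual map[string]string) {
	if reflect.DeepEqual(r.expectation, actual) {
		r.matches = append(r.matches, true)
		return
	}
	r.matches = append(r.matches, false)
	r.unexpectedResults = append(r.unexpectedResults, actual)
}

func main() {
	r := &testResult{
		testName:    "sample.test_single_string",
		expectation: map[string]string{"x": "abc"},
	}
	r.record(map[string]string{"x": "abc"})
	r.record(map[string]string{"x": "xyz"})

	if len(r.unexpectedResults) > 0 {
		fmt.Printf("%s: %d of %d events did not match; got %v, want %v\n",
			r.testName, len(r.unexpectedResults), len(r.matches),
			r.unexpectedResults[0], r.expectation)
	}
}
```
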
capturedValue("uint8", "1"), }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_rune_array": {"x": {Type: "array", Fields: fieldMap{ - "[2]int32[0]": capturedValue("int32", "1"), - "[2]int32[1]": capturedValue("int32", "2"), + "arg_0": capturedValue("int32", "1"), + "arg_1": capturedValue("int32", "2"), }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_string_array": {"x": {Type: "array", Fields: fieldMap{ - "[2]string[0]": capturedValue("string", "one"), - "[2]string[1]": capturedValue("string", "two"), + "arg_0": capturedValue("string", "one"), + "arg_1": capturedValue("string", "two"), }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_int_array": {"x": {Type: "array", Fields: fieldMap{ - "[2]int[0]": capturedValue("int", "1"), - "[2]int[1]": capturedValue("int", "2"), + "arg_0": capturedValue("int", "1"), + "arg_1": capturedValue("int", "2"), }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_int8_array": {"x": {Type: "array", Fields: fieldMap{ - "[2]int8[0]": capturedValue("int8", "1"), - "[2]int8[1]": capturedValue("int8", "2"), + "arg_0": capturedValue("int8", "1"), + "arg_1": capturedValue("int8", "2"), }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_uint_array": {"x": {Type: "array", Fields: fieldMap{ - "[2]uint[0]": capturedValue("uint", "1"), - "[2]uint[1]": capturedValue("uint", "2"), + "arg_0": capturedValue("uint", "1"), + "arg_1": capturedValue("uint", "2"), }}}, - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_array_of_arrays": {"a": {Type: "array", Fields: fieldMap{ - "[2][2]int[0]": {Type: "array", Fields: fieldMap{ - "[2]int[0]": capturedValue("int", "1"), - "[2]int[1]": capturedValue("int", "2"), + "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_array_of_arrays": {"arg_0": {Type: "array", Fields: fieldMap{ + "arg_0": {Type: "array", Fields: fieldMap{ + "arg_0": capturedValue("int", "1"), + "arg_1": capturedValue("int", "2"), }}, - "[2][2]int[1]": {Type: "array", Fields: fieldMap{ - "[2]int[0]": capturedValue("int", "3"), - "[2]int[1]": capturedValue("int", "4"), + "arg_1": {Type: "array", Fields: fieldMap{ + "arg_0": capturedValue("int", "3"), + "arg_1": capturedValue("int", "4"), }}, }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_array_of_structs": {"a": {Type: "array", Fields: fieldMap{ @@ -163,14 +115,14 @@ var arrayCaptures = fixtures{ var structCaptures = fixtures{ "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_string_struct": {"t": {Type: "struct", Fields: fieldMap{ - "a": capturedValue("string", "a"), - "b": capturedValue("string", "bb"), - "c": capturedValue("string", "ccc"), + "arg_0": capturedValue("string", "a"), + "arg_1": capturedValue("string", "bb"), + "arg_2": capturedValue("string", "ccc"), }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.receiver.test_method_receiver": { "r": { Type: "struct", Fields: fieldMap{ - "u": capturedValue("uint", "1"), + "arg_0": capturedValue("uint", "1"), }}, "a": capturedValue("int", "2"), }, @@ -211,16 +163,16 @@ var structCaptures = fixtures{ // "z": capturedValue("CutFieldLimit", "reached field limit"), // }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_nonembedded_struct": {"x": {Type: "struct", Fields: fieldMap{ - "aBool": 
capturedValue("bool", "true"), - "aInt": capturedValue("int", "1"), - "aInt16": capturedValue("int16", "2"), + "arg_0": capturedValue("bool", "true"), + "arg_1": capturedValue("int", "1"), + "arg_2": capturedValue("int16", "2"), }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_struct_pointer": {"x": {Type: "ptr", Fields: fieldMap{ "arg_0": { Type: "struct", Fields: fieldMap{ - "aBool": capturedValue("bool", "true"), - "aInt": capturedValue("int", "1"), - "aInt16": capturedValue("int16", "2"), + "arg_0": capturedValue("bool", "true"), + "arg_1": capturedValue("int", "1"), + "arg_2": capturedValue("int16", "2"), }}, }}}, "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_multiple_embedded_struct": {"b": {Type: "struct", Fields: fieldMap{ @@ -239,32 +191,6 @@ var structCaptures = fixtures{ }}}, } -// TODO: this doesn't work yet: -// could not determine locations of variables from debug information could not inspect param "x" on function: no location field in parameter entry -var genericCaptures = fixtures{ //nolint:unused // TODO - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.typeWithGenerics[go.shape.string].Guess": {"value": capturedValue("string", "generics work")}, -} - -// TODO: check how map entries should be represented, likely that entries have key / value pair fields -// instead of having the keys hardcoded as string field names -// maps are no supported at the moment so this fails anyway -var mapCaptures = fixtures{ //nolint:unused // TODO - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_map_string_to_int": {"m": {Type: "map", Fields: fieldMap{ - "foo": capturedValue("int", "1"), - "bar": capturedValue("int", "2"), - }}}, - "github.com/DataDog/datadog-agent/pkg/dynamicinstrumentation/testutil/sample.test_map_string_to_struct": {"m": {Type: "map", Fields: fieldMap{ - "foo": {Type: "struct", Fields: fieldMap{ - "anotherInt": capturedValue("int", "3"), - "anotherString": capturedValue("string", "four"), - }}, - "bar": {Type: "struct", Fields: fieldMap{ - "anotherInt": capturedValue("int", "3"), - "anotherString": capturedValue("string", "four"), - }}, - }}}, -} - // mergeMaps combines multiple fixture maps into a single map func mergeMaps(maps ...fixtures) fixtures { result := make(fixtures) diff --git a/pkg/dynamicinstrumentation/testutil/sample/complex.go b/pkg/dynamicinstrumentation/testutil/sample/complex.go new file mode 100644 index 0000000000000..bf481a19930b2 --- /dev/null +++ b/pkg/dynamicinstrumentation/testutil/sample/complex.go @@ -0,0 +1,71 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package sample + +import "io" + +type outer struct { + A *middle +} + +type middle struct { + B *inner +} + +type inner struct { + C int + D byte + E string +} + +//nolint:all +//go:noinline +func test_multiple_dereferences(o outer) {} + +type bigStruct struct { + x []*string + z int + writer io.Writer +} + +//nolint:all +//go:noinline +func test_big_struct(b bigStruct) {} + +type circularReferenceType struct { + t *circularReferenceType +} + +//nolint:all +//go:noinline +func test_circular_type(x circularReferenceType) {} + +//nolint:all +func ExecuteComplexFuncs() { + o := outer{ + A: &middle{ + B: &inner{ + C: 1, + D: 2, + E: "three", + }, + }, + } + + str := "abc" + s := []*string{&str} + + test_big_struct(bigStruct{ + x: s, + z: 5, + writer: io.Discard, + }) + test_multiple_dereferences(o) + + circ := circularReferenceType{} + circ.t = &circ + test_circular_type(circ) +} diff --git a/pkg/dynamicinstrumentation/testutil/sample/pointers.go b/pkg/dynamicinstrumentation/testutil/sample/pointers.go index 3a01d3f422b93..fe037afdac862 100644 --- a/pkg/dynamicinstrumentation/testutil/sample/pointers.go +++ b/pkg/dynamicinstrumentation/testutil/sample/pointers.go @@ -7,6 +7,11 @@ package sample import "unsafe" +type structWithTwoValues struct { + a uint + b bool +} + type structWithPointer struct { a *uint64 } @@ -28,6 +33,10 @@ type reallyComplexType struct { aStringPtr *string } +//nolint:all +//go:noinline +func test_pointer_to_simple_struct(a *structWithTwoValues) {} + //nolint:all //go:noinline func test_linked_list(a node) {} @@ -41,6 +50,10 @@ type node struct { //go:noinline func test_unsafe_pointer(x unsafe.Pointer) {} +//nolint:all +//go:noinline +func test_array_pointer(x *[2]uint) {} + //nolint:all //go:noinline func test_uint_pointer(x *uint) {} @@ -69,6 +82,10 @@ func test_struct_with_string_pointer(z spws) {} //go:noinline func test_string_pointer(z *string) {} +//nolint:all +//go:noinline +func test_string_slice_pointer(a *[]string) {} + //nolint:all func ExecutePointerFuncs() { var u64F uint64 = 5 @@ -78,7 +95,7 @@ func ExecutePointerFuncs() { r := "abc" z := spws{3, &r} - var uintToPointTo uint = 123 + var uintToPointTo uint = 1 test_uint_pointer(&uintToPointTo) n := nStruct{true, 1, 2} @@ -92,6 +109,9 @@ func ExecutePointerFuncs() { test_struct_with_string_pointer(z) test_string_pointer(&r) + x := structWithTwoValues{9, true} + test_pointer_to_simple_struct(&x) + rct := reallyComplexType{ pointerToStructWithAPointerToAStruct: &ssaw, anArray: [1]nStruct{n}, @@ -103,11 +123,20 @@ func ExecutePointerFuncs() { b := node{ val: 1, b: &node{ - val: 2, - b: nil, + val: 5, + b: &node{ + val: 3, + b: nil, + }, }, } test_linked_list(b) test_unsafe_pointer(unsafe.Pointer(&b)) + + aruint := [2]uint{1, 2} + test_array_pointer(&aruint) + + stringSlice := []string{"aaa", "bbb", "ccc", "ddd"} + test_string_slice_pointer(&stringSlice) } diff --git a/pkg/dynamicinstrumentation/testutil/sample/sample_service/sample_service.go b/pkg/dynamicinstrumentation/testutil/sample/sample_service/sample_service.go index c7a1faa665372..4192672d8a965 100644 --- a/pkg/dynamicinstrumentation/testutil/sample/sample_service/sample_service.go +++ b/pkg/dynamicinstrumentation/testutil/sample/sample_service/sample_service.go @@ -44,6 +44,7 @@ func main() { sample.ExecuteStructFuncs() sample.ExecuteStackAndInlining() sample.ExecutePointerFuncs() + sample.ExecuteComplexFuncs() // unsupported for MVP, should not cause crashes sample.ExecuteGenericFuncs() diff --git 
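
complex.go above deliberately builds a self-referential value (circ.t = &circ), which only terminates under the capped maxReferenceDepth used in the capture config. A hedged sketch of why a depth bound matters when walking such a type; the node type and walk function are illustrative, not the real instrumentation code:

```go
package main

import "fmt"

type node struct {
	val  int
	next *node
}

// walk follows pointers for at most maxDepth levels, so a cycle such as
// n.next = &n terminates instead of recursing forever. This mirrors why the
// capture configuration bounds maxReferenceDepth when circular types like
// circularReferenceType are instrumented.
func walk(n *node, maxDepth int) []int {
	var vals []int
	for cur := n; cur != nil && maxDepth > 0; cur, maxDepth = cur.next, maxDepth-1 {
		vals = append(vals, cur.val)
	}
	return vals
}

func main() {
	n := node{val: 1}
	n.next = &n // circular reference, as in test_circular_type
	fmt.Println(walk(&n, 5)) // [1 1 1 1 1]
}
```
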
a/pkg/dynamicinstrumentation/testutil/sample/slices.go b/pkg/dynamicinstrumentation/testutil/sample/slices.go index f31b479984f1e..c4fd70551a864 100644 --- a/pkg/dynamicinstrumentation/testutil/sample/slices.go +++ b/pkg/dynamicinstrumentation/testutil/sample/slices.go @@ -28,16 +28,36 @@ func expandSlice(x []int) { //go:noinline func test_uint_slice(u []uint) {} +//nolint:all +//go:noinline +func test_empty_slice(u []uint) {} + +//nolint:all +//go:noinline +func test_slice_of_slices(u [][]uint) {} + //nolint:all //go:noinline func test_struct_slice(xs []structWithNoStrings) {} +//nolint:all +//go:noinline +func test_string_slice(s []string) {} + //nolint:all func ExecuteSliceFuncs() { originalSlice := []int{1, 2, 3} expandSlice(originalSlice) sprintSlice(originalSlice) + test_string_slice([]string{"abc", "xyz", "123"}) test_uint_slice([]uint{1, 2, 3}) test_struct_slice([]structWithNoStrings{{42, true}, {24, true}}) + + test_slice_of_slices([][]uint{ + {4}, + {5, 6}, + {7, 8, 9}, + }) + test_empty_slice([]uint{}) } diff --git a/pkg/dynamicinstrumentation/testutil/sample/strings.go b/pkg/dynamicinstrumentation/testutil/sample/strings.go index 0eb3564e6e5c5..d9791cd80427e 100644 --- a/pkg/dynamicinstrumentation/testutil/sample/strings.go +++ b/pkg/dynamicinstrumentation/testutil/sample/strings.go @@ -9,7 +9,12 @@ package sample //go:noinline func test_single_string(x string) {} +//nolint:all +//go:noinline +func test_three_strings(x, y, z string) {} + //nolint:all func ExecuteStringFuncs() { test_single_string("abc") + test_three_strings("abc", "def", "ghi") } diff --git a/pkg/dynamicinstrumentation/testutil/sample/structs.go b/pkg/dynamicinstrumentation/testutil/sample/structs.go index ff57ffba8d1c3..1befe97aa8930 100644 --- a/pkg/dynamicinstrumentation/testutil/sample/structs.go +++ b/pkg/dynamicinstrumentation/testutil/sample/structs.go @@ -25,6 +25,14 @@ func test_struct_with_array(a structWithAnArray) {} //go:noinline func test_struct_with_a_slice(s structWithASlice) {} +//nolint:all +//go:noinline +func test_pointer_to_struct_with_a_slice(s *structWithASlice) {} + +//nolint:all +//go:noinline +func test_pointer_to_struct_with_a_string(s *structWithAString) {} + //nolint:all //go:noinline func test_struct(x aStruct) {} @@ -91,7 +99,9 @@ func ExecuteStructFuncs() { test_ten_strings(tenStrings{}) test_struct_and_byte('a', s) test_struct_with_array(structWithAnArray{[5]uint8{1, 2, 3, 4, 5}}) - test_struct_with_a_slice(structWithASlice{[]uint8{1, 2, 3}}) + test_struct_with_a_slice(structWithASlice{1, []uint8{2, 3, 4}}) + test_pointer_to_struct_with_a_slice(&structWithASlice{5, []uint8{2, 3, 4}}) + test_pointer_to_struct_with_a_string(&structWithAString{5, "abcdef"}) tenStr := tenStrings{ first: "one", @@ -202,9 +212,15 @@ type structWithAnArray struct { } type structWithASlice struct { + x int slice []uint8 } +type structWithAString struct { + x int + s string +} + type nestedStruct struct { anotherInt int anotherString string diff --git a/pkg/dynamicinstrumentation/uploader/di_log_converter.go b/pkg/dynamicinstrumentation/uploader/di_log_converter.go index e42ca051819be..7abdfa76b4f4e 100644 --- a/pkg/dynamicinstrumentation/uploader/di_log_converter.go +++ b/pkg/dynamicinstrumentation/uploader/di_log_converter.go @@ -79,7 +79,7 @@ func convertProbe(probe *ditypes.Probe) ditypes.ProbeInSnapshot { } } -func convertCaptures(defs []ditypes.Parameter, captures []*ditypes.Param) ditypes.Captures { +func convertCaptures(defs []*ditypes.Parameter, captures []*ditypes.Param) ditypes.Captures { 
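
The new sample functions above (test_string_slice, test_slice_of_slices, test_three_strings, the pointer-to-struct variants) all carry //go:noinline so the compiler keeps them as distinct functions with their own entry points and DWARF parameter locations for uprobes to attach to. A tiny illustrative sketch of that pattern; unlike the real samples, whose bodies are empty, this one returns a value only so the example prints something:

```go
package main

import "fmt"

// Sample targets for dynamic instrumentation are declared with //go:noinline so
// they survive as real functions; an inlined call site leaves nothing for a
// uprobe to attach to and no parameter locations to read.
//
//go:noinline
func test_three_strings(x, y, z string) string { return x + y + z }

func main() {
	// Calling the target from a driver (like ExecuteStringFuncs in the sample
	// service) guarantees the probe fires while the e2e test is capturing.
	fmt.Println(test_three_strings("abc", "def", "ghi"))
}
```
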
return ditypes.Captures{ Entry: &ditypes.Capture{ Arguments: convertArgs(defs, captures), @@ -87,8 +87,8 @@ func convertCaptures(defs []ditypes.Parameter, captures []*ditypes.Param) ditype } } -func reportCaptureError(defs []ditypes.Parameter) ditypes.Captures { - notCapturedReason := "Failed to instrument, type is unsupported or too complex. Please report this issue." +func reportCaptureError(defs []*ditypes.Parameter) ditypes.Captures { + notCapturedReason := "type unsupported" args := make(map[string]*ditypes.CapturedValue) for _, def := range defs { @@ -104,23 +104,21 @@ func reportCaptureError(defs []ditypes.Parameter) ditypes.Captures { } } -func convertArgs(defs []ditypes.Parameter, captures []*ditypes.Param) map[string]*ditypes.CapturedValue { +func convertArgs(defs []*ditypes.Parameter, captures []*ditypes.Param) map[string]*ditypes.CapturedValue { args := make(map[string]*ditypes.CapturedValue) for idx, capture := range captures { var ( - argName string - captureDef *ditypes.Parameter - defPieces []ditypes.Parameter + argName string + defPieces []*ditypes.Parameter ) if idx < len(defs) { argName = defs[idx].Name - captureDef = &defs[idx] } if argName == "" { argName = fmt.Sprintf("arg_%d", idx) } if reflect.Kind(capture.Kind) == reflect.Slice { - args[argName] = convertSlice(captureDef, capture) + args[argName] = convertSlice(capture) continue } if capture == nil { @@ -143,15 +141,29 @@ func convertArgs(defs []ditypes.Parameter, captures []*ditypes.Param) map[string return args } -func convertSlice(def *ditypes.Parameter, capture *ditypes.Param) *ditypes.CapturedValue { - if def == nil || len(def.ParameterPieces) != 2 { - // The definition should have two fields, for type, and for length - return nil +func convertSlice(capture *ditypes.Param) *ditypes.CapturedValue { + defs := []*ditypes.Parameter{} + for i := range capture.Fields { + var ( + fieldType string + fieldKind uint + fieldSize int64 + ) + if capture.Fields[i] != nil { + fieldType = capture.Fields[i].Type + fieldKind = uint(capture.Fields[i].Kind) + fieldSize = int64(capture.Fields[i].Size) + } + defs = append(defs, &ditypes.Parameter{ + Name: fmt.Sprintf("[%d]%s", i, fieldType), + Type: fieldType, + Kind: fieldKind, + TotalSize: fieldSize, + }) } sliceValue := &ditypes.CapturedValue{ - Fields: map[string]*ditypes.CapturedValue{}, + Fields: convertArgs(defs, capture.Fields), } - sliceValue.Fields = convertArgs(def.ParameterPieces, capture.Fields) return sliceValue } @@ -163,7 +175,7 @@ func parseFuncName(funcName string) (string, string) { return "", funcName } -func getFunctionArguments(proc *ditypes.ProcessInfo, probe *ditypes.Probe) []ditypes.Parameter { +func getFunctionArguments(proc *ditypes.ProcessInfo, probe *ditypes.Probe) []*ditypes.Parameter { return proc.TypeMap.Functions[probe.FuncName] } diff --git a/pkg/dynamicinstrumentation/uploader/stack_trace.go b/pkg/dynamicinstrumentation/uploader/stack_trace.go index f428e2c40e0d2..d01edeae2da6c 100644 --- a/pkg/dynamicinstrumentation/uploader/stack_trace.go +++ b/pkg/dynamicinstrumentation/uploader/stack_trace.go @@ -30,23 +30,6 @@ func parseStackTrace(procInfo *ditypes.ProcessInfo, rawProgramCounters []uint64) break } - entries, ok := procInfo.TypeMap.InlinedFunctions[rawProgramCounters[i]] - if ok { - for n := range entries { - inlinedFuncInfo, err := pcToLine(procInfo, rawProgramCounters[i]) - if err != nil { - return stackTrace, fmt.Errorf("could not resolve pc to inlined function info: %w", err) - } - - symName, lineNumber, err := 
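
convertArgs falls back to positional arg_N names when a parameter definition is missing, and convertSlice now synthesizes per-element definitions from the captured fields instead of requiring them up front. A standalone sketch of that naming fallback, with simplified types in place of ditypes.Parameter and ditypes.Param:

```go
package main

import "fmt"

type paramDef struct{ Name string }
type captured struct{ Value string }

// nameArgs pairs captured values with their definitions, falling back to a
// positional arg_<index> name when no definition (or no name) is available,
// matching the arg_0/arg_1 keys seen in the updated test fixtures.
func nameArgs(defs []*paramDef, captures []*captured) map[string]string {
	out := make(map[string]string)
	for idx, c := range captures {
		name := ""
		if idx < len(defs) && defs[idx] != nil {
			name = defs[idx].Name
		}
		if name == "" {
			name = fmt.Sprintf("arg_%d", idx)
		}
		out[name] = c.Value
	}
	return out
}

func main() {
	defs := []*paramDef{{Name: "x"}} // only the first argument has a name
	caps := []*captured{{Value: "1"}, {Value: "2"}, {Value: "3"}}
	fmt.Println(nameArgs(defs, caps)) // map[arg_1:2 arg_2:3 x:1]
}
```
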
parseInlinedEntry(procInfo.DwarfData.Reader(), entries[n]) - if err != nil { - return stackTrace, fmt.Errorf("could not get inlined entries: %w", err) - } - stackFrame := ditypes.StackFrame{Function: fmt.Sprintf("%s [inlined in %s]", symName, inlinedFuncInfo.fn), FileName: inlinedFuncInfo.file, Line: int(lineNumber)} - stackTrace = append(stackTrace, stackFrame) - } - } - funcInfo, err := pcToLine(procInfo, rawProgramCounters[i]) if err != nil { return stackTrace, fmt.Errorf("could not resolve pc to function info: %w", err) @@ -114,6 +97,9 @@ func pcToLine(procInfo *ditypes.ProcessInfo, pc uint64) (*funcInfo, error) { return nil, fmt.Errorf("invalid file number in dwarf function entry associated with compile unit") } + if int(fileNumber) >= len(files) || files[fileNumber] == nil { + return nil, fmt.Errorf("could not find file") + } file = files[fileNumber].Name return &funcInfo{ @@ -122,30 +108,3 @@ func pcToLine(procInfo *ditypes.ProcessInfo, pc uint64) (*funcInfo, error) { fn: fn, }, nil } - -func parseInlinedEntry(reader *dwarf.Reader, e *dwarf.Entry) (name string, line int64, err error) { - - var offset dwarf.Offset - - for i := range e.Field { - if e.Field[i].Attr == dwarf.AttrAbstractOrigin { - offset = e.Field[i].Val.(dwarf.Offset) - reader.Seek(offset) - entry, err := reader.Next() - if err != nil { - return "", -1, fmt.Errorf("could not read inlined function origin: %w", err) - } - for j := range entry.Field { - if entry.Field[j].Attr == dwarf.AttrName { - name = entry.Field[j].Val.(string) - } - } - } - - if e.Field[i].Attr == dwarf.AttrCallLine { - line = e.Field[i].Val.(int64) - } - } - - return name, line, nil -} diff --git a/pkg/dynamicinstrumentation/uploader/writer.go b/pkg/dynamicinstrumentation/uploader/writer.go index 7708c6166f4cb..3ebab2a281961 100644 --- a/pkg/dynamicinstrumentation/uploader/writer.go +++ b/pkg/dynamicinstrumentation/uploader/writer.go @@ -20,19 +20,22 @@ import ( "github.com/kr/pretty" ) -type WriterSerializer[T any] struct { //nolint:revive // TODO +// WriterSerializer is an interface to serialize output guarded by a mutex +type WriterSerializer[T any] struct { output io.Writer mu sync.Mutex } -func NewWriterLogSerializer(writer io.Writer) (*WriterSerializer[ditypes.SnapshotUpload], error) { //nolint:revive // TODO +// NewWriterLogSerializer creates a new WriterSerializer for snapshot uploads +func NewWriterLogSerializer(writer io.Writer) (*WriterSerializer[ditypes.SnapshotUpload], error) { if writer == nil { return nil, errors.New("nil writer for creating log serializer") } return NewWriterSerializer[ditypes.SnapshotUpload](writer) } -func NewWriterDiagnosticSerializer(dm *diagnostics.DiagnosticManager, writer io.Writer) (*WriterSerializer[ditypes.DiagnosticUpload], error) { //nolint:revive // TODO +// NewWriterDiagnosticSerializer creates a new WriterSerializer for diagnostics uploads +func NewWriterDiagnosticSerializer(dm *diagnostics.DiagnosticManager, writer io.Writer) (*WriterSerializer[ditypes.DiagnosticUpload], error) { if writer == nil { return nil, errors.New("nil writer for creating diagnostic serializer") } @@ -51,7 +54,8 @@ func NewWriterDiagnosticSerializer(dm *diagnostics.DiagnosticManager, writer io. 
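
Alongside dropping the inlined-frame handling, pcToLine now validates the DWARF file index before dereferencing it. A small sketch of that guard, with a plain slice standing in for the compile unit's file table:

```go
package main

import (
	"errors"
	"fmt"
)

type fileEntry struct{ Name string }

// fileName returns the name for a DWARF file index, rejecting indexes that are
// out of range or that point at a nil entry, instead of panicking on files[idx].Name.
func fileName(files []*fileEntry, idx int) (string, error) {
	if idx < 0 || idx >= len(files) || files[idx] == nil {
		return "", errors.New("could not find file")
	}
	return files[idx].Name, nil
}

func main() {
	files := []*fileEntry{nil, {Name: "main.go"}}
	fmt.Println(fileName(files, 1)) // main.go <nil>
	fmt.Println(fileName(files, 5)) // "" could not find file
}
```
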
return ds, nil } -func NewWriterSerializer[T any](writer io.Writer) (*WriterSerializer[T], error) { //nolint:revive // TODO +// NewWriterSerializer creates a new WriterLogSerializer for generic types +func NewWriterSerializer[T any](writer io.Writer) (*WriterSerializer[T], error) { if writer == nil { return nil, errors.New("nil writer for creating serializer") } @@ -60,14 +64,15 @@ func NewWriterSerializer[T any](writer io.Writer) (*WriterSerializer[T], error) }, nil } -func (s *WriterSerializer[T]) Enqueue(item *T) error { //nolint:revive // TODO +// Enqueue writes an item to the output +func (s *WriterSerializer[T]) Enqueue(item *T) error { s.mu.Lock() defer s.mu.Unlock() bs, err := json.Marshal(item) if err != nil { return fmt.Errorf("Failed to marshal item %v", item) } - + bs = append(bs, '\n') _, err = s.output.Write(bs) if err != nil { return err diff --git a/pkg/dynamicinstrumentation/util/file_watcher.go b/pkg/dynamicinstrumentation/util/file_watcher.go index 944d8e3b35020..2b2aa862fbbd5 100644 --- a/pkg/dynamicinstrumentation/util/file_watcher.go +++ b/pkg/dynamicinstrumentation/util/file_watcher.go @@ -5,7 +5,8 @@ //go:build linux_bpf -package util //nolint:revive // TODO +// Package util providers utility file functions to dynamic instrumentation +package util import ( "os" @@ -70,6 +71,7 @@ func (fw *FileWatcher) Watch() (<-chan []byte, error) { return updateChan, nil } -func (fw *FileWatcher) Stop() { //nolint:revive // TODO +// Stop causes the FileWatcher to stop watching the file +func (fw *FileWatcher) Stop() { fw.stop <- true } diff --git a/pkg/ebpf/btf.go b/pkg/ebpf/btf.go index 7d7f3e91b5033..45ad7bec22893 100644 --- a/pkg/ebpf/btf.go +++ b/pkg/ebpf/btf.go @@ -21,7 +21,6 @@ import ( "github.com/cilium/ebpf/btf" - ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/util/archive" "github.com/DataDog/datadog-agent/pkg/util/funcs" "github.com/DataDog/datadog-agent/pkg/util/kernel" @@ -87,14 +86,12 @@ func FlushBTF() { } } -type kernelModuleBTFLoadFunc func(string) (*btf.Spec, error) - type returnBTF struct { - vmlinux *btf.Spec - moduleLoadFunc kernelModuleBTFLoadFunc + vmlinux *btf.Spec } -type BTFResultMetadata struct { //nolint:revive // TODO +// BTFResultMetadata holds metadata about BTF results +type BTFResultMetadata struct { // numLoadAttempts is how many times the loader has been invoked (doesn't include cached requests) numLoadAttempts int // loaderUsed the name of the loader that was used to get the BTF data @@ -120,7 +117,7 @@ type orderedBTFLoader struct { userBTFPath string embeddedDir string - result ebpftelemetry.BTFResult + result BTFResult resultMetadata BTFResultMetadata loadFunc funcs.CachedFunc[returnBTF] delayedFlusher *time.Timer @@ -130,7 +127,7 @@ func initBTFLoader(cfg *Config) *orderedBTFLoader { btfLoader := &orderedBTFLoader{ userBTFPath: cfg.BTFPath, embeddedDir: filepath.Join(cfg.BPFDir, "co-re", "btf"), - result: ebpftelemetry.BtfNotFound, + result: BtfNotFound, } btfLoader.loadFunc = funcs.CacheWithCallback[returnBTF](btfLoader.get, loadKernelSpec.Flush) btfLoader.delayedFlusher = time.AfterFunc(btfFlushDelay, btfLoader.Flush) @@ -140,12 +137,12 @@ func initBTFLoader(cfg *Config) *orderedBTFLoader { type btfLoaderFunc func() (*returnBTF, error) // Get returns BTF for the running kernel -func (b *orderedBTFLoader) Get() (*returnBTF, ebpftelemetry.COREResult, error) { +func (b *orderedBTFLoader) Get() (*returnBTF, COREResult, error) { ret, err := b.loadFunc.Do() if ret != nil && ret.vmlinux 
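
Enqueue now appends a newline after each marshalled item, producing newline-delimited JSON under a mutex. A self-contained sketch of that serializer shape, generic over T like WriterSerializer but without the Datadog types:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"sync"
)

// lineSerializer writes one JSON object per line, guarding the writer with a
// mutex so concurrent Enqueue calls do not interleave their output.
type lineSerializer[T any] struct {
	mu  sync.Mutex
	out io.Writer
}

func (s *lineSerializer[T]) Enqueue(item *T) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	bs, err := json.Marshal(item)
	if err != nil {
		return fmt.Errorf("failed to marshal item %v: %w", item, err)
	}
	bs = append(bs, '\n') // newline-delimited: each snapshot lands on its own line
	_, err = s.out.Write(bs)
	return err
}

type snapshot struct {
	Service string `json:"service"`
}

func main() {
	var buf bytes.Buffer
	s := &lineSerializer[snapshot]{out: &buf}
	_ = s.Enqueue(&snapshot{Service: "a"})
	_ = s.Enqueue(&snapshot{Service: "b"})
	fmt.Print(buf.String())
}
```
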
!= nil { b.delayedFlusher.Reset(btfFlushDelay) } - return ret, ebpftelemetry.COREResult(b.result), err + return ret, COREResult(b.result), err } // Flush deletes any cached BTF @@ -158,13 +155,13 @@ func (b *orderedBTFLoader) get() (*returnBTF, error) { b.resultMetadata.numLoadAttempts++ loaders := []struct { - result ebpftelemetry.BTFResult + result BTFResult loader btfLoaderFunc desc string }{ - {ebpftelemetry.SuccessCustomBTF, b.loadUser, "configured BTF file"}, - {ebpftelemetry.SuccessDefaultBTF, b.loadKernel, "kernel"}, - {ebpftelemetry.SuccessEmbeddedBTF, b.loadEmbedded, "embedded collection"}, + {SuccessCustomBTF, b.loadUser, "configured BTF file"}, + {SuccessDefaultBTF, b.loadKernel, "kernel"}, + {SuccessEmbeddedBTF, b.loadEmbedded, "embedded collection"}, } var err error var ret *returnBTF @@ -196,8 +193,7 @@ func (b *orderedBTFLoader) loadKernel() (*returnBTF, error) { } b.resultMetadata.filepathUsed = "" return &returnBTF{ - vmlinux: spec, - moduleLoadFunc: nil, + vmlinux: spec, }, nil } @@ -211,8 +207,7 @@ func (b *orderedBTFLoader) loadUser() (*returnBTF, error) { } b.resultMetadata.filepathUsed = b.userBTFPath return &returnBTF{ - vmlinux: spec, - moduleLoadFunc: nil, + vmlinux: spec, }, nil } @@ -227,19 +222,13 @@ func (b *orderedBTFLoader) checkForMinimizedBTF(extractDir string) (*returnBTF, } b.resultMetadata.filepathUsed = extractedBtfPath return &returnBTF{ - vmlinux: spec, - moduleLoadFunc: nil, + vmlinux: spec, }, nil } return nil, nil } func (b *orderedBTFLoader) checkForUnminimizedBTF(extractDir string) (*returnBTF, error) { - absExtractDir := filepath.Join(b.embeddedDir, extractDir) - modLoadFunc := func(mod string) (*btf.Spec, error) { - b.delayedFlusher.Reset(btfFlushDelay) - return loadBTFFrom(filepath.Join(absExtractDir, mod)) - } btfRelativePath := filepath.Join(extractDir, "vmlinux") extractedBtfPath := filepath.Join(b.embeddedDir, btfRelativePath) if _, err := os.Stat(extractedBtfPath); err == nil { @@ -249,8 +238,7 @@ func (b *orderedBTFLoader) checkForUnminimizedBTF(extractDir string) (*returnBTF } b.resultMetadata.filepathUsed = extractedBtfPath return &returnBTF{ - vmlinux: spec, - moduleLoadFunc: modLoadFunc, + vmlinux: spec, }, nil } return nil, nil diff --git a/pkg/ebpf/btf_test.go b/pkg/ebpf/btf_test.go index fd8edceba0a7c..3eb370ea93a3a 100644 --- a/pkg/ebpf/btf_test.go +++ b/pkg/ebpf/btf_test.go @@ -17,8 +17,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" ) func TestEmbeddedBTFMatch(t *testing.T) { @@ -64,7 +62,7 @@ func TestBTFTelemetry(t *testing.T) { ret, result, err := loader.Get() require.NoError(t, err) require.NotNil(t, ret) - require.NotEqual(t, ebpftelemetry.COREResult(ebpftelemetry.BtfNotFound), result) + require.NotEqual(t, COREResult(BtfNotFound), result) } func curDir() (string, error) { diff --git a/pkg/ebpf/c/bpf_bypass.h b/pkg/ebpf/c/bpf_bypass.h index 6ab05047d2264..d5c40ffcda8fb 100644 --- a/pkg/ebpf/c/bpf_bypass.h +++ b/pkg/ebpf/c/bpf_bypass.h @@ -12,6 +12,10 @@ BPF_ARRAY_MAP(program_bypassed, u32, 1) unsigned long bypass_program; \ asm("%0 = " "bypass_program" " ll" : "=r"(bypass_program) :: "memory"); +/* BPF_BYPASSABLE_KPROBE is identical to BPF_KPROBE (bpf_tracing.h), but with a stub (CHECK_BPF_PROGRAM_BYPASSED) + * that checks if the program is bypassed. This is useful for testing, as we want to dynamically control + * the execution of the program. + */ #define BPF_BYPASSABLE_KPROBE(name, args...) 
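
The loader above tries BTF sources in a fixed order (configured file, kernel, embedded collection) and records which one succeeded. A hedged sketch of that ordered-fallback shape, independent of the real BTFResult values and the cached loadFunc:

```go
package main

import (
	"errors"
	"fmt"
)

type source struct {
	name string
	load func() (string, error)
}

// loadFirst walks the sources in order and returns the first successful result
// along with the name of the loader that produced it, joining errors otherwise.
func loadFirst(sources []source) (spec, used string, err error) {
	var errs error
	for _, s := range sources {
		spec, err := s.load()
		if err == nil {
			return spec, s.name, nil
		}
		errs = errors.Join(errs, fmt.Errorf("%s: %w", s.name, err))
	}
	return "", "", errs
}

func main() {
	spec, used, err := loadFirst([]source{
		{name: "configured BTF file", load: func() (string, error) { return "", errors.New("not configured") }},
		{name: "kernel", load: func() (string, error) { return "vmlinux BTF", nil }},
		{name: "embedded collection", load: func() (string, error) { return "embedded BTF", nil }},
	})
	fmt.Println(spec, used, err) // vmlinux BTF kernel <nil>
}
```
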
\ name(struct pt_regs *ctx); \ static __always_inline typeof(name(0)) \ @@ -27,6 +31,10 @@ typeof(name(0)) name(struct pt_regs *ctx) \ static __always_inline typeof(name(0)) \ ____##name(struct pt_regs *ctx, ##args) +/* BPF_BYPASSABLE_KRETPROBE is identical to BPF_KRETPROBE (bpf_tracing.h), but with a stub (CHECK_BPF_PROGRAM_BYPASSED) + * that checks if the program is bypassed. This is useful for testing, as we want to dynamically control + * the execution of the program. + */ #define BPF_BYPASSABLE_KRETPROBE(name, args...) \ name(struct pt_regs *ctx); \ static __always_inline typeof(name(0)) \ @@ -48,4 +56,23 @@ static __always_inline typeof(name(0)) ____##name(struct pt_regs *ctx, ##args) #define BPF_BYPASSABLE_UPROBE(name, args...) BPF_BYPASSABLE_KPROBE(name, ##args) #define BPF_BYPASSABLE_URETPROBE(name, args...) BPF_BYPASSABLE_KRETPROBE(name, ##args) +/* BPF_BYPASSABLE_PROG is identical to BPF_PROG (bpf_tracing.h), but with a stub (CHECK_BPF_PROGRAM_BYPASSED) + * that checks if the program is bypassed. This is useful for testing, as we want to dynamically control + * the execution of the program. + */ +#define BPF_BYPASSABLE_PROG(name, args...) \ +name(unsigned long long *ctx); \ +static __always_inline typeof(name(0)) \ +____##name(unsigned long long *ctx, ##args); \ +typeof(name(0)) name(unsigned long long *ctx) \ +{ \ + CHECK_BPF_PROGRAM_BYPASSED() \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wint-conversion\"") \ + return ____##name(___bpf_ctx_cast(args)); \ + _Pragma("GCC diagnostic pop") \ +} \ +static __always_inline typeof(name(0)) \ +____##name(unsigned long long *ctx, ##args) + #endif diff --git a/pkg/ebpf/c/bpf_helpers_custom.h b/pkg/ebpf/c/bpf_helpers_custom.h index 42c83c272ed11..d2b997032293c 100644 --- a/pkg/ebpf/c/bpf_helpers_custom.h +++ b/pkg/ebpf/c/bpf_helpers_custom.h @@ -39,4 +39,17 @@ unsigned long long load_half(void *skb, unsigned long long load_word(void *skb, unsigned long long off) asm("llvm.bpf.load.word"); +// declare our own versions of these enums, because they don't exist on <5.8 +enum { + DD_BPF_RB_NO_WAKEUP = 1, + DD_BPF_RB_FORCE_WAKEUP = 2, +}; + +enum { + DD_BPF_RB_AVAIL_DATA = 0, + DD_BPF_RB_RING_SIZE = 1, + DD_BPF_RB_CONS_POS = 2, + DD_BPF_RB_PROD_POS = 3, +}; + #endif diff --git a/pkg/ebpf/c/map-defs.h b/pkg/ebpf/c/map-defs.h index c55e84f2c00e9..165257783fc67 100644 --- a/pkg/ebpf/c/map-defs.h +++ b/pkg/ebpf/c/map-defs.h @@ -3,50 +3,59 @@ #include "bpf_helpers.h" -#define BPF_MAP(_name, _type, _key_type, _value_type, _max_entries, _pin, _map_flags) \ - struct { \ - __uint(type, _type); \ - __uint(max_entries, _max_entries); \ - __uint(pinning, _pin); \ - __type(key, _key_type); \ - __type(value, _value_type); \ - __uint(map_flags, _map_flags); \ +#define BPF_MAP(_name, _type, _key_type, _value_type, _max_entries, _pin, _map_flags, _key_exclude) \ + struct { \ + __uint(type, _type); \ + __uint(max_entries, _max_entries); \ + __uint(pinning, _pin); \ + _key_exclude(__type(key, _key_type)); \ + __type(value, _value_type); \ + __uint(map_flags, _map_flags); \ } _name SEC(".maps"); +#define EXCLUDE_KEY_TYPE(x) +#define INCLUDE_KEY_TYPE(x) x + #define BPF_PERF_EVENT_ARRAY_MAP_PINNED(name, value_type, max_entries) \ - BPF_MAP(name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, u32, value_type, max_entries, 1, 0) + BPF_MAP(name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, u32, value_type, max_entries, 1, 0, INCLUDE_KEY_TYPE) #define BPF_PERF_EVENT_ARRAY_MAP(name, value_type) \ - BPF_MAP(name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, u32, value_type, 0, 0, 0) + 
BPF_MAP(name, BPF_MAP_TYPE_PERF_EVENT_ARRAY, u32, value_type, 0, 0, 0, INCLUDE_KEY_TYPE) #define BPF_RINGBUF_MAP(name, value_type) \ - BPF_MAP(name, BPF_MAP_TYPE_RINGBUF, u32, value_type, 0, 0, 0) + BPF_MAP(name, BPF_MAP_TYPE_RINGBUF, u32, value_type, 0, 0, 0, INCLUDE_KEY_TYPE) #define BPF_ARRAY_MAP(name, value_type, max_entries) \ - BPF_MAP(name, BPF_MAP_TYPE_ARRAY, u32, value_type, max_entries, 0, 0) + BPF_MAP(name, BPF_MAP_TYPE_ARRAY, u32, value_type, max_entries, 0, 0, INCLUDE_KEY_TYPE) #define BPF_HASH_MAP_PINNED(name, key_type, value_type, max_entries) \ - BPF_MAP(name, BPF_MAP_TYPE_HASH, key_type, value_type, max_entries, 1, 0) + BPF_MAP(name, BPF_MAP_TYPE_HASH, key_type, value_type, max_entries, 1, 0, INCLUDE_KEY_TYPE) #define BPF_HASH_MAP(name, key_type, value_type, max_entries) \ - BPF_MAP(name, BPF_MAP_TYPE_HASH, key_type, value_type, max_entries, 0, 0) + BPF_MAP(name, BPF_MAP_TYPE_HASH, key_type, value_type, max_entries, 0, 0, INCLUDE_KEY_TYPE) + +#define BPF_HASH_MAP_FLAGS(name, key_type, value_type, max_entries, map_flags) \ + BPF_MAP(name, BPF_MAP_TYPE_HASH, key_type, value_type, max_entries, 0, map_flags, INCLUDE_KEY_TYPE) #define BPF_PROG_ARRAY(name, max_entries) \ - BPF_MAP(name, BPF_MAP_TYPE_PROG_ARRAY, u32, u32, max_entries, 0, 0) + BPF_MAP(name, BPF_MAP_TYPE_PROG_ARRAY, u32, u32, max_entries, 0, 0, INCLUDE_KEY_TYPE) #define BPF_LRU_MAP(name, key_type, value_type, max_entries) \ - BPF_MAP(name, BPF_MAP_TYPE_LRU_HASH, key_type, value_type, max_entries, 0, 0) + BPF_MAP(name, BPF_MAP_TYPE_LRU_HASH, key_type, value_type, max_entries, 0, 0, INCLUDE_KEY_TYPE) #define BPF_LRU_MAP_PINNED(name, key_type, value_type, max_entries) \ - BPF_MAP(name, BPF_MAP_TYPE_LRU_HASH, key_type, value_type, max_entries, 1, 0) + BPF_MAP(name, BPF_MAP_TYPE_LRU_HASH, key_type, value_type, max_entries, 1, 0, INCLUDE_KEY_TYPE) #define BPF_LRU_MAP_FLAGS(name, key_type, value_type, max_entries, map_flags) \ - BPF_MAP(name, BPF_MAP_TYPE_LRU_HASH, key_type, value_type, max_entries, 0, map_flags) + BPF_MAP(name, BPF_MAP_TYPE_LRU_HASH, key_type, value_type, max_entries, 0, map_flags, INCLUDE_KEY_TYPE) #define BPF_PERCPU_HASH_MAP(name, key_type, value_type, max_entries) \ - BPF_MAP(name, BPF_MAP_TYPE_PERCPU_HASH, key_type, value_type, max_entries, 0, 0) + BPF_MAP(name, BPF_MAP_TYPE_PERCPU_HASH, key_type, value_type, max_entries, 0, 0, INCLUDE_KEY_TYPE) #define BPF_PERCPU_ARRAY_MAP(name, value_type, max_entries) \ - BPF_MAP(name, BPF_MAP_TYPE_PERCPU_ARRAY, u32, value_type, max_entries, 0, 0) + BPF_MAP(name, BPF_MAP_TYPE_PERCPU_ARRAY, u32, value_type, max_entries, 0, 0, INCLUDE_KEY_TYPE) + +#define BPF_STACK_MAP(name, value_type, max_entries) \ + BPF_MAP(name, BPF_MAP_TYPE_STACK, 0, value_type, max_entries, 0, 0, EXCLUDE_KEY_TYPE) #endif diff --git a/pkg/ebpf/co_re.go b/pkg/ebpf/co_re.go index 34f13e5c4ef4d..d651cfd504541 100644 --- a/pkg/ebpf/co_re.go +++ b/pkg/ebpf/co_re.go @@ -17,7 +17,6 @@ import ( bpflib "github.com/cilium/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" - ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -55,7 +54,7 @@ func GetBTFLoaderInfo() (string, error) { } func (c *coreAssetLoader) loadCOREAsset(filename string, startFn func(bytecode.AssetReader, manager.Options) error) error { - var result ebpftelemetry.COREResult + var result COREResult base := strings.TrimSuffix(filename, path.Ext(filename)) defer func() { c.reportTelemetry(base, result) @@ -71,13 
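
The EXCLUDE_KEY_TYPE parameter exists because BPF stack (and queue) maps carry no key, so the macro must be able to drop the __type(key, ...) line that every other map declares. From the Go side the same constraint shows up as a zero key size in the cilium/ebpf MapSpec. A hedged sketch only: map names here are invented, and actually creating the map needs a recent kernel plus CAP_BPF or root:

```go
package main

import (
	"fmt"

	"github.com/cilium/ebpf"
)

func main() {
	spec := &ebpf.MapSpec{
		Name:       "example_stack", // illustrative name, not a map from this patch
		Type:       ebpf.Stack,
		KeySize:    0, // stack/queue maps reject a key type, hence EXCLUDE_KEY_TYPE
		ValueSize:  8,
		MaxEntries: 16,
	}
	m, err := ebpf.NewMap(spec)
	if err != nil {
		fmt.Println("could not create stack map (needs privileges/kernel support):", err)
		return
	}
	defer m.Close()
	fmt.Println("created", spec.Name, "with", m.MaxEntries(), "entries")
}
```
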
+70,12 @@ func (c *coreAssetLoader) loadCOREAsset(filename string, startFn func(bytecode.A buf, err := bytecode.GetReader(c.coreDir, filename) if err != nil { - result = ebpftelemetry.AssetReadError + result = AssetReadError return fmt.Errorf("error reading %s: %s", filename, err) } defer buf.Close() opts := manager.Options{ - KernelModuleBTFLoadFunc: ret.moduleLoadFunc, VerifierOptions: bpflib.CollectionOptions{ Programs: bpflib.ProgramOptions{ KernelTypes: ret.vmlinux, @@ -89,16 +87,16 @@ func (c *coreAssetLoader) loadCOREAsset(filename string, startFn func(bytecode.A if err != nil { var ve *bpflib.VerifierError if errors.As(err, &ve) { - result = ebpftelemetry.VerifierError + result = VerifierError } else { - result = ebpftelemetry.LoaderError + result = LoaderError } } return err } -func (c *coreAssetLoader) reportTelemetry(assetName string, result ebpftelemetry.COREResult) { - ebpftelemetry.StoreCORETelemetryForAsset(assetName, result) +func (c *coreAssetLoader) reportTelemetry(assetName string, result COREResult) { + StoreCORETelemetryForAsset(assetName, result) var err error platform, err := getBTFPlatform() @@ -121,13 +119,13 @@ func (c *coreAssetLoader) reportTelemetry(assetName string, result ebpftelemetry // capacity should match number of tags tags := make([]string, 0, 6) tags = append(tags, platform.String(), platformVersion, kernelVersion, arch, assetName) - if ebpftelemetry.BTFResult(result) < ebpftelemetry.BtfNotFound { - switch ebpftelemetry.BTFResult(result) { - case ebpftelemetry.SuccessCustomBTF: + if BTFResult(result) < BtfNotFound { + switch BTFResult(result) { + case SuccessCustomBTF: tags = append(tags, "custom") - case ebpftelemetry.SuccessEmbeddedBTF: + case SuccessEmbeddedBTF: tags = append(tags, "embedded") - case ebpftelemetry.SuccessDefaultBTF: + case SuccessDefaultBTF: tags = append(tags, "default") default: return @@ -136,15 +134,15 @@ func (c *coreAssetLoader) reportTelemetry(assetName string, result ebpftelemetry return } - if ebpftelemetry.BTFResult(result) == ebpftelemetry.BtfNotFound { + if BTFResult(result) == BtfNotFound { tags = append(tags, "btf_not_found") } else { switch result { - case ebpftelemetry.AssetReadError: + case AssetReadError: tags = append(tags, "asset_read") - case ebpftelemetry.VerifierError: + case VerifierError: tags = append(tags, "verifier") - case ebpftelemetry.LoaderError: + case LoaderError: tags = append(tags, "loader") default: return diff --git a/pkg/ebpf/telemetry/co_re_telemetry.go b/pkg/ebpf/co_re_telemetry.go similarity index 98% rename from pkg/ebpf/telemetry/co_re_telemetry.go rename to pkg/ebpf/co_re_telemetry.go index 055e9702f96d1..0b1908aa05609 100644 --- a/pkg/ebpf/telemetry/co_re_telemetry.go +++ b/pkg/ebpf/co_re_telemetry.go @@ -5,7 +5,7 @@ //go:build linux_bpf -package telemetry +package ebpf import ( "sync" diff --git a/pkg/ebpf/telemetry/co_re_telemetry_test.go b/pkg/ebpf/co_re_telemetry_test.go similarity index 97% rename from pkg/ebpf/telemetry/co_re_telemetry_test.go rename to pkg/ebpf/co_re_telemetry_test.go index 71b46a37ea10d..732fd2216c619 100644 --- a/pkg/ebpf/telemetry/co_re_telemetry_test.go +++ b/pkg/ebpf/co_re_telemetry_test.go @@ -5,7 +5,7 @@ //go:build linux_bpf -package telemetry +package ebpf import ( "testing" diff --git a/pkg/ebpf/helper_call_patcher.go b/pkg/ebpf/helper_call_patcher.go index b274d752270a9..380bbab529aeb 100644 --- a/pkg/ebpf/helper_call_patcher.go +++ b/pkg/ebpf/helper_call_patcher.go @@ -40,7 +40,7 @@ var replaceIns = asm.Mov.Imm(asm.R0, 0) // conditionally select eBPF 
helpers. This should be regarded as a last resort // when the aforementioned options don't apply (prebuilt artifacts, for // example). -func NewHelperCallRemover(helpers ...asm.BuiltinFunc) Modifier { +func NewHelperCallRemover(helpers ...asm.BuiltinFunc) ModifierBeforeInit { return &helperCallRemover{ helpers: helpers, } @@ -81,10 +81,6 @@ func (h *helperCallRemover) BeforeInit(m *manager.Manager, _ names.ModuleName, _ return nil } -func (h *helperCallRemover) AfterInit(*manager.Manager, names.ModuleName, *manager.Options) error { - return nil -} - func (h *helperCallRemover) String() string { return fmt.Sprintf("HelperCallRemover[%+v]", h.helpers) } diff --git a/pkg/ebpf/lockcontention.go b/pkg/ebpf/lockcontention.go index d9ac3662a7d3d..998f7c8cb778d 100644 --- a/pkg/ebpf/lockcontention.go +++ b/pkg/ebpf/lockcontention.go @@ -423,7 +423,7 @@ func (l *LockContentionCollector) Initialize(trackAllResources bool) error { } if uint32(count) < ranges && !staticRanges { - log.Warnf("discovered fewer ranges than expected: %d < %d", count, ranges) + log.Debugf("discovered fewer ranges than expected: %d < %d", count, ranges) } for i, id := range mapids { diff --git a/pkg/ebpf/manager.go b/pkg/ebpf/manager.go index 06e790609a9ec..1f7990accede4 100644 --- a/pkg/ebpf/manager.go +++ b/pkg/ebpf/manager.go @@ -8,6 +8,7 @@ package ebpf import ( + "errors" "fmt" "io" "sync" @@ -51,27 +52,84 @@ func NewManagerWithDefault(mgr *manager.Manager, name string, modifiers ...Modif return NewManager(mgr, name, append(defaultModifiers, modifiers...)...) } -// Modifier is an interface that can be implemented by a package to -// add functionality to the ebpf.Manager. It exposes a name to identify the modifier, -// two functions that will be called before and after the ebpf.Manager.InitWithOptions -// call, and a function that will be called when the manager is stopped. -// Note regarding internal state of the modifier: if the modifier is added to the list of modifiers -// enabled by default (pkg/ebpf/ebpf.go:registerDefaultModifiers), all managers with those default modifiers -// will share the same instance of the modifier. On the other hand, if the modifier is added to a specific -// manager, it can have its own instance of the modifier, unless the caller explicitly uses the same modifier -// instance with different managers. In other words, if the modifier is to have any internal state specific to -// each manager, it should not be added to the list of default modifiers, and developers using it -// should be aware of this behavior. +// Modifier is an interface that can be implemented by a package to add +// functionality to the ebpf.Manager. It exposes a name to identify the +// modifier, and then any of the functions Before/AfterInit, Before/AfterStart, +// Before/AfterStop, that will be called at the corresponding stage of the +// manager lifecycle. To avoid code churn and implementing unnecessary +// functions, the Modifier interface is split into sub-interfaces, each with a +// single function. This way, the developer can implement only the functions +// they need, and the manager will call them at the right time. Note regarding +// internal state of the modifier: if the modifier is added to the list of +// modifiers enabled by default (see NewManagerWithDefault above), all managers +// with those default modifiers will share the same instance of the modifier. 
On +// the other hand, if the modifier is added to a specific manager, it can have +// its own instance of the modifier, unless the caller explicitly uses the same +// modifier instance with different managers. In other words, if the modifier is +// to have any internal state specific to each manager, it should not be added +// to the list of default modifiers, and developers using it should be aware of +// this behavior. type Modifier interface { fmt.Stringer +} + +// ModifierBeforeInit is a sub-interface of Modifier that exposes a BeforeInit method +type ModifierBeforeInit interface { + Modifier + // BeforeInit is called before the ebpf.Manager.InitWithOptions call - // names.ModuleName refers to the name associated with Manager instance. + // names.ModuleName refers to the name associated with Manager instance. An + // error returned from this function will stop the initialization process. BeforeInit(*manager.Manager, names.ModuleName, *manager.Options) error +} + +// ModifierAfterInit is a sub-interface of Modifier that exposes an AfterInit method +type ModifierAfterInit interface { + Modifier // AfterInit is called after the ebpf.Manager.InitWithOptions call AfterInit(*manager.Manager, names.ModuleName, *manager.Options) error } +// ModifierPreStart is a sub-interface of Modifier that exposes an PreStart method +type ModifierPreStart interface { + Modifier + + // PreStart is called before the ebpf.Manager.Start call + PreStart(*manager.Manager, names.ModuleName) error +} + +// ModifierBeforeStop is a sub-interface of Modifier that exposes a BeforeStop method +type ModifierBeforeStop interface { + Modifier + + // BeforeStop is called before the ebpf.Manager.Stop call. An error returned + // from this function will not prevent the manager from stopping, but it will + // be logged. + BeforeStop(*manager.Manager, names.ModuleName, manager.MapCleanupType) error +} + +// ModifierAfterStop is a sub-interface of Modifier that exposes an AfterStop method +type ModifierAfterStop interface { + Modifier + + // AfterStop is called after the ebpf.Manager.Stop call. An error returned + // from this function will be logged. 
+ AfterStop(*manager.Manager, names.ModuleName, manager.MapCleanupType) error +} + +func runModifiersOfType[K Modifier](modifiers []Modifier, funcName string, runner func(K) error) error { + var errs error + for _, mod := range modifiers { + if as, ok := mod.(K); ok { + if err := runner(as); err != nil { + errs = errors.Join(errs, fmt.Errorf("error running %s manager modifier %s: %w", mod, funcName, err)) + } + } + } + return errs +} + // InitWithOptions is a wrapper around ebpf-manager.Manager.InitWithOptions func (m *Manager) InitWithOptions(bytecode io.ReaderAt, opts *manager.Options) error { // we must load the ELF file before initialization, @@ -81,22 +139,52 @@ func (m *Manager) InitWithOptions(bytecode io.ReaderAt, opts *manager.Options) e return fmt.Errorf("failed to load elf from reader: %w", err) } - for _, mod := range m.EnabledModifiers { - log.Tracef("Running %s manager modifier BeforeInit", mod) - if err := mod.BeforeInit(m.Manager, m.Name, opts); err != nil { - return fmt.Errorf("error running %s manager modifier: %w", mod, err) - } + err := runModifiersOfType(m.EnabledModifiers, "BeforeInit", func(mod ModifierBeforeInit) error { + return mod.BeforeInit(m.Manager, m.Name, opts) + }) + if err != nil { + return err } if err := m.Manager.InitWithOptions(nil, *opts); err != nil { return err } - for _, mod := range m.EnabledModifiers { - log.Tracef("Running %s manager modifier AfterInit", mod) - if err := mod.AfterInit(m.Manager, m.Name, opts); err != nil { - return fmt.Errorf("error running %s manager modifier: %w", mod, err) - } + return runModifiersOfType(m.EnabledModifiers, "AfterInit", func(mod ModifierAfterInit) error { + return mod.AfterInit(m.Manager, m.Name, opts) + }) +} + +// Stop is a wrapper around ebpf-manager.Manager.Stop +func (m *Manager) Stop(cleanupType manager.MapCleanupType) error { + var errs error + + err := runModifiersOfType(m.EnabledModifiers, "BeforeStop", func(mod ModifierBeforeStop) error { + return mod.BeforeStop(m.Manager, m.Name, cleanupType) + }) + if err != nil { + errs = errors.Join(errs, err) + } + + if err := m.Manager.Stop(cleanupType); err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to stop manager %w", err)) } - return nil + + err = runModifiersOfType(m.EnabledModifiers, "AfterStop", func(mod ModifierAfterStop) error { + return mod.AfterStop(m.Manager, m.Name, cleanupType) + }) + + return errors.Join(errs, err) +} + +// Start is a wrapper around ebpf-manager.Manager.Start +func (m *Manager) Start() error { + err := runModifiersOfType(m.EnabledModifiers, "PreStart", func(mod ModifierPreStart) error { + return mod.PreStart(m.Manager, m.Name) + }) + if err != nil { + return err + } + + return m.Manager.Start() } diff --git a/pkg/ebpf/manager_test.go b/pkg/ebpf/manager_test.go index 32018e808fb0e..54c45f2498cda 100644 --- a/pkg/ebpf/manager_test.go +++ b/pkg/ebpf/manager_test.go @@ -9,29 +9,44 @@ package ebpf import ( "testing" - "github.com/DataDog/datadog-agent/pkg/ebpf/names" manager "github.com/DataDog/ebpf-manager" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + "github.com/DataDog/datadog-agent/pkg/ebpf/names" ) -// PrintkPatcherModifier adds an InstructionPatcher to the manager that removes the newline character from log_debug calls if needed type dummyModifier struct { + mock.Mock } const dummyModifierName = "DummyModifier" func (t *dummyModifier) String() string { + // Do not mock this method for 
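
The Modifier split above relies on optional sub-interfaces plus a generic runner that type-asserts each modifier for the hook in question and joins the errors, which is also what the new Start and Stop wrappers lean on. A self-contained sketch of that pattern without the ebpf-manager types; the hook names here are simplified stand-ins:

```go
package main

import (
	"errors"
	"fmt"
)

// Modifier only identifies itself; lifecycle hooks live on optional
// sub-interfaces, so a modifier implements just the stages it cares about.
type Modifier interface{ fmt.Stringer }

type BeforeStarter interface {
	Modifier
	BeforeStart() error
}

type AfterStopper interface {
	Modifier
	AfterStop() error
}

// runModifiersOfType calls runner on every modifier that implements K and joins
// the errors, mirroring runModifiersOfType in pkg/ebpf/manager.go.
func runModifiersOfType[K Modifier](mods []Modifier, stage string, runner func(K) error) error {
	var errs error
	for _, m := range mods {
		if k, ok := m.(K); ok {
			if err := runner(k); err != nil {
				errs = errors.Join(errs, fmt.Errorf("%s modifier %s: %w", stage, m, err))
			}
		}
	}
	return errs
}

type logger struct{}

func (logger) String() string     { return "logger" }
func (logger) BeforeStart() error { fmt.Println("logger: before start"); return nil }

type flaky struct{}

func (flaky) String() string   { return "flaky" }
func (flaky) AfterStop() error { return errors.New("boom") }

func main() {
	mods := []Modifier{logger{}, flaky{}}
	_ = runModifiersOfType(mods, "BeforeStart", func(m BeforeStarter) error { return m.BeforeStart() })
	err := runModifiersOfType(mods, "AfterStop", func(m AfterStopper) error { return m.AfterStop() })
	fmt.Println(err) // AfterStop modifier flaky: boom
}
```
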
simplicity, to avoid having to define it always return dummyModifierName } -// BeforeInit adds the patchPrintkNewline function to the manager -func (t *dummyModifier) BeforeInit(_ *manager.Manager, _ names.ModuleName, _ *manager.Options) error { - return nil +func (t *dummyModifier) BeforeInit(m *manager.Manager, name names.ModuleName, opts *manager.Options) error { + args := t.Called(m, name, opts) + return args.Error(0) +} + +func (t *dummyModifier) AfterInit(m *manager.Manager, name names.ModuleName, opts *manager.Options) error { + args := t.Called(m, name, opts) + return args.Error(0) } -// AfterInit is a no-op for this modifier -func (t *dummyModifier) AfterInit(_ *manager.Manager, _ names.ModuleName, _ *manager.Options) error { - return nil +func (t *dummyModifier) BeforeStop(m *manager.Manager, name names.ModuleName, cleanupType manager.MapCleanupType) error { + args := t.Called(m, name, cleanupType) + return args.Error(0) +} + +func (t *dummyModifier) AfterStop(m *manager.Manager, name names.ModuleName, cleanupType manager.MapCleanupType) error { + args := t.Called(m, name, cleanupType) + return args.Error(0) } func TestNewManagerWithDefault(t *testing.T) { @@ -69,3 +84,36 @@ func TestNewManagerWithDefault(t *testing.T) { }) } } + +func TestManagerInitWithOptions(t *testing.T) { + modifier := &dummyModifier{} + modifier.On("BeforeInit", mock.Anything, mock.Anything, mock.Anything).Return(nil) + modifier.On("AfterInit", mock.Anything, mock.Anything, mock.Anything).Return(nil) + + mgr := NewManager(&manager.Manager{}, "test", modifier) + require.NotNil(t, mgr) + + err := LoadCOREAsset("logdebug-test.o", func(buf bytecode.AssetReader, opts manager.Options) error { + err := mgr.InitWithOptions(buf, &opts) + require.NoError(t, err) + + return nil + }) + require.NoError(t, err) + + modifier.AssertExpectations(t) +} + +func TestManagerStop(t *testing.T) { + modifier := &dummyModifier{} + modifier.On("BeforeStop", mock.Anything, mock.Anything, mock.Anything).Return(nil) + modifier.On("AfterStop", mock.Anything, mock.Anything, mock.Anything).Return(nil) + + mgr := NewManager(&manager.Manager{}, "test", modifier) + require.NotNil(t, mgr) + + // The Stop call will fail because the manager is not initialized, but the modifiers should still be called + _ = mgr.Stop(manager.CleanAll) + + modifier.AssertExpectations(t) +} diff --git a/pkg/ebpf/map_cleaner.go b/pkg/ebpf/map_cleaner.go index 9ac674764fc3a..4b07dd2c8d6c0 100644 --- a/pkg/ebpf/map_cleaner.go +++ b/pkg/ebpf/map_cleaner.go @@ -40,6 +40,9 @@ type MapCleaner[K any, V any] struct { emap *maps.GenericMap[K, V] batchSize uint32 + // useBatchAPI determines whether the cleaner will use the batch API for iteration and deletion. + useBatchAPI bool + once sync.Once // termination @@ -53,7 +56,9 @@ type MapCleaner[K any, V any] struct { elapsed telemetry.SimpleHistogram } -// NewMapCleaner instantiates a new MapCleaner +// NewMapCleaner instantiates a new MapCleaner. defaultBatchSize controls the +// batch size for iteration of the map. If it is set to 1, the batch API will +// not be used for iteration nor for deletion. 
func NewMapCleaner[K any, V any](emap *ebpf.Map, defaultBatchSize uint32, name, module string) (*MapCleaner[K, V], error) { batchSize := defaultBatchSize if defaultBatchSize > emap.MaxEntries() { @@ -68,10 +73,12 @@ func NewMapCleaner[K any, V any](emap *ebpf.Map, defaultBatchSize uint32, name, return nil, err } + useBatchAPI := batchSize > 1 && m.CanUseBatchAPI() + singleTags := map[string]string{"map_name": name, "module": module, "api": "single"} batchTags := map[string]string{"map_name": name, "module": module, "api": "batch"} tags := singleTags - if m.CanUseBatchAPI() { + if useBatchAPI { tags = batchTags } return &MapCleaner[K, V]{ @@ -83,6 +90,7 @@ func NewMapCleaner[K any, V any](emap *ebpf.Map, defaultBatchSize uint32, name, batchDeleted: mapCleanerTelemetry.deleted.WithTags(batchTags), aborts: mapCleanerTelemetry.aborts.WithTags(tags), elapsed: mapCleanerTelemetry.elapsed.WithTags(tags), + useBatchAPI: useBatchAPI, }, nil } @@ -105,7 +113,7 @@ func (mc *MapCleaner[K, V]) Clean(interval time.Duration, preClean func() bool, // of a version comparison because some distros have backported this API), and fallback to // the old method otherwise. The new API is also more efficient because it minimizes the number of allocations. cleaner := mc.cleanWithoutBatches - if mc.emap.CanUseBatchAPI() { + if mc.useBatchAPI { cleaner = mc.cleanWithBatches } ticker := time.NewTicker(interval) diff --git a/pkg/ebpf/map_cleaner_test.go b/pkg/ebpf/map_cleaner_test.go index a9dfdbd1cb32b..ccbc9617e8573 100644 --- a/pkg/ebpf/map_cleaner_test.go +++ b/pkg/ebpf/map_cleaner_test.go @@ -17,6 +17,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/ebpf/maps" + "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -110,6 +112,53 @@ func TestMapCleaner(t *testing.T) { } } +func TestMapCleanerBatchSize1ForcesSingleItem(t *testing.T) { + const numMapEntries = 100 + m, err := ebpf.NewMap(&ebpf.MapSpec{ + Type: ebpf.Hash, + KeySize: 8, + ValueSize: 8, + MaxEntries: numMapEntries, + }) + require.NoError(t, err) + + t.Run("batch size 1", func(t *testing.T) { + cleaner, err := NewMapCleaner[int64, int64](m, 1, "test", "") + require.NoError(t, err) + require.NotNil(t, cleaner) + require.False(t, cleaner.useBatchAPI) + }) + + t.Run("batch size not 1", func(t *testing.T) { + cleaner, err := NewMapCleaner[int64, int64](m, 100, "test", "") + require.NoError(t, err) + require.NotNil(t, cleaner) + + require.Equal(t, maps.BatchAPISupported(), cleaner.useBatchAPI) + }) + + t.Run("map does not support batches", func(t *testing.T) { + kernelVersion, err := kernel.HostVersion() + require.NoError(t, err) + if kernelVersion < kernel.VersionCode(4, 6, 0) { + t.Skip("Kernel version does not support per-CPU maps") + } + + m, err := ebpf.NewMap(&ebpf.MapSpec{ + Type: ebpf.PerCPUHash, + KeySize: 8, + ValueSize: 8, + MaxEntries: numMapEntries, + }) + require.NoError(t, err) + + cleaner, err := NewMapCleaner[int64, []int64](m, 100, "test", "") + require.NoError(t, err) + require.NotNil(t, cleaner) + require.False(t, cleaner.useBatchAPI) + }) +} + func benchmarkBatchCleaner(b *testing.B, numMapEntries, batchSize uint32) { var ( key = new(int64) diff --git a/pkg/ebpf/mappings.go b/pkg/ebpf/mappings.go index b7feb3f966195..3c0171812c8ca 100644 --- a/pkg/ebpf/mappings.go +++ b/pkg/ebpf/mappings.go @@ -49,6 +49,26 @@ func RemoveProgramID(progID uint32, expectedModule string) { } } +// ClearNameMappings clears all name 
mappings for a given module +func ClearNameMappings(module string) { + mappingLock.Lock() + defer mappingLock.Unlock() + + for progID, progModule := range progModuleMapping { + if progModule == module { + delete(progNameMapping, progID) + delete(progModuleMapping, progID) + } + } + + for mapID, mapModule := range mapModuleMapping { + if mapModule == module { + delete(mapNameMapping, mapID) + delete(mapModuleMapping, mapID) + } + } +} + // AddNameMappings adds the full name mappings for ebpf maps in the manager func AddNameMappings(mgr *manager.Manager, module string) { maps, err := mgr.GetMaps() @@ -56,6 +76,11 @@ func AddNameMappings(mgr *manager.Manager, module string) { return } + progs, err := mgr.GetPrograms() + if err != nil { + return + } + mappingLock.Lock() defer mappingLock.Unlock() @@ -64,10 +89,6 @@ func AddNameMappings(mgr *manager.Manager, module string) { mapModuleMapping[mapid] = module }) - progs, err := mgr.GetPrograms() - if err != nil { - return - } iterateProgs(progs, func(progid uint32, name string) { progNameMapping[progid] = name progModuleMapping[progid] = module diff --git a/pkg/ebpf/maps/generic_map.go b/pkg/ebpf/maps/generic_map.go index 02036ce8a7605..15dba2ed118b4 100644 --- a/pkg/ebpf/maps/generic_map.go +++ b/pkg/ebpf/maps/generic_map.go @@ -25,7 +25,8 @@ import ( const defaultBatchSize = 100 -var ErrBatchAPINotSupported = errors.New("batch API not supported for this map: check whether key is fixed-size, kernel supports batch API and if this map is not per-cpu") //nolint:revive // TODO +// ErrBatchAPINotSupported is the error when batch API is not supported for a given map +var ErrBatchAPINotSupported = errors.New("batch API not supported for this map: check whether key is fixed-size, kernel supports batch API and if this map is not per-cpu") // BatchAPISupported returns true if the kernel supports the batch API for maps var BatchAPISupported = funcs.MemoizeNoError(func() bool { diff --git a/pkg/ebpf/names/names.go b/pkg/ebpf/names/names.go index 7daa51a30b084..9bab478ccb6a3 100644 --- a/pkg/ebpf/names/names.go +++ b/pkg/ebpf/names/names.go @@ -21,11 +21,18 @@ type MapName struct { n string } -func (m *MapName) Name() string { //nolint:revive // TODO +// Name returns the map name as a string +func (m *MapName) Name() string { return m.n } -func NewMapNameFromManagerMap(m *manager.Map) MapName { //nolint:revive // TODO +// NewMapNameFromManagerMap creates a MapName object from a *manager.Map +func NewMapNameFromManagerMap(m *manager.Map) MapName { + return MapName{n: m.Name} +} + +// NewMapNameFromMapSpec creates a MapName object from an *ebpf.MapSpec +func NewMapNameFromMapSpec(m *ebpf.MapSpec) MapName { return MapName{n: m.Name} } @@ -36,23 +43,30 @@ type ProgramName struct { n string } -func (p *ProgramName) Name() string { //nolint:revive // TODO +// Name returns the program name as a string +func (p *ProgramName) Name() string { return p.n } -func NewProgramNameFromProgramSpec(spec *ebpf.ProgramSpec) ProgramName { //nolint:revive // TODO +// NewProgramNameFromProgramSpec creates a ProgramName from a *ebpf.ProgramSpec +func NewProgramNameFromProgramSpec(spec *ebpf.ProgramSpec) ProgramName { return ProgramName{n: spec.Name} } -type ModuleName struct { //nolint:revive // TODO +// ModuleName represents a module name. 
It should be used in places where +// we want guarantees that we are working with a string which was intended +// by the programmer to be treated as a module name +type ModuleName struct { n string } -func (mn *ModuleName) Name() string { //nolint:revive // TODO +// Name returns the module name as a string +func (mn *ModuleName) Name() string { return mn.n } -func NewModuleName(mn string) ModuleName { //nolint:revive // TODO +// NewModuleName creates a ModuleName from a string +func NewModuleName(mn string) ModuleName { return ModuleName{n: mn} } diff --git a/pkg/ebpf/perf/event.go b/pkg/ebpf/perf/event.go new file mode 100644 index 0000000000000..0fcf33a14603a --- /dev/null +++ b/pkg/ebpf/perf/event.go @@ -0,0 +1,460 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +// Package perf implements types related to eBPF and the perf subsystem, like perf buffers and ring buffers. +package perf + +import ( + "errors" + "fmt" + "slices" + "sync/atomic" + + manager "github.com/DataDog/ebpf-manager" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" + "github.com/cilium/ebpf/perf" + "github.com/cilium/ebpf/ringbuf" + + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/names" + ebpfTelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" + ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" +) + +var perfPool = ddsync.NewDefaultTypedPool[perf.Record]() +var ringbufPool = ddsync.NewDefaultTypedPool[ringbuf.Record]() + +// Flusher is an interface for objects that support flushing +type Flusher interface { + Flush() +} + +// compile time check to ensure this satisfies the Modifier* interfaces +var _ ddebpf.ModifierPreStart = (*EventHandler)(nil) +var _ ddebpf.ModifierAfterStop = (*EventHandler)(nil) + +// EventHandler abstracts consuming data from a perf buffer or ring buffer (depending on availability and options). +// It handles upgrading maps from a ring buffer if desired, and unmarshalling into the desired data type. +type EventHandler struct { + f Flusher + opts eventHandlerOptions + // mapName specifies the name of the map + mapName string + // handler is the callback for data received from the perf/ring buffer + handler func([]byte) + + readLoop func() + perfChan chan *perf.Record + ringChan chan *ringbuf.Record + + chLenTelemetry *atomic.Uint64 +} + +type mapMode uint8 + +const ( + perfBufferOnly mapMode = iota + upgradePerfBuffer + ringBufferOnly +) + +// EventHandlerMode controls the mode in which the event handler operates +type EventHandlerMode func(*EventHandler) + +// UsePerfBuffers will only use perf buffers and will not attempt any upgrades to ring buffers. +func UsePerfBuffers(bufferSize int, channelSize int, perfMode PerfBufferMode) EventHandlerMode { + return func(e *EventHandler) { + e.opts.mode = perfBufferOnly + e.opts.channelSize = channelSize + e.opts.perfBufferSize = bufferSize + perfMode(&e.opts.perfOptions) + } +} + +// UpgradePerfBuffers will upgrade to ring buffers if available, but will fall back to perf buffers if not. 
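// A hedged construction sketch (the "events" map name, the onEvent callback and
// the buffer sizes are illustrative assumptions, not values required by this
// patch):
//
//	handler, err := NewEventHandler("events", onEvent,
//		UpgradePerfBuffers(4096, 100, Watermark(1), 32*1024))
//	if err != nil {
//		return err
//	}
//	// handler is then registered on the ddebpf Manager wrapper as a modifier,
//	// so its BeforeInit/PreStart/AfterStop hooks run around the manager lifecycle.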
+func UpgradePerfBuffers(perfBufferSize int, channelSize int, perfMode PerfBufferMode, ringBufferSize int) EventHandlerMode { + return func(e *EventHandler) { + e.opts.mode = upgradePerfBuffer + e.opts.channelSize = channelSize + e.opts.perfBufferSize = perfBufferSize + e.opts.ringBufferSize = ringBufferSize + perfMode(&e.opts.perfOptions) + } +} + +// UseRingBuffers will only use ring buffers. +func UseRingBuffers(bufferSize int, channelSize int) EventHandlerMode { + return func(e *EventHandler) { + e.opts.mode = ringBufferOnly + e.opts.channelSize = channelSize + e.opts.ringBufferSize = bufferSize + } +} + +// EventHandlerOption is an option that applies to the event handler +type EventHandlerOption func(*EventHandler) + +// SendTelemetry specifies whether to collect usage telemetry from the perf/ring buffer +func SendTelemetry(enabled bool) EventHandlerOption { + return func(e *EventHandler) { + e.opts.telemetryEnabled = enabled + } +} + +// RingBufferEnabledConstantName provides a constant name that will be set whether ring buffers are in use +func RingBufferEnabledConstantName(name string) EventHandlerOption { + return func(e *EventHandler) { + e.opts.ringBufferEnabledConstantName = name + } +} + +// RingBufferWakeupSize sets a constant for eBPF to use, that determines when to wakeup userspace +func RingBufferWakeupSize(name string, size uint64) EventHandlerOption { + return func(e *EventHandler) { + e.opts.ringBufferWakeupConstantName = name + e.opts.ringBufferWakeupSize = size + } +} + +// eventHandlerOptions are the options controlling the EventHandler. +type eventHandlerOptions struct { + // telemetryEnabled specifies whether to collect usage telemetry from the perf/ring buffer. + telemetryEnabled bool + + mode mapMode + channelSize int + + perfBufferSize int + perfOptions perfBufferOptions + + ringBufferSize int + ringBufferEnabledConstantName string + + ringBufferWakeupConstantName string + ringBufferWakeupSize uint64 +} + +// PerfBufferMode is a mode for the perf buffer +// +//nolint:revive +type PerfBufferMode func(*perfBufferOptions) + +// Watermark - The reader will start processing samples once their sizes in the perf buffer +// exceed this value. Must be smaller than the perf buffer size. +func Watermark(byteCount int) PerfBufferMode { + return func(opts *perfBufferOptions) { + opts.watermark = byteCount + opts.wakeupEvents = 0 + } +} + +// WakeupEvents - The number of events required in any per CPU buffer before Read will process data. 
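// For example (the values are illustrative), WakeupEvents(8) wakes the reader
// only once 8 records have accumulated on a per-CPU buffer, trading latency for
// fewer wakeups, whereas Watermark(4096) wakes it based on accumulated bytes;
// the two settings are mutually exclusive here, since applying one resets the
// other to zero.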
+func WakeupEvents(count int) PerfBufferMode { + return func(opts *perfBufferOptions) { + opts.wakeupEvents = count + opts.watermark = 0 + } +} + +// perfBufferOptions are options specifically for perf buffers +// +//nolint:revive +type perfBufferOptions struct { + watermark int + wakeupEvents int +} + +// NewEventHandler creates an event handler with the provided options +func NewEventHandler(mapName string, handler func([]byte), mode EventHandlerMode, opts ...EventHandlerOption) (*EventHandler, error) { + if mapName == "" { + return nil, errors.New("invalid options: MapName is required") + } + if handler == nil { + return nil, errors.New("invalid options: Handler is required") + } + e := &EventHandler{ + mapName: mapName, + handler: handler, + } + mode(e) + for _, opt := range opts { + opt(e) + } + if e.opts.telemetryEnabled { + e.chLenTelemetry = &atomic.Uint64{} + } + return e, nil +} + +// BeforeInit implements the Modifier interface +// This function will modify the shared buffers according to the user provided mode +func (e *EventHandler) BeforeInit(mgr *manager.Manager, moduleName names.ModuleName, mgrOpts *manager.Options) (err error) { + ms, _, _ := mgr.GetMapSpec(e.mapName) + if ms == nil { + return fmt.Errorf("unable to find map spec %q", e.mapName) + } + defer e.setupEnabledConstant(mgrOpts) + defer e.setupRingbufferWakeupConstant(mgrOpts) + + ringBufErr := features.HaveMapType(ebpf.RingBuf) + if e.opts.mode == ringBufferOnly { + if ringBufErr != nil { + return ringBufErr + } + if ms.Type != ebpf.RingBuf { + return fmt.Errorf("map %q is not a ring buffer, got %q instead", e.mapName, ms.Type.String()) + } + + // the size of the ring buffer is communicated to the kernel via the max entries field + // of the bpf map + if ms.MaxEntries != uint32(e.opts.ringBufferSize) { + ResizeRingBuffer(mgrOpts, e.mapName, e.opts.ringBufferSize) + } + e.initRingBuffer(mgr) + return nil + } + defer e.removeRingBufferHelperCalls(mgr, moduleName, mgrOpts) + + if e.opts.mode == perfBufferOnly { + if ms.Type != ebpf.PerfEventArray { + return fmt.Errorf("map %q is not a perf buffer, got %q instead", e.mapName, ms.Type.String()) + } + e.initPerfBuffer(mgr) + return nil + } + + if e.opts.mode == upgradePerfBuffer { + if ms.Type != ebpf.PerfEventArray { + return fmt.Errorf("map %q is not a perf buffer, got %q instead", e.mapName, ms.Type.String()) + } + + // the layout of the bpf map for perf buffers does not match that of ring buffers. + // When upgrading perf buffers to ring buffers, we must account for these differences. 
+ // - Ring buffers do not use key/value sizes + // - Ring buffers specify their size via max entries + if ringBufErr == nil { + UpgradePerfBuffer(mgr, mgrOpts, e.mapName) + if ms.MaxEntries != uint32(e.opts.ringBufferSize) { + ResizeRingBuffer(mgrOpts, e.mapName, e.opts.ringBufferSize) + } + e.initRingBuffer(mgr) + return nil + } + + e.initPerfBuffer(mgr) + return nil + } + + return fmt.Errorf("unsupported EventHandlerMode %d", e.opts.mode) +} + +func (e *EventHandler) removeRingBufferHelperCalls(mgr *manager.Manager, moduleName names.ModuleName, mgrOpts *manager.Options) { + if features.HaveMapType(ebpf.RingBuf) == nil { + return + } + // add helper call remover because ring buffers are not available + _ = ddebpf.NewHelperCallRemover(asm.FnRingbufOutput, asm.FnRingbufQuery, asm.FnRingbufReserve, asm.FnRingbufSubmit, asm.FnRingbufDiscard).BeforeInit(mgr, moduleName, mgrOpts) +} + +func (e *EventHandler) setupEnabledConstant(mgrOpts *manager.Options) { + if e.opts.ringBufferEnabledConstantName == "" || e.f == nil { + return + } + + var val uint64 + switch e.f.(type) { + case *manager.RingBuffer: + val = uint64(1) + default: + val = uint64(0) + } + mgrOpts.ConstantEditors = append(mgrOpts.ConstantEditors, manager.ConstantEditor{ + Name: e.opts.ringBufferEnabledConstantName, + Value: val, + }) +} + +func (e *EventHandler) setupRingbufferWakeupConstant(mgrOpts *manager.Options) { + if e.opts.ringBufferWakeupSize == 0 || e.opts.ringBufferWakeupConstantName == "" || e.f == nil { + return + } + + switch e.f.(type) { + case *manager.RingBuffer: + mgrOpts.ConstantEditors = append(mgrOpts.ConstantEditors, manager.ConstantEditor{ + Name: e.opts.ringBufferWakeupConstantName, + Value: e.opts.ringBufferWakeupSize, + }) + default: + // do nothing + } +} + +// PreStart implements the ModifierPreStart interface +func (e *EventHandler) PreStart(_ *manager.Manager, _ names.ModuleName) error { + go e.readLoop() + return nil +} + +// AfterStop implements the ModifierAfterStop interface +func (e *EventHandler) AfterStop(_ *manager.Manager, _ names.ModuleName, _ manager.MapCleanupType) error { + if e.perfChan != nil { + close(e.perfChan) + } + if e.ringChan != nil { + close(e.ringChan) + } + return nil +} + +func (e *EventHandler) String() string { + return "EventHandler" +} + +// Flush flushes the pending data from the underlying perfbuf/ringbuf +func (e *EventHandler) Flush() { + e.f.Flush() +} + +// ResizeRingBuffer resizes the ring buffer by creating/updating a map spec editor +func ResizeRingBuffer(mgrOpts *manager.Options, mapName string, bufferSize int) { + if mgrOpts.MapSpecEditors == nil { + mgrOpts.MapSpecEditors = make(map[string]manager.MapSpecEditor) + } + specEditor := mgrOpts.MapSpecEditors[mapName] + specEditor.MaxEntries = uint32(bufferSize) + specEditor.EditorFlag |= manager.EditMaxEntries + mgrOpts.MapSpecEditors[mapName] = specEditor +} + +func (e *EventHandler) perfLoop() { + for record := range e.perfChan { + e.perfLoopHandler(record) + } +} + +func (e *EventHandler) initPerfBuffer(mgr *manager.Manager) { + e.perfChan = make(chan *perf.Record, e.opts.channelSize) + e.readLoop = e.perfLoop + + // remove any existing perf buffers from manager + mgr.PerfMaps = slices.DeleteFunc(mgr.PerfMaps, func(perfMap *manager.PerfMap) bool { + return perfMap.Name == e.mapName + }) + pm := &manager.PerfMap{ + Map: manager.Map{Name: e.mapName}, + PerfMapOptions: manager.PerfMapOptions{ + PerfRingBufferSize: e.opts.perfBufferSize, + Watermark: e.opts.perfOptions.watermark, + WakeupEvents: 
e.opts.perfOptions.wakeupEvents, + RecordHandler: e.perfRecordHandler, + LostHandler: nil, // TODO do we need support for Lost? + RecordGetter: perfPool.Get, + TelemetryEnabled: e.opts.telemetryEnabled, + }, + } + mgr.PerfMaps = append(mgr.PerfMaps, pm) + ebpfTelemetry.ReportPerfMapTelemetry(pm) + ebpfTelemetry.ReportPerfMapChannelLenTelemetry(pm, func() int { + return int(e.chLenTelemetry.Swap(0)) + }) + e.f = pm +} + +func (e *EventHandler) perfRecordHandler(record *perf.Record, _ *manager.PerfMap, _ *manager.Manager) { + e.perfChan <- record + if e.opts.telemetryEnabled { + updateMaxTelemetry(e.chLenTelemetry, uint64(len(e.perfChan))) + } +} + +func (e *EventHandler) perfLoopHandler(record *perf.Record) { + // record is only allowed to live for the duration of the callback. Put it back into the sync.Pool once done. + defer perfPool.Put(record) + e.handler(record.RawSample) +} + +func (e *EventHandler) initRingBuffer(mgr *manager.Manager) { + e.ringChan = make(chan *ringbuf.Record, e.opts.channelSize) + e.readLoop = e.ringLoop + + // remove any existing matching ring buffers from manager + mgr.RingBuffers = slices.DeleteFunc(mgr.RingBuffers, func(ringBuf *manager.RingBuffer) bool { + return ringBuf.Name == e.mapName + }) + rb := &manager.RingBuffer{ + Map: manager.Map{Name: e.mapName}, + RingBufferOptions: manager.RingBufferOptions{ + RecordHandler: e.ringRecordHandler, + RecordGetter: ringbufPool.Get, + TelemetryEnabled: e.opts.telemetryEnabled, + }, + } + mgr.RingBuffers = append(mgr.RingBuffers, rb) + ebpfTelemetry.ReportRingBufferTelemetry(rb) + ebpfTelemetry.ReportRingBufferChannelLenTelemetry(rb, func() int { + return int(e.chLenTelemetry.Swap(0)) + }) + e.f = rb +} + +func (e *EventHandler) ringLoop() { + for record := range e.ringChan { + e.ringLoopHandler(record) + } +} + +func (e *EventHandler) ringRecordHandler(record *ringbuf.Record, _ *manager.RingBuffer, _ *manager.Manager) { + e.ringChan <- record + if e.opts.telemetryEnabled { + updateMaxTelemetry(e.chLenTelemetry, uint64(len(e.ringChan))) + } +} + +func (e *EventHandler) ringLoopHandler(record *ringbuf.Record) { + // record is only allowed to live for the duration of the callback. Put it back into the sync.Pool once done. + defer ringbufPool.Put(record) + e.handler(record.RawSample) +} + +// UpgradePerfBuffer upgrades a perf buffer to a ring buffer by creating a map spec editor +func UpgradePerfBuffer(mgr *manager.Manager, mgrOpts *manager.Options, mapName string) { + if mgrOpts.MapSpecEditors == nil { + mgrOpts.MapSpecEditors = make(map[string]manager.MapSpecEditor) + } + specEditor := mgrOpts.MapSpecEditors[mapName] + specEditor.Type = ebpf.RingBuf + specEditor.KeySize = 0 + specEditor.ValueSize = 0 + specEditor.EditorFlag |= manager.EditType | manager.EditKeyValue + mgrOpts.MapSpecEditors[mapName] = specEditor + + // remove map from perf maps because it has been upgraded + mgr.PerfMaps = slices.DeleteFunc(mgr.PerfMaps, func(perfMap *manager.PerfMap) bool { + return perfMap.Name == mapName + }) +} + +// implement the CAS algorithm to atomically update a max value +func updateMaxTelemetry(a *atomic.Uint64, val uint64) { + for { + oldVal := a.Load() + if val <= oldVal { + return + } + // if the value at a is not `oldVal`, then `CompareAndSwap` returns + // false indicating that the value of the atomic has changed between + // the above check and this invocation. + // In this case we retry the above test, to see if the value still needs + // to be updated. 
+ if a.CompareAndSwap(oldVal, val) { + return + } + } +} diff --git a/pkg/ebpf/printk_patcher.go b/pkg/ebpf/printk_patcher.go index afda0f4869c6a..b417c8b011415 100644 --- a/pkg/ebpf/printk_patcher.go +++ b/pkg/ebpf/printk_patcher.go @@ -213,6 +213,9 @@ func patchPrintkInstructions(p *ebpf.ProgramSpec) (int, error) { type PrintkPatcherModifier struct { } +// ensure PrintkPatcherModifier implements the ModifierBeforeInit interface +var _ ModifierBeforeInit = &PrintkPatcherModifier{} + func (t *PrintkPatcherModifier) String() string { return "PrintkPatcherModifier" } @@ -222,8 +225,3 @@ func (t *PrintkPatcherModifier) BeforeInit(m *manager.Manager, _ names.ModuleNam m.InstructionPatchers = append(m.InstructionPatchers, patchPrintkNewline) return nil } - -// AfterInit is a no-op for this modifier -func (t *PrintkPatcherModifier) AfterInit(_ *manager.Manager, _ names.ModuleName, _ *manager.Options) error { - return nil -} diff --git a/pkg/ebpf/printk_patcher_test.go b/pkg/ebpf/printk_patcher_test.go index 5e8c9d85cb5a1..08922faae016d 100644 --- a/pkg/ebpf/printk_patcher_test.go +++ b/pkg/ebpf/printk_patcher_test.go @@ -43,10 +43,6 @@ func TestPatchPrintkNewline(t *testing.T) { cfg := NewConfig() require.NotNil(t, cfg) - buf, err := bytecode.GetReader(cfg.BPFDir, "logdebug-test.o") - require.NoError(t, err) - defer buf.Close() - idPair := manager.ProbeIdentificationPair{ EBPFFuncName: "logdebugtest", UID: "logdebugtest", @@ -60,22 +56,25 @@ func TestPatchPrintkNewline(t *testing.T) { }, }, } - - opts := manager.Options{ - RemoveRlimit: true, - MapEditors: make(map[string]*ebpf.Map), - } mgr.InstructionPatchers = append(mgr.InstructionPatchers, patchPrintkNewline) + err = LoadCOREAsset("logdebug-test.o", func(buf bytecode.AssetReader, opts manager.Options) error { + opts.RemoveRlimit = true + opts.MapEditors = make(map[string]*ebpf.Map) + + require.NoError(t, mgr.InitWithOptions(buf, opts)) + require.NoError(t, mgr.Start()) + t.Cleanup(func() { mgr.Stop(manager.CleanAll) }) + + return nil + }) + require.NoError(t, err) + tp, err := tracefs.OpenFile("trace_pipe", os.O_RDONLY, 0) require.NoError(t, err) traceReader := bufio.NewReader(tp) t.Cleanup(func() { _ = tp.Close() }) - require.NoError(t, mgr.InitWithOptions(buf, opts)) - require.NoError(t, mgr.Start()) - t.Cleanup(func() { mgr.Stop(manager.CleanAll) }) - progs, ok, err := mgr.GetProgram(idPair) require.NoError(t, err) require.True(t, ok) diff --git a/pkg/ebpf/telemetry/status_codes.go b/pkg/ebpf/status_codes.go similarity index 98% rename from pkg/ebpf/telemetry/status_codes.go rename to pkg/ebpf/status_codes.go index 09317ef9e0e97..8d1dc1ab8bd18 100644 --- a/pkg/ebpf/telemetry/status_codes.go +++ b/pkg/ebpf/status_codes.go @@ -5,7 +5,7 @@ //go:build linux_bpf -package telemetry +package ebpf // BTFResult enumerates BTF loading success & failure modes type BTFResult int diff --git a/pkg/ebpf/telemetry/errors_collector_linux.go b/pkg/ebpf/telemetry/errors_collector_linux.go index 07ddb82fb1f0f..38a865c80d13f 100644 --- a/pkg/ebpf/telemetry/errors_collector_linux.go +++ b/pkg/ebpf/telemetry/errors_collector_linux.go @@ -39,7 +39,7 @@ type metricKey struct { // NewEBPFErrorsCollector initializes a new Collector object for ebpf helper and map operations errors func NewEBPFErrorsCollector() prometheus.Collector { - if supported, _ := ebpfTelemetrySupported(); !supported { + if supported, _ := EBPFTelemetrySupported(); !supported { return nil } diff --git a/pkg/ebpf/telemetry/errors_collector_linux_test.go 
b/pkg/ebpf/telemetry/errors_collector_linux_test.go index 5e14afc7b31d7..1dd559f94abf7 100644 --- a/pkg/ebpf/telemetry/errors_collector_linux_test.go +++ b/pkg/ebpf/telemetry/errors_collector_linux_test.go @@ -90,7 +90,7 @@ func createTestCollector(telemetry ebpfErrorsTelemetry) prometheus.Collector { func TestEBPFErrorsCollector_NotInitialized(t *testing.T) { //skip this test on unsupported kernel versions - if ok, _ := ebpfTelemetrySupported(); !ok { + if ok, _ := EBPFTelemetrySupported(); !ok { t.SkipNow() } telemetry := &mockErrorsTelemetry{ @@ -116,7 +116,7 @@ func TestEBPFErrorsCollector_NotInitialized(t *testing.T) { func TestEBPFErrorsCollector_SingleCollect(t *testing.T) { //skip this test on unsupported kernel versions - if ok, _ := ebpfTelemetrySupported(); !ok { + if ok, _ := EBPFTelemetrySupported(); !ok { t.SkipNow() } mapErrorsMockValue, helperErrorsMockValue := uint64(20), uint64(100) @@ -221,7 +221,7 @@ func TestEBPFErrorsCollector_SingleCollect(t *testing.T) { // TestEBPFErrorsCollector_DoubleCollect tests the case when the collector is called twice to validate the delta calculation of the Counter metric func TestEBPFErrorsCollector_DoubleCollect(t *testing.T) { //skip this test on unsupported kernel versions - if ok, _ := ebpfTelemetrySupported(); !ok { + if ok, _ := EBPFTelemetrySupported(); !ok { t.SkipNow() } mapErrorsMockValue1, helperErrorsMockValue1 := uint64(20), uint64(100) diff --git a/pkg/ebpf/telemetry/errors_telemetry.go b/pkg/ebpf/telemetry/errors_telemetry.go index a0375797ab06c..8546c40ae3390 100644 --- a/pkg/ebpf/telemetry/errors_telemetry.go +++ b/pkg/ebpf/telemetry/errors_telemetry.go @@ -60,6 +60,7 @@ func (k *telemetryKey) String() string { type ebpfErrorsTelemetry interface { sync.Locker fill([]names.MapName, names.ModuleName, *maps.GenericMap[uint64, mapErrTelemetry], *maps.GenericMap[uint64, helperErrTelemetry]) error + cleanup([]names.MapName, names.ModuleName, *maps.GenericMap[uint64, mapErrTelemetry], *maps.GenericMap[uint64, helperErrTelemetry]) error setProbe(name telemetryKey, hash uint64) isInitialized() bool forEachMapErrorEntryInMaps(yield func(telemetryKey, uint64, mapErrTelemetry) bool) @@ -102,21 +103,54 @@ func (e *ebpfTelemetry) fill(maps []names.MapName, mn names.ModuleName, mapErrMa if _, ok := e.mapErrMapsByModule[mn]; ok { return fmt.Errorf("eBPF map for map-operation errors for module %s already exists", mn.Name()) - } else { //nolint:revive // TODO - e.mapErrMapsByModule[mn] = mapErrMap } + e.mapErrMapsByModule[mn] = mapErrMap if _, ok := e.helperErrMapsByModule[mn]; ok { return fmt.Errorf("eBPF map for helper-operation errors for module %s already exists", mn.Name()) - } else { //nolint:revive // TODO - e.helperErrMapsByModule[mn] = helperErrMap } + e.helperErrMapsByModule[mn] = helperErrMap e.initialized = true return nil } +func (e *ebpfTelemetry) cleanup(maps []names.MapName, mn names.ModuleName, mapErrMap *maps.GenericMap[uint64, mapErrTelemetry], helperErrMap *maps.GenericMap[uint64, helperErrTelemetry]) error { + var errs error + + e.mtx.Lock() + defer e.mtx.Unlock() + + // Cleanup mapKeys (initialized in initializeMapErrTelemetryMap) + h := keyHash() + for _, mapName := range maps { + delete(e.mapKeys, mapTelemetryKey(mapName, mn)) + key := eBPFMapErrorKey(h, mapTelemetryKey(mapName, mn)) + err := mapErrMap.Delete(&key) + if err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to delete telemetry struct for map %s: %w", mapName, err)) + } + } + + // Cleanup helper keys + for p, key := range e.probeKeys { + if 
p.moduleName != mn { + continue + } + err := helperErrMap.Delete(&key) + if err != nil { + errs = errors.Join(errs, fmt.Errorf("failed to delete telemetry struct for probe %s: %w", p.String(), err)) + } + delete(e.probeKeys, p) + } + + delete(e.mapErrMapsByModule, mn) + delete(e.helperErrMapsByModule, mn) + + return errs +} + func (e *ebpfTelemetry) setProbe(key telemetryKey, hash uint64) { e.probeKeys[key] = hash } @@ -213,34 +247,54 @@ func (e *ebpfTelemetry) initializeHelperErrTelemetryMap(module names.ModuleName, return nil } +// PatchConstant replaces the value for the provided relocation entry in the bpf bytecode +func PatchConstant(symbol string, p *ebpf.ProgramSpec, eBPFKey uint64) error { + // do constant editing of programs for helper errors post-init + ins := p.Instructions + ldDWImm := asm.LoadImmOp(asm.DWord) + offsets := ins.ReferenceOffsets() + indices := offsets[symbol] + if len(indices) > 0 { + for _, index := range indices { + load := &ins[index] + if load.OpCode != ldDWImm { + return fmt.Errorf("symbol %v: load: found %v instead of %v", symbol, load.OpCode, ldDWImm) + } + + load.Constant = int64(eBPFKey) + } + } + + return nil +} + +// PatchEBPFTelemetry performs bytecode patching to support eBPF helper error telemetry collection +func PatchEBPFTelemetry(programSpecs map[string]*ebpf.ProgramSpec, enable bool, mn names.ModuleName) error { + if errorsTelemetry == nil { + return errors.New("errorsTelemetry not initialized") + } + return patchEBPFTelemetry(programSpecs, enable, mn, errorsTelemetry) +} + func patchEBPFTelemetry(programSpecs map[string]*ebpf.ProgramSpec, enable bool, mn names.ModuleName, bpfTelemetry ebpfErrorsTelemetry) error { const symbol = "telemetry_program_id_key" newIns := asm.Mov.Reg(asm.R1, asm.R1) if enable { newIns = asm.StoreXAdd(asm.R1, asm.R2, asm.Word) } - ldDWImm := asm.LoadImmOp(asm.DWord) h := keyHash() for _, p := range programSpecs { - // do constant editing of programs for helper errors post-init ins := p.Instructions + // do constant editing of programs for helper errors post-init if enable && bpfTelemetry != nil { - offsets := ins.ReferenceOffsets() - indices := offsets[symbol] - if len(indices) > 0 { - for _, index := range indices { - load := &ins[index] - if load.OpCode != ldDWImm { - return fmt.Errorf("symbol %v: load: found %v instead of %v", symbol, load.OpCode, ldDWImm) - } - - programName := names.NewProgramNameFromProgramSpec(p) - key := eBPFHelperErrorKey(h, probeTelemetryKey(programName, mn)) - load.Constant = int64(key) - bpfTelemetry.setProbe(probeTelemetryKey(programName, mn), key) - } + programName := names.NewProgramNameFromProgramSpec(p) + tk := probeTelemetryKey(programName, mn) + key := eBPFHelperErrorKey(h, tk) + if err := PatchConstant(symbol, p, key); err != nil { + return err } + bpfTelemetry.setProbe(tk, key) } // patch telemetry helper calls @@ -281,8 +335,8 @@ func eBPFHelperErrorKey(h hash.Hash64, name telemetryKey) uint64 { return h.Sum64() } -// ebpfTelemetrySupported returns whether eBPF telemetry is supported, which depends on the verifier in 4.14+ -func ebpfTelemetrySupported() (bool, error) { +// EBPFTelemetrySupported returns whether eBPF telemetry is supported, which depends on the verifier in 4.14+ +func EBPFTelemetrySupported() (bool, error) { kversion, err := kernel.HostVersion() if err != nil { return false, err diff --git a/pkg/ebpf/telemetry/errors_telemetry_test.go b/pkg/ebpf/telemetry/errors_telemetry_test.go index 3d7485f39877b..87666af856af4 100644 --- 
a/pkg/ebpf/telemetry/errors_telemetry_test.go +++ b/pkg/ebpf/telemetry/errors_telemetry_test.go @@ -9,10 +9,10 @@ package telemetry import ( "os" + "syscall" "testing" - sysconfig "github.com/DataDog/datadog-agent/cmd/system-probe/config" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" "github.com/DataDog/datadog-agent/pkg/ebpf/names" @@ -20,10 +20,11 @@ import ( "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "github.com/cilium/ebpf" "github.com/stretchr/testify/require" ) -var m = &manager.Manager{ +var m1 = &manager.Manager{ Probes: []*manager.Probe{ { ProbeIdentificationPair: manager.ProbeIdentificationPair{ @@ -38,65 +39,98 @@ var m = &manager.Manager{ { Name: "suppress_map", }, + { + Name: "shared_map", + }, }, } -type config struct { - bpfDir string -} - -func testConfig() *config { - cfg := pkgconfigsetup.SystemProbe() - sysconfig.Adjust(cfg) - - return &config{ - bpfDir: cfg.GetString("system_probe_config.bpf_dir"), - } +var m2 = &manager.Manager{ + Probes: []*manager.Probe{ + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "kprobe__do_vfs_ioctl", + }, + }, + }, } func skipTestIfEBPFTelemetryNotSupported(t *testing.T) { - ok, err := ebpfTelemetrySupported() + ok, err := EBPFTelemetrySupported() require.NoError(t, err) if !ok { t.Skip("EBPF telemetry is not supported for this kernel version") } } -func triggerTestAndGetTelemetry(t *testing.T) []prometheus.Metric { - cfg := testConfig() +func startManager(t *testing.T, m *manager.Manager, options manager.Options, name string, buf bytecode.AssetReader) { + err := m.LoadELF(buf) + require.NoError(t, err) - buf, err := bytecode.GetReader(cfg.bpfDir, "error_telemetry.o") + modifier := ErrorsTelemetryModifier{} + err = modifier.BeforeInit(m, names.NewModuleName(name), &options) + require.NoError(t, err) + err = m.InitWithOptions(nil, options) + require.NoError(t, err) + err = modifier.AfterInit(m, names.NewModuleName(name), &options) require.NoError(t, err) - t.Cleanup(func() { _ = buf.Close }) + m.Start() + + t.Cleanup(func() { + m.Stop(manager.CleanAll) + }) +} +func triggerTestAndGetTelemetry(t *testing.T) []prometheus.Metric { collector := NewEBPFErrorsCollector() - options := manager.Options{ - RemoveRlimit: true, - ActivatedProbes: []manager.ProbesSelector{ + err := ddebpf.LoadCOREAsset("error_telemetry.o", func(buf bytecode.AssetReader, opts manager.Options) error { + opts.RemoveRlimit = true + opts.ActivatedProbes = []manager.ProbesSelector{ &manager.ProbeSelector{ ProbeIdentificationPair: manager.ProbeIdentificationPair{ EBPFFuncName: "kprobe__vfs_open", }, }, - }, - } + } + startManager(t, m1, opts, "m1", buf) - err = m.LoadELF(buf) + return nil + }) require.NoError(t, err) - modifier := ErrorsTelemetryModifier{} - err = modifier.BeforeInit(m, names.NewModuleName("ebpf"), &options) - require.NoError(t, err) - err = m.InitWithOptions(nil, options) + sharedMap, found, err := m1.GetMap("shared_map") + require.True(t, found, "'shared_map' not found in manager") require.NoError(t, err) - err = modifier.AfterInit(m, names.NewModuleName("ebpf"), &options) + + err = ddebpf.LoadCOREAsset("error_telemetry.o", func(buf bytecode.AssetReader, opts manager.Options) error { + opts.RemoveRlimit = true + opts.ActivatedProbes = []manager.ProbesSelector{ + &manager.ProbeSelector{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + 
EBPFFuncName: "kprobe__do_vfs_ioctl", + }, + }, + } + opts.MapEditors = map[string]*ebpf.Map{ + "shared_map": sharedMap, + } + + startManager(t, m2, opts, "m2", buf) + + return nil + }) require.NoError(t, err) - m.Start() _, err = os.Open("/proc/self/exe") require.NoError(t, err) + if _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, uintptr(0), 0xfafadead, uintptr(0)); errno != 0 { + // Only valid return value is ENOTTY (invalid ioctl for device) because indeed we + // are not doing any valid ioctl, we just want to trigger the kprobe + require.Equal(t, syscall.ENOTTY, errno) + } + ch := make(chan prometheus.Metric) go func() { collector.Collect(ch) @@ -116,9 +150,6 @@ func TestMapsTelemetry(t *testing.T) { skipTestIfEBPFTelemetryNotSupported(t) mapsTelemetry := triggerTestAndGetTelemetry(t) - t.Cleanup(func() { - m.Stop(manager.CleanAll) - }) errorMapEntryFound, e2bigErrorFound := false, false for _, promMetric := range mapsTelemetry { @@ -153,9 +184,6 @@ func TestMapsTelemetrySuppressError(t *testing.T) { skipTestIfEBPFTelemetryNotSupported(t) mapsTelemetry := triggerTestAndGetTelemetry(t) - t.Cleanup(func() { - m.Stop(manager.CleanAll) - }) suppressMapEntryFound := false for _, promMetric := range mapsTelemetry { @@ -180,9 +208,6 @@ func TestHelpersTelemetry(t *testing.T) { skipTestIfEBPFTelemetryNotSupported(t) helperTelemetry := triggerTestAndGetTelemetry(t) - t.Cleanup(func() { - m.Stop(manager.CleanAll) - }) probeReadHelperFound, efaultErrorFound := false, false for _, promMetric := range helperTelemetry { @@ -212,3 +237,49 @@ func TestHelpersTelemetry(t *testing.T) { // ensure test fails if helper metric not found require.True(t, probeReadHelperFound) } + +func testSharedMaps(t *testing.T, mapsTelemetry []prometheus.Metric, errorCount float64, sharedMap, moduleToTest string) bool { + testComplete := false + for _, promMetric := range mapsTelemetry { + sharedMapFound, moduleFound, e2bigErrorFound := false, false, false + + dtoMetric := dto.Metric{} + require.NoError(t, promMetric.Write(&dtoMetric), "Failed to parse metric %v", promMetric.Desc()) + require.NotNilf(t, dtoMetric.GetCounter(), "expected metric %v to be of a counter type", promMetric.Desc()) + + for _, label := range dtoMetric.GetLabel() { + switch label.GetName() { + case "map_name": + if label.GetValue() == sharedMap { + sharedMapFound = true + } + case "error": + if label.GetValue() == "E2BIG" { + e2bigErrorFound = true + } + case "module": + if label.GetValue() == moduleToTest { + moduleFound = true + } + } + } + + // check error value immediately if map is discovered + if sharedMapFound && moduleFound { + testComplete = true + require.True(t, e2bigErrorFound) + require.Equal(t, dtoMetric.GetCounter().GetValue(), errorCount) + break + } + } + + return testComplete +} + +func TestSharedMapsTelemetry(t *testing.T) { + skipTestIfEBPFTelemetryNotSupported(t) + + mapsTelemetry := triggerTestAndGetTelemetry(t) + require.True(t, testSharedMaps(t, mapsTelemetry, float64(1), "shared_map", "m1")) + require.True(t, testSharedMaps(t, mapsTelemetry, float64(3), "shared_map", "m2")) +} diff --git a/pkg/ebpf/telemetry/modifier.go b/pkg/ebpf/telemetry/modifier.go index 8f70c867ab0fa..706255ccdfcbb 100644 --- a/pkg/ebpf/telemetry/modifier.go +++ b/pkg/ebpf/telemetry/modifier.go @@ -9,35 +9,72 @@ package telemetry import ( "fmt" + "hash" "slices" manager "github.com/DataDog/ebpf-manager" + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/maps" 
"github.com/DataDog/datadog-agent/pkg/ebpf/names" "github.com/DataDog/datadog-agent/pkg/util/log" ) const ( - // MapErrTelemetryMap is the map storing the map error telemetry - mapErrTelemetryMapName string = "map_err_telemetry_map" - // HelperErrTelemetryMap is the map storing the helper error telemetry - helperErrTelemetryMapName string = "helper_err_telemetry_map" + // MapErrTelemetryMapName is the map storing the map error telemetry + MapErrTelemetryMapName string = "map_err_telemetry_map" + // HelperErrTelemetryMapName is the map storing the helper error telemetry + HelperErrTelemetryMapName string = "helper_err_telemetry_map" ) // ErrorsTelemetryModifier is a modifier that sets up the manager to handle eBPF telemetry. type ErrorsTelemetryModifier struct{} +// Ensure it implements the required interfaces +var _ ddebpf.ModifierBeforeInit = &ErrorsTelemetryModifier{} +var _ ddebpf.ModifierAfterInit = &ErrorsTelemetryModifier{} +var _ ddebpf.ModifierBeforeStop = &ErrorsTelemetryModifier{} + // String returns the name of the modifier. func (t *ErrorsTelemetryModifier) String() string { return "ErrorsTelemetryModifier" } +// getMapNames returns the names of the maps in the manager. +func getMapNames(m *manager.Manager) ([]names.MapName, error) { + var mapNames []names.MapName + + // we use map specs instead of iterating over the user defined `manager.Maps` + // because the user defined list may not contain shared maps passed to the manager + // via `manager.Options.MapEditors`. On the other hand, MapSpecs will include all maps + // referenced in the ELF file associated with the manager + specs, err := m.GetMapSpecs() + if err != nil { + return nil, err + } + + for _, spec := range specs { + mapNames = append(mapNames, names.NewMapNameFromMapSpec(spec)) + } + + return mapNames, nil +} + +// MapTelemetryKeyName builds the name of the parameterized constant used in bpf code +func MapTelemetryKeyName(mapName names.MapName) string { + return mapName.Name() + "_telemetry_key" +} + +// MapTelemetryErrorKey returns the key for map errors +func MapTelemetryErrorKey(h hash.Hash64, mapName names.MapName, module names.ModuleName) uint64 { + return eBPFMapErrorKey(h, mapTelemetryKey(mapName, module)) +} + // BeforeInit sets up the manager to handle eBPF telemetry. // It will patch the instructions of all the manager probes and `undefinedProbes` provided. // Constants are replaced for map error and helper error keys with their respective values. 
func (t *ErrorsTelemetryModifier) BeforeInit(m *manager.Manager, module names.ModuleName, opts *manager.Options) error { - activateBPFTelemetry, err := ebpfTelemetrySupported() + activateBPFTelemetry, err := EBPFTelemetrySupported() if err != nil { return err } @@ -46,20 +83,20 @@ func (t *ErrorsTelemetryModifier) BeforeInit(m *manager.Manager, module names.Mo } // add telemetry maps to list of maps, if not present - if !slices.ContainsFunc(m.Maps, func(x *manager.Map) bool { return x.Name == mapErrTelemetryMapName }) { - m.Maps = append(m.Maps, &manager.Map{Name: mapErrTelemetryMapName}) + if !slices.ContainsFunc(m.Maps, func(x *manager.Map) bool { return x.Name == MapErrTelemetryMapName }) { + m.Maps = append(m.Maps, &manager.Map{Name: MapErrTelemetryMapName}) } - if !slices.ContainsFunc(m.Maps, func(x *manager.Map) bool { return x.Name == helperErrTelemetryMapName }) { - m.Maps = append(m.Maps, &manager.Map{Name: helperErrTelemetryMapName}) + if !slices.ContainsFunc(m.Maps, func(x *manager.Map) bool { return x.Name == HelperErrTelemetryMapName }) { + m.Maps = append(m.Maps, &manager.Map{Name: HelperErrTelemetryMapName}) } // set a small max entries value if telemetry is not supported. We have to load the maps because the eBPF code // references them even when we cannot track the telemetry. - opts.MapSpecEditors[mapErrTelemetryMapName] = manager.MapSpecEditor{ + opts.MapSpecEditors[MapErrTelemetryMapName] = manager.MapSpecEditor{ MaxEntries: uint32(1), EditorFlag: manager.EditMaxEntries, } - opts.MapSpecEditors[helperErrTelemetryMapName] = manager.MapSpecEditor{ + opts.MapSpecEditors[HelperErrTelemetryMapName] = manager.MapSpecEditor{ MaxEntries: uint32(1), EditorFlag: manager.EditMaxEntries, } @@ -75,23 +112,28 @@ func (t *ErrorsTelemetryModifier) BeforeInit(m *manager.Manager, module names.Mo return fmt.Errorf("failed to get program specs from manager: %w", err) } - opts.MapSpecEditors[mapErrTelemetryMapName] = manager.MapSpecEditor{ + opts.MapSpecEditors[MapErrTelemetryMapName] = manager.MapSpecEditor{ MaxEntries: uint32(len(ebpfMaps)), EditorFlag: manager.EditMaxEntries, } - log.Tracef("module %s maps %d", module.Name(), opts.MapSpecEditors[mapErrTelemetryMapName].MaxEntries) + log.Tracef("module %s maps %d", module.Name(), opts.MapSpecEditors[MapErrTelemetryMapName].MaxEntries) - opts.MapSpecEditors[helperErrTelemetryMapName] = manager.MapSpecEditor{ + opts.MapSpecEditors[HelperErrTelemetryMapName] = manager.MapSpecEditor{ MaxEntries: uint32(len(ebpfPrograms)), EditorFlag: manager.EditMaxEntries, } - log.Tracef("module %s probes %d", module.Name(), opts.MapSpecEditors[helperErrTelemetryMapName].MaxEntries) + log.Tracef("module %s probes %d", module.Name(), opts.MapSpecEditors[HelperErrTelemetryMapName].MaxEntries) + + mapNames, err := getMapNames(m) + if err != nil { + return err + } h := keyHash() - for _, ebpfMap := range m.Maps { + for _, mapName := range mapNames { opts.ConstantEditors = append(opts.ConstantEditors, manager.ConstantEditor{ - Name: ebpfMap.Name + "_telemetry_key", - Value: eBPFMapErrorKey(h, mapTelemetryKey(names.NewMapNameFromManagerMap(ebpfMap), module)), + Name: MapTelemetryKeyName(mapName), + Value: MapTelemetryErrorKey(h, mapName, module), }) } @@ -108,28 +150,61 @@ func (t *ErrorsTelemetryModifier) BeforeInit(m *manager.Manager, module names.Mo return nil } +// getErrMaps returns the mapErrMap and helperErrMap from the manager. 
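// It is used by both AfterInit and BeforeStop, so fill and cleanup operate on
// the same underlying telemetry maps.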
+func getErrMaps(m *manager.Manager) (mapErrMap *maps.GenericMap[uint64, mapErrTelemetry], helperErrMap *maps.GenericMap[uint64, helperErrTelemetry], err error) { + mapErrMap, err = maps.GetMap[uint64, mapErrTelemetry](m, MapErrTelemetryMapName) + if err != nil { + return nil, nil, fmt.Errorf("failed to get generic map %s: %w", MapErrTelemetryMapName, err) + } + + helperErrMap, err = maps.GetMap[uint64, helperErrTelemetry](m, HelperErrTelemetryMapName) + if err != nil { + return nil, nil, fmt.Errorf("failed to get generic map %s: %w", HelperErrTelemetryMapName, err) + } + + return mapErrMap, helperErrMap, nil +} + // AfterInit pre-populates the telemetry maps with entries corresponding to the ebpf program of the manager. func (t *ErrorsTelemetryModifier) AfterInit(m *manager.Manager, module names.ModuleName, _ *manager.Options) error { if errorsTelemetry == nil { return nil } - var mapNames []names.MapName - for _, mp := range m.Maps { - mapNames = append(mapNames, names.NewMapNameFromManagerMap(mp)) + genericMapErrMap, genericHelperErrMap, err := getErrMaps(m) + if err != nil { + return err } - genericMapErrMap, err := maps.GetMap[uint64, mapErrTelemetry](m, mapErrTelemetryMapName) + mapNames, err := getMapNames(m) if err != nil { - return fmt.Errorf("failed to get generic map %s: %w", mapErrTelemetryMapName, err) + return err } - genericHelperErrMap, err := maps.GetMap[uint64, helperErrTelemetry](m, helperErrTelemetryMapName) + if err := errorsTelemetry.fill(mapNames, module, genericMapErrMap, genericHelperErrMap); err != nil { + return err + } + + return nil +} + +// BeforeStop stops the perf collector from telemetry and removes the modules from the telemetry maps. +func (t *ErrorsTelemetryModifier) BeforeStop(m *manager.Manager, module names.ModuleName, _ manager.MapCleanupType) error { + if errorsTelemetry == nil { + return nil + } + + genericMapErrMap, genericHelperErrMap, err := getErrMaps(m) if err != nil { - return fmt.Errorf("failed to get generic map %s: %w", helperErrTelemetryMapName, err) + return err } - if err := errorsTelemetry.fill(mapNames, module, genericMapErrMap, genericHelperErrMap); err != nil { + mapNames, err := getMapNames(m) + if err != nil { + return err + } + + if err := errorsTelemetry.cleanup(mapNames, module, genericMapErrMap, genericHelperErrMap); err != nil { return err } diff --git a/pkg/ebpf/telemetry/modifier_test.go b/pkg/ebpf/telemetry/modifier_test.go new file mode 100644 index 0000000000000..fb868b9193c08 --- /dev/null +++ b/pkg/ebpf/telemetry/modifier_test.go @@ -0,0 +1,108 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build linux_bpf + +package telemetry + +import ( + "os" + "testing" + + manager "github.com/DataDog/ebpf-manager" + "github.com/prometheus/client_golang/prometheus" + + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + "github.com/DataDog/datadog-agent/pkg/ebpf/names" + + "github.com/stretchr/testify/require" +) + +func TestModifierAppliesMultipleTimes(t *testing.T) { + skipTestIfEBPFTelemetryNotSupported(t) + + // set up the collector outside of the loop, as that's the usual usage in + // system-probe + collector := NewEBPFErrorsCollector() + + numTries := 4 // Just to be sure + for i := 0; i < numTries; i++ { + // Init the manager + mgr := &manager.Manager{ + Probes: []*manager.Probe{ + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "kprobe__vfs_open", + }, + }, + }, + Maps: []*manager.Map{ + { + Name: "error_map", + }, + { + Name: "suppress_map", + }, + }, + } + t.Cleanup(func() { _ = mgr.Stop(manager.CleanAll) }) // Ensure we stop the manager, if it's already stopped it will be a no-op + + modifier := ErrorsTelemetryModifier{} + mname := names.NewModuleName("ebpf") + err := ddebpf.LoadCOREAsset("error_telemetry.o", func(buf bytecode.AssetReader, opts manager.Options) error { + opts.RemoveRlimit = true + opts.ActivatedProbes = []manager.ProbesSelector{ + &manager.ProbeSelector{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "kprobe__vfs_open", + }, + }, + } + + err := mgr.LoadELF(buf) + require.NoError(t, err) + + err = modifier.BeforeInit(mgr, mname, &opts) + require.NoError(t, err, "BeforeInit failed on try %d", i) + err = mgr.InitWithOptions(nil, opts) + require.NoError(t, err, "InitWithOptions failed on try %d", i) + err = modifier.AfterInit(mgr, mname, &opts) + require.NoError(t, err, "AfterInit failed on try %d", i) + + err = mgr.Start() + require.NoError(t, err, "Start failed on try %d", i) + + return nil + }) + require.NoError(t, err) + + // Trigger our kprobe + _, err = os.Open("/proc/self/exe") + require.NoError(t, err) + + ch := make(chan prometheus.Metric) + go func() { + collector.Collect(ch) + close(ch) + }() + + // Collect metrics from the channel + var metrics []prometheus.Metric + for m := range ch { + metrics = append(metrics, m) + } + + // Ensure we have metrics + require.NotEmpty(t, metrics, "No metrics collected on try %d", i) + + // Run our BeforeStop + err = modifier.BeforeStop(mgr, mname, manager.CleanAll) + require.NoError(t, err, "BeforeStop failed on try %d", i) + + // Stop the manager + require.NoError(t, mgr.Stop(manager.CleanAll), "Stop failed on try %d", i) + } +} diff --git a/pkg/ebpf/telemetry/perf_metrics.go b/pkg/ebpf/telemetry/perf_metrics.go index 84324790e1d28..4389c156fb819 100644 --- a/pkg/ebpf/telemetry/perf_metrics.go +++ b/pkg/ebpf/telemetry/perf_metrics.go @@ -22,19 +22,25 @@ var ( ) type perfUsageCollector struct { - mtx sync.Mutex - usage *prometheus.GaugeVec - usagePct *prometheus.GaugeVec - size *prometheus.GaugeVec - lost *prometheus.CounterVec - - perfMaps []*manager.PerfMap - ringBuffers []*manager.RingBuffer + mtx sync.Mutex + usage *prometheus.GaugeVec + usagePct *prometheus.GaugeVec + size *prometheus.GaugeVec + lost *prometheus.CounterVec + channelLen *prometheus.GaugeVec + + perfMaps []*manager.PerfMap + perfChannelLenFuncs map[*manager.PerfMap]func() int + + ringBuffers []*manager.RingBuffer + ringChannelLenFuncs map[*manager.RingBuffer]func() int } // NewPerfUsageCollector creates a prometheus.Collector 
for perf buffer and ring buffer metrics func NewPerfUsageCollector() prometheus.Collector { perfCollector = &perfUsageCollector{ + perfChannelLenFuncs: make(map[*manager.PerfMap]func() int), + ringChannelLenFuncs: make(map[*manager.RingBuffer]func() int), usage: prometheus.NewGaugeVec( prometheus.GaugeOpts{ Subsystem: "ebpf__perf", @@ -67,6 +73,14 @@ func NewPerfUsageCollector() prometheus.Collector { }, []string{"map_name", "map_type", "cpu_num"}, ), + channelLen: prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Subsystem: "ebpf__perf", + Name: "_channel_len", + Help: "gauge tracking number of elements in buffer channel", + }, + []string{"map_name", "map_type"}, + ), } return perfCollector } @@ -103,6 +117,11 @@ func (p *perfUsageCollector) Collect(metrics chan<- prometheus.Metric) { } } + for pm, chFunc := range p.perfChannelLenFuncs { + mapName, mapType := pm.Name, ebpf.PerfEventArray.String() + p.channelLen.WithLabelValues(mapName, mapType).Set(float64(chFunc())) + } + for _, rb := range p.ringBuffers { mapName, mapType := rb.Name, ebpf.RingBuf.String() size := float64(rb.BufferSize()) @@ -118,10 +137,16 @@ func (p *perfUsageCollector) Collect(metrics chan<- prometheus.Metric) { p.size.WithLabelValues(mapName, mapType, cpuString).Set(size) } + for rb, chFunc := range p.ringChannelLenFuncs { + mapName, mapType := rb.Name, ebpf.RingBuf.String() + p.channelLen.WithLabelValues(mapName, mapType).Set(float64(chFunc())) + } + p.usage.Collect(metrics) p.usagePct.Collect(metrics) p.size.Collect(metrics) p.lost.Collect(metrics) + p.channelLen.Collect(metrics) } // ReportPerfMapTelemetry starts reporting the telemetry for the provided PerfMap @@ -132,6 +157,14 @@ func ReportPerfMapTelemetry(pm *manager.PerfMap) { perfCollector.registerPerfMap(pm) } +// ReportPerfMapChannelLenTelemetry starts reporting the telemetry for the provided PerfMap's buffer channel +func ReportPerfMapChannelLenTelemetry(pm *manager.PerfMap, channelLenFunc func() int) { + if perfCollector == nil { + return + } + perfCollector.registerPerfMapChannel(pm, channelLenFunc) +} + // ReportRingBufferTelemetry starts reporting the telemetry for the provided RingBuffer func ReportRingBufferTelemetry(rb *manager.RingBuffer) { if perfCollector == nil { @@ -140,6 +173,14 @@ func ReportRingBufferTelemetry(rb *manager.RingBuffer) { perfCollector.registerRingBuffer(rb) } +// ReportRingBufferChannelLenTelemetry starts reporting the telemetry for the provided RingBuffer's buffer channel +func ReportRingBufferChannelLenTelemetry(rb *manager.RingBuffer, channelLenFunc func() int) { + if perfCollector == nil { + return + } + perfCollector.registerRingBufferChannel(rb, channelLenFunc) +} + func (p *perfUsageCollector) registerPerfMap(pm *manager.PerfMap) { if !pm.TelemetryEnabled { return @@ -149,6 +190,15 @@ func (p *perfUsageCollector) registerPerfMap(pm *manager.PerfMap) { p.perfMaps = append(p.perfMaps, pm) } +func (p *perfUsageCollector) registerPerfMapChannel(pm *manager.PerfMap, channelLenFunc func() int) { + if !pm.TelemetryEnabled { + return + } + p.mtx.Lock() + defer p.mtx.Unlock() + p.perfChannelLenFuncs[pm] = channelLenFunc +} + func (p *perfUsageCollector) registerRingBuffer(rb *manager.RingBuffer) { if !rb.TelemetryEnabled { return @@ -158,12 +208,20 @@ func (p *perfUsageCollector) registerRingBuffer(rb *manager.RingBuffer) { p.ringBuffers = append(p.ringBuffers, rb) } +func (p *perfUsageCollector) registerRingBufferChannel(rb *manager.RingBuffer, channelLenFunc func() int) { + if !rb.TelemetryEnabled { + return + } + 
p.mtx.Lock() + defer p.mtx.Unlock() + p.ringChannelLenFuncs[rb] = channelLenFunc +} + // UnregisterTelemetry unregisters the PerfMap and RingBuffers from telemetry func UnregisterTelemetry(m *manager.Manager) { - if perfCollector == nil { - return + if perfCollector != nil { + perfCollector.unregisterTelemetry(m) } - perfCollector.unregisterTelemetry(m) } func (p *perfUsageCollector) unregisterTelemetry(m *manager.Manager) { @@ -172,7 +230,13 @@ func (p *perfUsageCollector) unregisterTelemetry(m *manager.Manager) { p.perfMaps = slices.DeleteFunc(p.perfMaps, func(perfMap *manager.PerfMap) bool { return slices.Contains(m.PerfMaps, perfMap) }) + for _, pm := range m.PerfMaps { + delete(p.perfChannelLenFuncs, pm) + } p.ringBuffers = slices.DeleteFunc(p.ringBuffers, func(ringBuf *manager.RingBuffer) bool { return slices.Contains(m.RingBuffers, ringBuf) }) + for _, rb := range m.RingBuffers { + delete(p.ringChannelLenFuncs, rb) + } } diff --git a/pkg/ebpf/testdata/c/error_telemetry.c b/pkg/ebpf/testdata/c/error_telemetry.c index 4f9b832f3eb3f..da72feaafe2ab 100644 --- a/pkg/ebpf/testdata/c/error_telemetry.c +++ b/pkg/ebpf/testdata/c/error_telemetry.c @@ -1,8 +1,5 @@ #include "ktypes.h" #include "bpf_metadata.h" -#ifdef COMPILE_RUNTIME -#include "kconfig.h" -#endif #include "compiler.h" #include "map-defs.h" #include "bpf_tracing.h" @@ -10,6 +7,7 @@ BPF_HASH_MAP(error_map, u32, u32, 2); BPF_HASH_MAP(suppress_map, u32, u32, 2); +BPF_HASH_MAP(shared_map, u32, u32, 1); #define E2BIG 7 @@ -31,6 +29,40 @@ int kprobe__vfs_open(int *ctx) { char buf[16]; bpf_probe_read_with_telemetry(&buf, 16, (void *)0xdeadbeef); + u32 j = 1; + u32* val = bpf_map_lookup_elem(&shared_map, &j); + if (val == NULL) { + bpf_map_update_with_telemetry(shared_map, &j, &j, BPF_ANY); + j++; + + bpf_map_update_with_telemetry(shared_map, &j, &j, BPF_ANY); + } + + return 0; +} + +static int __always_inline is_telemetry_call(struct pt_regs *ctx) { + u32 cmd = PT_REGS_PARM3(ctx); + return cmd == 0xfafadead; +}; + +SEC("kprobe/do_vfs_ioctl") +int kprobe__do_vfs_ioctl(struct pt_regs *ctx) { + if (!is_telemetry_call(ctx)) { + return 0; + } + + // we must start updating from a key we know was not already inserted into the map + // by the call to `kprobe__vfs_open` + u32 i = 0xabcd; + bpf_map_update_with_telemetry(shared_map, &i, &i, BPF_ANY); + i++; + bpf_map_update_with_telemetry(shared_map, &i, &i, BPF_ANY); + i++; + bpf_map_update_with_telemetry(shared_map, &i, &i, BPF_ANY); + + // 3 E2BIG errors + return 0; } diff --git a/pkg/ebpf/testdata/c/logdebug-test.c b/pkg/ebpf/testdata/c/logdebug-test.c index e6e081526b1d1..ade6b6bd0cd3e 100644 --- a/pkg/ebpf/testdata/c/logdebug-test.c +++ b/pkg/ebpf/testdata/c/logdebug-test.c @@ -1,11 +1,8 @@ -#include "kconfig.h" #include "ktypes.h" #include "bpf_metadata.h" -#include #include "bpf_tracing.h" #include "bpf_helpers.h" #include "bpf_helpers_custom.h" -#include char __license[] SEC("license") = "GPL"; diff --git a/pkg/ebpf/testdata/c/uprobe_attacher-test.c b/pkg/ebpf/testdata/c/uprobe_attacher-test.c index bcc755a245786..afde2418db084 100644 --- a/pkg/ebpf/testdata/c/uprobe_attacher-test.c +++ b/pkg/ebpf/testdata/c/uprobe_attacher-test.c @@ -1,13 +1,10 @@ // This program is used to test the UprobeAttacher object, it defines two simple probes that attach // to userspace functions.
-#include "kconfig.h" #include "ktypes.h" #include "bpf_metadata.h" -#include #include "bpf_tracing.h" #include "bpf_helpers.h" #include "bpf_helpers_custom.h" -#include SEC("uprobe/SSL_connect") int uprobe__SSL_connect(struct pt_regs *ctx) { diff --git a/pkg/ebpf/uprobes/attacher.go b/pkg/ebpf/uprobes/attacher.go index f413462bea633..36781ff80bdc2 100644 --- a/pkg/ebpf/uprobes/attacher.go +++ b/pkg/ebpf/uprobes/attacher.go @@ -303,7 +303,8 @@ type FileRegistry interface { // AttachCallback is a callback that is called whenever a probe is attached successfully type AttachCallback func(*manager.Probe, *utils.FilePath) -var NopOnAttachCallback AttachCallback = nil //nolint:revive // TODO +// NopOnAttachCallback is a callback that indicates that no action should be taken for the callback +var NopOnAttachCallback AttachCallback // UprobeAttacher is a struct that handles the attachment of uprobes to processes and libraries type UprobeAttacher struct { diff --git a/pkg/ebpf/uprobes/attacher_test.go b/pkg/ebpf/uprobes/attacher_test.go index be848bb5c6afb..5edf30c1f4972 100644 --- a/pkg/ebpf/uprobes/attacher_test.go +++ b/pkg/ebpf/uprobes/attacher_test.go @@ -26,7 +26,6 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" - "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil" "github.com/DataDog/datadog-agent/pkg/network/go/bininspect" "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries" @@ -701,10 +700,6 @@ func TestUprobeAttacher(t *testing.T) { procMon := launchProcessMonitor(t, false) - buf, err := bytecode.GetReader(ebpfCfg.BPFDir, "uprobe_attacher-test.o") - require.NoError(t, err) - t.Cleanup(func() { buf.Close() }) - connectProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__SSL_connect"} mainProbeID := manager.ProbeIdentificationPair{EBPFFuncName: "uprobe__main"} @@ -748,9 +743,15 @@ func TestUprobeAttacher(t *testing.T) { require.NoError(t, err) require.NotNil(t, ua) - require.NoError(t, mgr.InitWithOptions(buf, manager.Options{})) - require.NoError(t, mgr.Start()) - t.Cleanup(func() { mgr.Stop(manager.CleanAll) }) + err = ddebpf.LoadCOREAsset("uprobe_attacher-test.o", func(buf bytecode.AssetReader, opts manager.Options) error { + require.NoError(t, mgr.InitWithOptions(buf, opts)) + require.NoError(t, mgr.Start()) + t.Cleanup(func() { mgr.Stop(manager.CleanAll) }) + + return nil + }) + require.NoError(t, err) + require.NoError(t, ua.Start()) t.Cleanup(ua.Stop) @@ -827,11 +828,7 @@ type SharedLibrarySuite struct { } func TestAttacherSharedLibrary(t *testing.T) { - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} - if !prebuilt.IsDeprecated() { - modes = append(modes, ebpftest.Prebuilt) - } - ebpftest.TestBuildModes(t, modes, "", func(tt *testing.T) { + ebpftest.TestBuildModes(t, ebpftest.SupportedBuildModes(), "", func(tt *testing.T) { if !sharedlibraries.IsSupported(ddebpf.NewConfig()) { tt.Skip("shared library tracing not supported for this platform") } @@ -985,9 +982,10 @@ func (s *SharedLibrarySuite) TestDetectionWithPIDAndRootNamespace() { t.Cleanup(ua.Stop) time.Sleep(10 * time.Millisecond) - // simulate a slow (1 second) : open, write, close of the file + // simulate a slow (1 second) : open, read, close of the file // in a new pid and mount namespaces - o, err := exec.Command("unshare", "--fork", "--pid", "-R", root, "/ash", "-c", 
fmt.Sprintf("sleep 1 > %s", libpath)).CombinedOutput() + o, err := exec.Command("unshare", "--fork", "--pid", "-R", root, "/ash", "-c", + fmt.Sprintf("touch foo && mv foo %s && sleep 1 < %s", libpath, libpath)).CombinedOutput() if err != nil { t.Log(err, string(o)) } diff --git a/pkg/ebpf/uprobes/testutil.go b/pkg/ebpf/uprobes/testutil.go index 2d4e8b85d4b25..cd4b2891d72ef 100644 --- a/pkg/ebpf/uprobes/testutil.go +++ b/pkg/ebpf/uprobes/testutil.go @@ -58,7 +58,7 @@ type MockFileRegistry struct { } // Register is a mock implementation of the FileRegistry.Register method. -func (m *MockFileRegistry) Register(namespacedPath string, pid uint32, activationCB, deactivationCB, alreadyRegistered utils.Callback) error { //nolint:revive // TODO +func (m *MockFileRegistry) Register(namespacedPath string, pid uint32, activationCB, deactivationCB, _ utils.Callback) error { args := m.Called(namespacedPath, pid, activationCB, deactivationCB) return args.Error(0) } diff --git a/pkg/ebpf/verifier/stats.go b/pkg/ebpf/verifier/stats.go index 157d8f9b92f9f..5e254d54ea9a6 100644 --- a/pkg/ebpf/verifier/stats.go +++ b/pkg/ebpf/verifier/stats.go @@ -11,6 +11,7 @@ package verifier import ( "fmt" + "hash/fnv" "log" "os" "path/filepath" @@ -22,11 +23,12 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + "github.com/DataDog/datadog-agent/pkg/ebpf/names" + ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/util/kernel" manager "github.com/DataDog/ebpf-manager" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/asm" ) var ( @@ -90,6 +92,9 @@ func generateLoadFunction(file string, opts *StatsOptions, results *StatsResult, return fmt.Errorf("failed to get host kernel version: %w", err) } + // initialize `ebpfErrorsTelemetry` + ebpftelemetry.NewEBPFErrorsCollector() + log.Printf("Loading asset %s\n", file) collectionSpec, err := ebpf.LoadCollectionSpecFromReader(bc) if err != nil { @@ -103,24 +108,44 @@ func generateLoadFunction(file string, opts *StatsOptions, results *StatsResult, } } - // replace telemetry patch points with nops - // r1 = r1 - newIns := asm.Mov.Reg(asm.R1, asm.R1) - for _, p := range collectionSpec.Programs { - ins := p.Instructions - - // patch telemetry helper calls - const ebpfTelemetryPatchCall = -1 - iter := ins.Iterate() - for iter.Next() { - ins := iter.Ins - if !ins.IsBuiltinCall() || ins.Constant != ebpfTelemetryPatchCall { - continue + activateBPFTelemetry, err := ebpftelemetry.EBPFTelemetrySupported() + if err != nil { + return err + } + + if activateBPFTelemetry { + // update lengths for the ebpf telemetry maps + for name, mapSpec := range collectionSpec.Maps { + if name == ebpftelemetry.MapErrTelemetryMapName { + mapSpec.MaxEntries = uint32(len(collectionSpec.Maps)) + } + if name == ebpftelemetry.HelperErrTelemetryMapName { + mapSpec.MaxEntries = uint32(len(collectionSpec.Programs)) } - *ins = newIns.WithMetadata(ins.Metadata) } } + // patch the map telemetry keys + h := fnv.New64a() + mn := names.NewModuleName("verifier-stats") + for _, mapSpec := range collectionSpec.Maps { + mapName := names.NewMapNameFromMapSpec(mapSpec) + for _, p := range collectionSpec.Programs { + if err := ebpftelemetry.PatchConstant( + ebpftelemetry.MapTelemetryKeyName(mapName), + p, + ebpftelemetry.MapTelemetryErrorKey(h, mapName, mn), + ); err != nil { + return err + } + } + } + + // patch helper error telemetry + if err := ebpftelemetry.PatchEBPFTelemetry(collectionSpec.Programs, 
activateBPFTelemetry, mn); err != nil { + return err + } + progOpts := ebpf.ProgramOptions{ LogLevel: ebpf.LogLevelStats, KernelTypes: managerOptions.VerifierOptions.Programs.KernelTypes, @@ -162,7 +187,6 @@ func generateLoadFunction(file string, opts *StatsOptions, results *StatsResult, continue } } - log.Printf("Loading program %s\n", progSpec.Name) prog := reflect.New( reflect.StructOf([]reflect.StructField{ diff --git a/pkg/eventmonitor/consumers/testutil/testutil.go b/pkg/eventmonitor/consumers/testutil/testutil.go index 871cf780b0089..62aa502491f37 100644 --- a/pkg/eventmonitor/consumers/testutil/testutil.go +++ b/pkg/eventmonitor/consumers/testutil/testutil.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/eventmonitor" "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers" eventtestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/testutil" - "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" ) const defaultChanSize = 100 @@ -26,7 +26,7 @@ const defaultChanSize = 100 func NewTestProcessConsumer(t *testing.T) *consumers.ProcessConsumer { var pc *consumers.ProcessConsumer // Set fake hostname to avoid fetching it from the core agent. - utils.SetCachedHostname("test-hostname") + hostnameutils.SetCachedHostname("test-hostname") eventtestutil.StartEventMonitor(t, func(t *testing.T, evm *eventmonitor.EventMonitor) { var err error eventTypes := []consumers.ProcessConsumerEventTypes{consumers.ExecEventType, consumers.ExitEventType} diff --git a/pkg/fips/fips_disabled.go b/pkg/fips/fips_disabled.go new file mode 100644 index 0000000000000..0f4fdf6f79868 --- /dev/null +++ b/pkg/fips/fips_disabled.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !goexperiment.systemcrypto + +// Package fips is an interface for build specific status of FIPS compliance +package fips + +// Status returns an empty string when not the datadog-fips-agent flavor +func Status() string { + return "" +} + +// Enabled returns false when not the datadog-fips-agent flavor +func Enabled() (bool, error) { + return false, nil +} diff --git a/pkg/fips/fips_nix.go b/pkg/fips/fips_nix.go new file mode 100644 index 0000000000000..d0a41c1626c7e --- /dev/null +++ b/pkg/fips/fips_nix.go @@ -0,0 +1,25 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build goexperiment.systemcrypto && !windows + +// Package fips is an interface for build specific status of FIPS compliance +package fips + +import ( + "os" + "strconv" +) + +// Status returns a displayable string or error of FIPS compliance state of the agent build and runtime +func Status() string { + enabled, _ := Enabled() + return strconv.FormatBool(enabled) +} + +// Enabled checks to see if the agent runtime environment is as expected relating to its build to be FIPS compliant. For Linux this is that the binary is run with the GOFIPS=1 environment variable. 
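The new pkg/fips package intentionally exposes only Status and Enabled, with build tags deciding which implementation is compiled in. A small usage sketch (the caller and formatting are illustrative; the API and module path come from the files added here):

package example // hypothetical caller

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/fips"
)

// fipsSummary renders the agent's FIPS posture in one line. On non-FIPS builds
// Status returns "" and Enabled always returns false with a nil error.
func fipsSummary() string {
	enabled, err := fips.Enabled()
	if err != nil {
		return fmt.Sprintf("FIPS status unknown: %v", err)
	}
	return fmt.Sprintf("FIPS enabled: %t (build status: %q)", enabled, fips.Status())
}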
+func Enabled() (bool, error) { + return os.Getenv("GOFIPS") == "1", nil +} diff --git a/pkg/fips/fips_windows.go b/pkg/fips/fips_windows.go new file mode 100644 index 0000000000000..b13e2c4878e7d --- /dev/null +++ b/pkg/fips/fips_windows.go @@ -0,0 +1,47 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build goexperiment.systemcrypto && windows + +// Package fips is an interface for build specific status of FIPS compliance +package fips + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows/registry" +) + +// Status returns a displayable string or error of FIPS compliance state of the agent build and runtime +func Status() string { + enabled, _ := Enabled() + return strconv.FormatBool(enabled) +} + +// Enabled checks to see if the agent runtime environment is as expected relating to its build to be FIPS compliant. For Windows this means that FIPS mode is enabled via the Windows registry. +func Enabled() (bool, error) { + // this is copied from how microsoft/go checks the windows registry that FIPS is enabled: + // https://github.com/microsoft/go/blob/d0f965f87c51211b3ea554f88e94b4c68116f5d1/eng/_util/cmd/run-builder/systemfips_windows.go#L17-L54 + key, err := registry.OpenKey( + registry.LOCAL_MACHINE, + `SYSTEM\CurrentControlSet\Control\Lsa\FipsAlgorithmPolicy`, + registry.QUERY_VALUE, + ) + if err != nil { + return false, err + } + + enabled, enabledType, err := key.GetIntegerValue("Enabled") + if err != nil { + return false, err + } + + if enabledType != registry.DWORD { + return false, fmt.Errorf("unexpected FIPS algorithm policy Enabled key type: %v, expected: %v", enabledType, registry.DWORD) + } + + return enabled == 1, nil +} diff --git a/pkg/fips/go.mod b/pkg/fips/go.mod new file mode 100644 index 0000000000000..5f13859a31a30 --- /dev/null +++ b/pkg/fips/go.mod @@ -0,0 +1,5 @@ +module github.com/DataDog/datadog-agent/pkg/fips + +go 1.23.0 + +require golang.org/x/sys v0.29.0 diff --git a/pkg/fips/go.sum b/pkg/fips/go.sum new file mode 100644 index 0000000000000..0664caa90be69 --- /dev/null +++ b/pkg/fips/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/pkg/flare/archive.go b/pkg/flare/archive.go index 8e820bb36f3ef..54f9450326841 100644 --- a/pkg/flare/archive.go +++ b/pkg/flare/archive.go @@ -69,7 +69,7 @@ func ExtraFlareProviders(diagnoseDeps diagnose.SuitesDeps) []*flaretypes.FlareFi flaretypes.NewFiller(getRegistryJSON), flaretypes.NewFiller(getVersionHistory), flaretypes.NewFiller(getWindowsData), - flaretypes.NewFiller(getExpVar), + flaretypes.NewFiller(GetExpVar), flaretypes.NewFiller(provideInstallInfo), flaretypes.NewFiller(provideAuthTokenPerm), flaretypes.NewFiller(provideDiagnoses(diagnoseDeps)), @@ -81,7 +81,7 @@ func ExtraFlareProviders(diagnoseDeps diagnose.SuitesDeps) []*flaretypes.FlareFi telemetryURL := fmt.Sprintf("http://127.0.0.1:%s/telemetry", pkgconfigsetup.Datadog().GetString("expvar_port")) for filename, fromFunc := range map[string]func() ([]byte, error){ - "envvars.log": getEnvVars, + "envvars.log": GetEnvVars, "health.yaml": getHealth, "go-routine-dump.log": func() ([]byte, error) { return getHTTPCallContent(pprofURL) }, "telemetry.log": func() ([]byte, error) { return 
getHTTPCallContent(telemetryURL) }, @@ -178,7 +178,8 @@ func getRegistryJSON(fb flaretypes.FlareBuilder) error { return nil } -func getLogFiles(fb flaretypes.FlareBuilder, logFileDir string) { +// GetLogFiles copies log files to the flare archive. +func GetLogFiles(fb flaretypes.FlareBuilder, logFileDir string) { log.Flush() fb.CopyDirToWithoutScrubbing(filepath.Dir(logFileDir), "logs", func(path string) bool { //nolint:errcheck @@ -189,7 +190,8 @@ func getLogFiles(fb flaretypes.FlareBuilder, logFileDir string) { }) } -func getExpVar(fb flaretypes.FlareBuilder) error { +// GetExpVar copies expvar files to the flare archive. +func GetExpVar(fb flaretypes.FlareBuilder) error { variables := make(map[string]interface{}) expvar.Do(func(kv expvar.KeyValue) { variable := make(map[string]interface{}) @@ -214,7 +216,7 @@ func getExpVar(fb flaretypes.FlareBuilder) error { apmDebugPort := pkgconfigsetup.Datadog().GetInt("apm_config.debug.port") f := filepath.Join("expvar", "trace-agent") - resp, err := http.Get(fmt.Sprintf("https://127.0.0.1:%d/debug/vars", apmDebugPort)) + resp, err := http.Get(fmt.Sprintf("http://127.0.0.1:%d/debug/vars", apmDebugPort)) if err != nil { return fb.AddFile(f, []byte(fmt.Sprintf("Error retrieving vars: %v", err))) } @@ -275,7 +277,8 @@ func getProcessAgentFullConfig() ([]byte, error) { return bytes, nil } -func getConfigFiles(fb flaretypes.FlareBuilder, confSearchPaths map[string]string) { +// GetConfigFiles copies configuration files to the flare archive. +func GetConfigFiles(fb flaretypes.FlareBuilder, confSearchPaths map[string]string) { for prefix, filePath := range confSearchPaths { fb.CopyDirTo(filePath, filepath.Join("etc", "confd", prefix), func(path string) bool { //nolint:errcheck // ignore .example file @@ -385,7 +388,7 @@ func getAgentTaggerList() ([]byte, error) { taggerListURL := fmt.Sprintf("https://%v:%v/agent/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port")) - return getTaggerList(taggerListURL) + return GetTaggerList(taggerListURL) } func getProcessAgentTaggerList() ([]byte, error) { @@ -400,10 +403,11 @@ func getProcessAgentTaggerList() ([]byte, error) { } taggerListURL := fmt.Sprintf("https://%s/agent/tagger-list", addressPort) - return getTaggerList(taggerListURL) + return GetTaggerList(taggerListURL) } -func getTaggerList(remoteURL string) ([]byte, error) { +// GetTaggerList fetches the tagger list from the given URL. +func GetTaggerList(remoteURL string) ([]byte, error) { c := apiutil.GetClient(false) // FIX: get certificates right then make this true r, err := apiutil.DoGet(c, remoteURL, apiutil.LeaveConnectionOpen) @@ -429,10 +433,11 @@ func getAgentWorkloadList() ([]byte, error) { return nil, err } - return getWorkloadList(fmt.Sprintf("https://%v:%v/agent/workload-list?verbose=true", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port"))) + return GetWorkloadList(fmt.Sprintf("https://%v:%v/agent/workload-list?verbose=true", ipcAddress, pkgconfigsetup.Datadog().GetInt("cmd_port"))) } -func getWorkloadList(url string) ([]byte, error) { +// GetWorkloadList fetches the workload list from the given URL. 
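With GetLogFiles, GetConfigFiles, GetExpVar, GetEnvVars, GetTaggerList, and GetWorkloadList exported, flare assembly outside this package (the security agent and the new clusteragent package below) can reuse the same fillers. A minimal sketch against an existing FlareBuilder (the search paths and log directory are placeholders):

package customflare // hypothetical consumer

import (
	flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types"
	"github.com/DataDog/datadog-agent/pkg/flare"
)

// fillCommonEntries adds the shared log/config/expvar/envvar entries to an archive.
func fillCommonEntries(fb flaretypes.FlareBuilder, logFilePath string) {
	flare.GetLogFiles(fb, logFilePath)
	flare.GetConfigFiles(fb, map[string]string{"": "/etc/datadog-agent/conf.d"})
	flare.GetExpVar(fb)                                 //nolint:errcheck
	fb.AddFileFromFunc("envvars.log", flare.GetEnvVars) //nolint:errcheck
}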
+func GetWorkloadList(url string) ([]byte, error) { c := apiutil.GetClient(false) // FIX: get certificates right then make this true r, err := apiutil.DoGet(c, url, apiutil.LeaveConnectionOpen) diff --git a/pkg/flare/archive_docker.go b/pkg/flare/archive_docker.go index 4b9b67c0139c8..f93927da89513 100644 --- a/pkg/flare/archive_docker.go +++ b/pkg/flare/archive_docker.go @@ -21,13 +21,13 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/containers/metrics" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/docker/docker/api/types/container" ) const dockerCommandMaxLength = 29 -func getDockerSelfInspect(wmeta optional.Option[workloadmeta.Component]) ([]byte, error) { +func getDockerSelfInspect(wmeta option.Option[workloadmeta.Component]) ([]byte, error) { if !env.IsContainerized() { return nil, fmt.Errorf("The Agent is not containerized") } diff --git a/pkg/flare/archive_linux.go b/pkg/flare/archive_linux.go index 9a3aea87a0ac0..54bde1e4e7a83 100644 --- a/pkg/flare/archive_linux.go +++ b/pkg/flare/archive_linux.go @@ -8,12 +8,7 @@ package flare import ( - "bytes" - "fmt" - "io" "path/filepath" - "regexp" - "syscall" "github.com/DataDog/ebpf-manager/tracefs" @@ -38,11 +33,24 @@ func addSystemProbePlatformSpecificEntries(fb flaretypes.FlareBuilder) { _ = fb.AddFileFromFunc(filepath.Join("system-probe", "conntrack_cached.log"), getSystemProbeConntrackCached) _ = fb.AddFileFromFunc(filepath.Join("system-probe", "conntrack_host.log"), getSystemProbeConntrackHost) _ = fb.AddFileFromFunc(filepath.Join("system-probe", "ebpf_btf_loader.log"), getSystemProbeBTFLoaderInfo) + _ = fb.AddFileFromFunc(filepath.Join("system-probe", "dmesg.log"), getLinuxDmesg) _ = fb.AddFileFromFunc(filepath.Join("system-probe", "selinux_sestatus.log"), getSystemProbeSelinuxSestatus) _ = fb.AddFileFromFunc(filepath.Join("system-probe", "selinux_semodule_list.log"), getSystemProbeSelinuxSemoduleList) } } +// only used in tests when running on linux +var linuxKernelSymbols = getLinuxKernelSymbols + +func addSecurityAgentPlatformSpecificEntries(fb flaretypes.FlareBuilder) { + linuxKernelSymbols(fb) //nolint:errcheck + getLinuxPid1MountInfo(fb) //nolint:errcheck + fb.AddFileFromFunc("dmesg", getLinuxDmesg) //nolint:errcheck + getLinuxKprobeEvents(fb) //nolint:errcheck + getLinuxTracingAvailableEvents(fb) //nolint:errcheck + getLinuxTracingAvailableFilterFunctions(fb) //nolint:errcheck +} + func getLinuxKernelSymbols(fb flaretypes.FlareBuilder) error { return fb.CopyFile("/proc/kallsyms") } @@ -59,62 +67,10 @@ func getLinuxPid1MountInfo(fb flaretypes.FlareBuilder) error { return fb.CopyFile("/proc/1/mountinfo") } -var klogRegexp = regexp.MustCompile(`<(\d+)>(.*)`) - -func readAllDmesg() ([]byte, error) { - const syslogActionSizeBuffer = 10 - const syslogActionReadAll = 3 - - n, err := syscall.Klogctl(syslogActionSizeBuffer, nil) - if err != nil { - return nil, fmt.Errorf("failed to query size of log buffer [%w]", err) - } - - b := make([]byte, n) - - m, err := syscall.Klogctl(syslogActionReadAll, b) - if err != nil { - return nil, fmt.Errorf("failed to read messages from log buffer [%w]", err) - } - - return b[:m], nil -} - -func parseDmesg(buffer []byte) (string, error) { - buf := bytes.NewBuffer(buffer) - var result string - - for { - line, err := buf.ReadString('\n') - if err == io.EOF { - break - } else if err != nil { - return result, err - } - - parts 
:= klogRegexp.FindStringSubmatch(line) - if parts != nil { - result += parts[2] + "\n" - } else { - result += line - } - } - - return result, nil -} - -func getLinuxDmesg(fb flaretypes.FlareBuilder) error { - dmesg, err := readAllDmesg() - if err != nil { - return err - } - - content, err := parseDmesg(dmesg) - if err != nil { - return err - } - - return fb.AddFile("dmesg", []byte(content)) +func getLinuxDmesg() ([]byte, error) { + sysProbeClient := sysprobeclient.Get(getSystemProbeSocketPath()) + url := sysprobeclient.DebugURL("/dmesg") + return getHTTPData(sysProbeClient, url) } func getLinuxTracingAvailableEvents(fb flaretypes.FlareBuilder) error { diff --git a/pkg/flare/archive_nodocker.go b/pkg/flare/archive_nodocker.go index 431d2ebd3826b..ccf794ec26c2d 100644 --- a/pkg/flare/archive_nodocker.go +++ b/pkg/flare/archive_nodocker.go @@ -9,10 +9,10 @@ package flare import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) -func getDockerSelfInspect(_ optional.Option[workloadmeta.Component]) ([]byte, error) { +func getDockerSelfInspect(_ option.Option[workloadmeta.Component]) ([]byte, error) { return nil, nil } diff --git a/pkg/flare/archive_nolinux.go b/pkg/flare/archive_nolinux.go index 9b78a31e1f068..effc92e7b1058 100644 --- a/pkg/flare/archive_nolinux.go +++ b/pkg/flare/archive_nolinux.go @@ -13,26 +13,11 @@ import ( func addSystemProbePlatformSpecificEntries(_ flaretypes.FlareBuilder) {} -func getLinuxKernelSymbols(_ flaretypes.FlareBuilder) error { - return nil -} +func addSecurityAgentPlatformSpecificEntries(_ flaretypes.FlareBuilder) {} -func getLinuxKprobeEvents(_ flaretypes.FlareBuilder) error { - return nil -} - -func getLinuxDmesg(_ flaretypes.FlareBuilder) error { - return nil -} - -func getLinuxPid1MountInfo(_ flaretypes.FlareBuilder) error { - return nil -} - -func getLinuxTracingAvailableEvents(_ flaretypes.FlareBuilder) error { - return nil -} +// only used in tests when running on linux +var linuxKernelSymbols = getLinuxKernelSymbols //nolint:unused -func getLinuxTracingAvailableFilterFunctions(_ flaretypes.FlareBuilder) error { +func getLinuxKernelSymbols(_ flaretypes.FlareBuilder) error { //nolint:unused return nil } diff --git a/pkg/flare/archive_security.go b/pkg/flare/archive_security.go index 1fb01ba7e3aa1..07ba9fde18ab2 100644 --- a/pkg/flare/archive_security.go +++ b/pkg/flare/archive_security.go @@ -15,9 +15,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -// for testing purpose -var linuxKernelSymbols = getLinuxKernelSymbols - // CreateSecurityAgentArchive packages up the files func CreateSecurityAgentArchive(local bool, logFilePath string, statusComponent status.Component) (string, error) { fb, err := flarehelpers.NewFlareBuilder(local, flaretypes.FlareArgs{}) @@ -46,18 +43,14 @@ func createSecurityAgentArchive(fb flaretypes.FlareBuilder, logFilePath string, } } - getLogFiles(fb, logFilePath) - getConfigFiles(fb, searchPaths{}) + GetLogFiles(fb, logFilePath) + GetConfigFiles(fb, searchPaths{}) getComplianceFiles(fb) //nolint:errcheck getRuntimeFiles(fb) //nolint:errcheck - getExpVar(fb) //nolint:errcheck - fb.AddFileFromFunc("envvars.log", getEnvVars) //nolint:errcheck - linuxKernelSymbols(fb) //nolint:errcheck - getLinuxPid1MountInfo(fb) //nolint:errcheck - getLinuxDmesg(fb) //nolint:errcheck - getLinuxKprobeEvents(fb) //nolint:errcheck - getLinuxTracingAvailableEvents(fb) //nolint:errcheck - 
getLinuxTracingAvailableFilterFunctions(fb) //nolint:errcheck + GetExpVar(fb) //nolint:errcheck + fb.AddFileFromFunc("envvars.log", GetEnvVars) //nolint:errcheck + + addSecurityAgentPlatformSpecificEntries(fb) } func getComplianceFiles(fb flaretypes.FlareBuilder) error { diff --git a/pkg/flare/archive_test.go b/pkg/flare/archive_test.go index 25271c47273f9..f92d38fcf07b5 100644 --- a/pkg/flare/archive_test.go +++ b/pkg/flare/archive_test.go @@ -50,7 +50,7 @@ func TestIncludeSystemProbeConfig(t *testing.T) { defer os.Remove("./test/system-probe.yaml") mock := flarehelpers.NewFlareBuilderMock(t, false) - getConfigFiles(mock.Fb, searchPaths{"": "./test/confd"}) + GetConfigFiles(mock.Fb, searchPaths{"": "./test/confd"}) mock.AssertFileExists("etc", "datadog.yaml") mock.AssertFileExists("etc", "system-probe.yaml") @@ -60,7 +60,7 @@ func TestIncludeConfigFiles(t *testing.T) { configmock.New(t) mock := flarehelpers.NewFlareBuilderMock(t, false) - getConfigFiles(mock.Fb, searchPaths{"": "./test/confd"}) + GetConfigFiles(mock.Fb, searchPaths{"": "./test/confd"}) mock.AssertFileExists("etc/confd/test.yaml") mock.AssertFileExists("etc/confd/test.Yml") @@ -71,7 +71,7 @@ func TestIncludeConfigFilesWithPrefix(t *testing.T) { configmock.New(t) mock := flarehelpers.NewFlareBuilderMock(t, false) - getConfigFiles(mock.Fb, searchPaths{"prefix": "./test/confd"}) + GetConfigFiles(mock.Fb, searchPaths{"prefix": "./test/confd"}) mock.AssertFileExists("etc/confd/prefix/test.yaml") mock.AssertFileExists("etc/confd/prefix/test.Yml") diff --git a/pkg/flare/archive_dca.go b/pkg/flare/clusteragent/archive_dca.go similarity index 80% rename from pkg/flare/archive_dca.go rename to pkg/flare/clusteragent/archive_dca.go index 047ae0cf9eaca..70ff0a6bf992d 100644 --- a/pkg/flare/archive_dca.go +++ b/pkg/flare/clusteragent/archive_dca.go @@ -3,7 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package flare +// Package clusteragent contains the logic to create the cluster agent flare archive. 
+package clusteragent import ( "bufio" @@ -12,14 +13,18 @@ import ( "fmt" "io" "net/http" + "net/url" "path/filepath" + "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" flarehelpers "github.com/DataDog/datadog-agent/comp/core/flare/helpers" flaretypes "github.com/DataDog/datadog-agent/comp/core/flare/types" "github.com/DataDog/datadog-agent/comp/core/status" + "github.com/DataDog/datadog-agent/pkg/api/util" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" "github.com/DataDog/datadog-agent/pkg/clusteragent/autoscaling/custommetrics" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/flare" "github.com/DataDog/datadog-agent/pkg/status/render" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -59,10 +64,10 @@ func createDCAArchive(fb flaretypes.FlareBuilder, confSearchPaths map[string]str } - getLogFiles(fb, logFilePath) - getConfigFiles(fb, confSearchPaths) + flare.GetLogFiles(fb, logFilePath) + flare.GetConfigFiles(fb, confSearchPaths) getClusterAgentConfigCheck(fb) //nolint:errcheck - getExpVar(fb) //nolint:errcheck + flare.GetExpVar(fb) //nolint:errcheck getMetadataMap(fb) //nolint:errcheck getClusterAgentClusterChecks(fb) //nolint:errcheck getClusterAgentDiagnose(fb) //nolint:errcheck @@ -70,7 +75,7 @@ func createDCAArchive(fb flaretypes.FlareBuilder, confSearchPaths map[string]str fb.AddFileFromFunc("cluster-agent-deployment.yaml", getClusterAgentDeployment) //nolint:errcheck fb.AddFileFromFunc("helm-values.yaml", getHelmValues) //nolint:errcheck fb.AddFileFromFunc("datadog-agent-cr.yaml", getDatadogAgentManifest) //nolint:errcheck - fb.AddFileFromFunc("envvars.log", getEnvVars) //nolint:errcheck + fb.AddFileFromFunc("envvars.log", flare.GetEnvVars) //nolint:errcheck fb.AddFileFromFunc("telemetry.log", QueryDCAMetrics) //nolint:errcheck fb.AddFileFromFunc("tagger-list.json", getDCATaggerList) //nolint:errcheck fb.AddFileFromFunc("workload-list.log", getDCAWorkloadList) //nolint:errcheck @@ -158,6 +163,41 @@ func getClusterAgentConfigCheck(fb flaretypes.FlareBuilder) error { return fb.AddFile("config-check.log", b.Bytes()) } +// GetClusterAgentConfigCheck gets config check from the server for cluster agent +func GetClusterAgentConfigCheck(w io.Writer, withDebug bool) error { + c := util.GetClient(false) // FIX: get certificates right then make this true + + // Set session token + err := util.SetAuthToken(pkgconfigsetup.Datadog()) + if err != nil { + return err + } + + targetURL := url.URL{ + Scheme: "https", + Host: fmt.Sprintf("localhost:%v", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")), + Path: "config-check", + } + + r, err := util.DoGet(c, targetURL.String(), util.LeaveConnectionOpen) + if err != nil { + if r != nil && string(r) != "" { + return fmt.Errorf("the agent ran into an error while checking config: %s", string(r)) + } + return fmt.Errorf("failed to query the agent (running?): %s", err) + } + + cr := integration.ConfigCheckResponse{} + err = json.Unmarshal(r, &cr) + if err != nil { + return err + } + + flare.PrintConfigCheck(w, cr, withDebug) + + return nil +} + func getClusterAgentDiagnose(fb flaretypes.FlareBuilder) error { var b bytes.Buffer @@ -176,7 +216,7 @@ func getDCATaggerList() ([]byte, error) { taggerListURL := fmt.Sprintf("https://%v:%v/tagger-list", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")) - return getTaggerList(taggerListURL) + return 
flare.GetTaggerList(taggerListURL) } func getDCAWorkloadList() ([]byte, error) { @@ -185,7 +225,7 @@ func getDCAWorkloadList() ([]byte, error) { return nil, err } - return getWorkloadList(fmt.Sprintf("https://%v:%v/workload-list?verbose=true", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"))) + return flare.GetWorkloadList(fmt.Sprintf("https://%v:%v/workload-list?verbose=true", ipcAddress, pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port"))) } func getPerformanceProfileDCA(fb flaretypes.FlareBuilder, pdata ProfileData) { diff --git a/pkg/flare/cluster_checks.go b/pkg/flare/clusteragent/cluster_checks.go similarity index 96% rename from pkg/flare/cluster_checks.go rename to pkg/flare/clusteragent/cluster_checks.go index 47108857ab8e2..d68377a2bcd86 100644 --- a/pkg/flare/cluster_checks.go +++ b/pkg/flare/clusteragent/cluster_checks.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package flare +package clusteragent import ( "encoding/json" @@ -18,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/util" "github.com/DataDog/datadog-agent/pkg/clusteragent/clusterchecks/types" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/flare" ) // GetClusterChecks dumps the clustercheck dispatching state to the writer @@ -74,7 +75,7 @@ func GetClusterChecks(w io.Writer, checkName string) error { if len(cr.Dangling) > 0 { fmt.Fprintf(w, "=== %s configurations ===\n", color.RedString("Unassigned")) for _, c := range cr.Dangling { - PrintConfig(w, c, checkName) + flare.PrintConfig(w, c, checkName) } fmt.Fprintln(w, "") } @@ -101,7 +102,7 @@ func GetClusterChecks(w io.Writer, checkName string) error { } fmt.Fprintf(w, "\n===== Checks on %s =====\n", color.HiMagentaString(node.Name)) for _, c := range node.Configs { - PrintConfig(w, c, checkName) + flare.PrintConfig(w, c, checkName) } } @@ -146,7 +147,7 @@ func GetEndpointsChecks(w io.Writer, checkName string) error { // Print summary of pod-backed endpointschecks fmt.Fprintf(w, "\n===== %d Pod-backed Endpoints-Checks scheduled =====\n", len(cr.Configs)) for _, c := range cr.Configs { - PrintConfig(w, c, checkName) + flare.PrintConfig(w, c, checkName) } return nil diff --git a/pkg/flare/diagnose.go b/pkg/flare/clusteragent/diagnose.go similarity index 98% rename from pkg/flare/diagnose.go rename to pkg/flare/clusteragent/diagnose.go index 9a731d0327fcb..a291f5f91bf8e 100644 --- a/pkg/flare/diagnose.go +++ b/pkg/flare/clusteragent/diagnose.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
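Because GetClusterAgentConfigCheck now lives in pkg/flare/clusteragent, cluster-agent commands are expected to import it from there. A hedged sketch of a caller (the command wiring is hypothetical; the signature matches the function moved above):

package dcacommands // hypothetical CLI wiring

import (
	"os"

	"github.com/DataDog/datadog-agent/pkg/flare/clusteragent"
)

// printConfigCheck dumps the cluster agent's config-check output to stdout without debug details.
func printConfigCheck() error {
	return clusteragent.GetClusterAgentConfigCheck(os.Stdout, false)
}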
-package flare +package clusteragent import ( "io" diff --git a/pkg/flare/manifests.go b/pkg/flare/clusteragent/manifests.go similarity index 99% rename from pkg/flare/manifests.go rename to pkg/flare/clusteragent/manifests.go index 0de1ff0ae492a..461eeb38c34e0 100644 --- a/pkg/flare/manifests.go +++ b/pkg/flare/clusteragent/manifests.go @@ -5,7 +5,7 @@ //go:build kubeapiserver -package flare +package clusteragent import ( "bytes" diff --git a/pkg/flare/manifests_nocompile.go b/pkg/flare/clusteragent/manifests_nocompile.go similarity index 94% rename from pkg/flare/manifests_nocompile.go rename to pkg/flare/clusteragent/manifests_nocompile.go index 854b3fb659659..b389ca6ffa19c 100644 --- a/pkg/flare/manifests_nocompile.go +++ b/pkg/flare/clusteragent/manifests_nocompile.go @@ -5,7 +5,7 @@ //go:build !kubeapiserver -package flare +package clusteragent import ( "errors" @@ -19,7 +19,7 @@ var ( ErrNotCompiled = errors.New("kubernetes apiserver support not compiled in") ) -// getAgentDaemonSet retrieves the DaemonSet manifest of the Agent +// GetAgentDaemonSet retrieves the DaemonSet manifest of the Agent func getAgentDaemonSet() ([]byte, error) { return nil, log.Errorf("getAgentDaemonSet not implemented %s", ErrNotCompiled.Error()) } diff --git a/pkg/flare/config_check.go b/pkg/flare/config_check.go index 6d6bfca1970b8..3372de9adbc9f 100644 --- a/pkg/flare/config_check.go +++ b/pkg/flare/config_check.go @@ -6,54 +6,15 @@ package flare import ( - "encoding/json" "fmt" "io" - "net/url" "github.com/fatih/color" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - "github.com/DataDog/datadog-agent/pkg/api/util" checkid "github.com/DataDog/datadog-agent/pkg/collector/check/id" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" ) -// GetClusterAgentConfigCheck gets config check from the server for cluster agent -func GetClusterAgentConfigCheck(w io.Writer, withDebug bool) error { - c := util.GetClient(false) // FIX: get certificates right then make this true - - // Set session token - err := util.SetAuthToken(pkgconfigsetup.Datadog()) - if err != nil { - return err - } - - targetURL := url.URL{ - Scheme: "https", - Host: fmt.Sprintf("localhost:%v", pkgconfigsetup.Datadog().GetInt("cluster_agent.cmd_port")), - Path: "config-check", - } - - r, err := util.DoGet(c, targetURL.String(), util.LeaveConnectionOpen) - if err != nil { - if r != nil && string(r) != "" { - return fmt.Errorf("the agent ran into an error while checking config: %s", string(r)) - } - return fmt.Errorf("failed to query the agent (running?): %s", err) - } - - cr := integration.ConfigCheckResponse{} - err = json.Unmarshal(r, &cr) - if err != nil { - return err - } - - PrintConfigCheck(w, cr, withDebug) - - return nil -} - // PrintConfigCheck prints a human-readable representation of the config check response func PrintConfigCheck(w io.Writer, cr integration.ConfigCheckResponse, withDebug bool) { if w != color.Output { diff --git a/pkg/flare/envvars.go b/pkg/flare/envvars.go index 40b98a3b74d2d..00c4f7a6a7026 100644 --- a/pkg/flare/envvars.go +++ b/pkg/flare/envvars.go @@ -108,6 +108,8 @@ var allowedEnvvarNames = []string{ "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_KEEP_VALUES", "DD_APM_OBFUSCATION_SQL_EXEC_PLAN_NORMALIZE_OBFUSCATE_SQL_VALUES", "DD_APM_OBFUSCATION_CACHE_ENABLED", + "DD_APM_SQL_OBFUSCATION_MODE", + "DD_APM_OBFUSCATION_CACHE_MAX_SIZE", "DD_APM_DEBUG_PORT", "DD_APM_INSTRUMENTATION_ENABLED", "DD_APM_INSTRUMENTATION_ENABLED_NAMESPACES", @@ -159,9 +161,9 @@ func getAllowedEnvvars() 
[]string { return found } -// getEnvVars collects allowed envvars that can affect the agent's +// GetEnvVars collects allowed envvars that can affect the agent's // behaviour while not being handled by viper, in addition to the envvars handled by viper -func getEnvVars() ([]byte, error) { +func GetEnvVars() ([]byte, error) { envvars := getAllowedEnvvars() var b bytes.Buffer diff --git a/pkg/flare/remote_config.go b/pkg/flare/remote_config.go index 32538ab43bf29..14f62090c417c 100644 --- a/pkg/flare/remote_config.go +++ b/pkg/flare/remote_config.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/api/security" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - "github.com/DataDog/datadog-agent/pkg/util" + "github.com/DataDog/datadog-agent/pkg/util/filesystem" agentgrpc "github.com/DataDog/datadog-agent/pkg/util/grpc" ) @@ -107,7 +107,7 @@ func getRemoteConfigDB(fb flaretypes.FlareBuilder) error { // Copies the db so it avoids bbolt from being locked // Also avoid concurrent modifications - err = util.CopyFileAll(srcPath, tempPath) + err = filesystem.CopyFileAll(srcPath, tempPath) // Delete the db at the end to avoid having target files content defer os.Remove(tempPath) if err != nil { diff --git a/pkg/flare/service_windows.go b/pkg/flare/service_windows.go index 6047c11fe2ddb..e2e4e6f1fee29 100644 --- a/pkg/flare/service_windows.go +++ b/pkg/flare/service_windows.go @@ -296,33 +296,39 @@ func getDDServices(manager *mgr.Mgr) ([]serviceInfo, error) { log.Warnf("Error getting list of running services %v", err) return nil, err } + list = filterDatadogServices(list) + // need to add the drivers manually as they are not returned by ListServices + drivers := []string{"ddnpm", "ddprocmon"} + list = append(list, drivers...) for _, serviceName := range list { - if strings.HasPrefix(serviceName, "datadog") { - srvc, err := winutil.OpenService(manager, serviceName, windows.GENERIC_READ) + srvc, err := winutil.OpenService(manager, serviceName, windows.GENERIC_READ) + if err != nil { + log.Warnf("Error Opening Service %s: %v", serviceName, err) + } else { + conf2, err := getServiceInfo(srvc) if err != nil { - log.Warnf("Error Opening Service %s: %v", serviceName, err) - } else { - conf2, err := getServiceInfo(srvc) - if err != nil { - log.Warnf("Error getting info for %s: %v", serviceName, err) - } - ddServices = append(ddServices, conf2) + log.Warnf("Error getting info for %s: %v", serviceName, err) } + ddServices = append(ddServices, conf2) + srvc.Close() } } - // Getting ddnpm service info separately - ddnpm, err := winutil.OpenService(manager, "ddnpm", windows.GENERIC_READ) - if err != nil { - log.Warnf("Error Opening Service ddnpm %v", err) - } else { - ddnpmConf, err := getServiceInfo(ddnpm) - if err != nil { - log.Warnf("Error getting info for ddnpm: %v", err) + return ddServices, nil +} + +// filterDatadogServices returns the services that start with "datadog" (case insensitive) +func filterDatadogServices(services []string) []string { + ddServices := []string{} + + for _, serviceName := range services { + // "The service control manager database preserves the case of the characters, but service name comparisons are always case insensitive." 
+ // https://learn.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-openservicew + if strings.HasPrefix(strings.ToLower(serviceName), "datadog") { + ddServices = append(ddServices, serviceName) } - ddServices = append(ddServices, ddnpmConf) } - return ddServices, nil + return ddServices } diff --git a/pkg/flare/service_windows_test.go b/pkg/flare/service_windows_test.go index c55ea72596900..e1a9a2c5ffc9b 100644 --- a/pkg/flare/service_windows_test.go +++ b/pkg/flare/service_windows_test.go @@ -48,3 +48,11 @@ func TestWindowsService(t *testing.T) { } } + +func TestFilterDatadogServices(t *testing.T) { + inServices := []string{"datadog-agent", "Datadog Installer", "not-datadog"} + outServices := filterDatadogServices(inServices) + assert.Contains(t, outServices, "datadog-agent") + assert.Contains(t, outServices, "Datadog Installer", "prefix match should be case insensitive") + assert.NotContains(t, outServices, "not-datadog", "non-datadog services should not be included") +} diff --git a/pkg/fleet/daemon/daemon.go b/pkg/fleet/daemon/daemon.go index 5023393d99e5b..1917c6733d850 100644 --- a/pkg/fleet/daemon/daemon.go +++ b/pkg/fleet/daemon/daemon.go @@ -28,9 +28,7 @@ import ( installerErrors "github.com/DataDog/datadog-agent/pkg/fleet/installer/errors" "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" "github.com/DataDog/datadog-agent/pkg/fleet/internal/bootstrap" - "github.com/DataDog/datadog-agent/pkg/fleet/internal/cdn" "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" - "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" "github.com/DataDog/datadog-agent/pkg/fleet/telemetry" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -44,6 +42,17 @@ const ( refreshStateInterval = 30 * time.Second ) +var ( + // errStateDoesntMatch is the error returned when the state doesn't match + errStateDoesntMatch = errors.New("state doesn't match") +) + +// PackageState represents a package state. +type PackageState struct { + Version repository.State + Config repository.State +} + // Daemon is the fleet daemon in charge of remote install, updates and configuration. 
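The daemon now stores an installer factory (func(*env.Env) installer.Installer) rather than a single installer, so a remote install task can run against a modified copy of the default env without mutating shared state. A sketch of that pattern (the helper name and standalone shape are illustrative; the validation call and field names come from the install task handler further below):

package example // sketch of the per-request env override enabled by the factory

import (
	"context"

	"github.com/DataDog/datadog-agent/pkg/fleet/installer"
	"github.com/DataDog/datadog-agent/pkg/fleet/installer/env"
)

// installWithEnvOverride copies the base env, optionally overrides APM instrumentation,
// and runs the install through a fresh installer bound to that copy.
func installWithEnvOverride(ctx context.Context, factory func(*env.Env) installer.Installer, base *env.Env, apmInstrumentation, url string) error {
	newEnv := *base // shallow copy; the daemon's default env is left untouched
	if apmInstrumentation != "" {
		if err := env.ValidateAPMInstrumentationEnabled(apmInstrumentation); err != nil {
			return err
		}
		newEnv.InstallScript.APMInstrumentationEnabled = apmInstrumentation
	}
	return factory(&newEnv).Install(ctx, url, nil)
}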
type Daemon interface { Start(ctx context.Context) error @@ -51,6 +60,7 @@ type Daemon interface { SetCatalog(c catalog) Install(ctx context.Context, url string, args []string) error + Remove(ctx context.Context, pkg string) error StartExperiment(ctx context.Context, url string) error StopExperiment(ctx context.Context, pkg string) error PromoteExperiment(ctx context.Context, pkg string) error @@ -59,7 +69,7 @@ type Daemon interface { PromoteConfigExperiment(ctx context.Context, pkg string) error GetPackage(pkg string, version string) (Package, error) - GetState() (map[string]repository.State, error) + GetState() (map[string]PackageState, error) GetRemoteConfigState() *pbgo.ClientUpdater GetAPMInjectionStatus() (APMInjectionStatus, error) } @@ -69,17 +79,19 @@ type daemonImpl struct { stopChan chan struct{} env *env.Env - installer installer.Installer + installer func(env *env.Env) installer.Installer rc *remoteConfig - cdn *cdn.CDN catalog catalog + configs map[string]installerConfig requests chan remoteAPIRequest requestsWG sync.WaitGroup requestsState map[string]requestState } -func newInstaller(env *env.Env, installerBin string) installer.Installer { - return exec.NewInstallerExec(env, installerBin) +func newInstaller(installerBin string) func(env *env.Env) installer.Installer { + return func(env *env.Env) installer.Installer { + return exec.NewInstallerExec(env, installerBin) + } } // NewDaemon returns a new daemon. @@ -112,22 +124,18 @@ func NewDaemon(hostname string, rcFetcher client.ConfigFetcher, config config.Re HTTPSProxy: config.GetString("proxy.https"), NoProxy: strings.Join(config.GetStringSlice("proxy.no_proxy"), ","), } - installer := newInstaller(env, installerBin) - cdn, err := cdn.New(env, filepath.Join(paths.RunPath, "rc_daemon")) - if err != nil { - return nil, err - } - return newDaemon(rc, installer, env, cdn), nil + installer := newInstaller(installerBin) + return newDaemon(rc, installer, env), nil } -func newDaemon(rc *remoteConfig, installer installer.Installer, env *env.Env, cdn *cdn.CDN) *daemonImpl { +func newDaemon(rc *remoteConfig, installer func(env *env.Env) installer.Installer, env *env.Env) *daemonImpl { i := &daemonImpl{ env: env, rc: rc, installer: installer, - cdn: cdn, requests: make(chan remoteAPIRequest, 32), catalog: catalog{}, + configs: make(map[string]installerConfig), stopChan: make(chan struct{}), requestsState: make(map[string]requestState), } @@ -136,35 +144,28 @@ func newDaemon(rc *remoteConfig, installer installer.Installer, env *env.Env, cd } // GetState returns the state. 
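GetState now returns map[string]PackageState, pairing each package's repository state with its config state instead of flattening them into one repository.State. A short consumer sketch (the logging is illustrative; the types are the ones defined above):

package example // hypothetical consumer of the daemon API

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/fleet/daemon"
)

// logPackageStates prints the stable/experiment versions for both the package and its config.
func logPackageStates(d daemon.Daemon) error {
	states, err := d.GetState()
	if err != nil {
		return err
	}
	for pkg, s := range states {
		fmt.Printf("%s: stable=%s experiment=%s config-stable=%s config-experiment=%s\n",
			pkg, s.Version.Stable, s.Version.Experiment, s.Config.Stable, s.Config.Experiment)
	}
	return nil
}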
-func (d *daemonImpl) GetState() (map[string]repository.State, error) { +func (d *daemonImpl) GetState() (map[string]PackageState, error) { d.m.Lock() defer d.m.Unlock() - states, err := d.installer.States() + states, err := d.installer(d.env).States() if err != nil { return nil, err } var configStates map[string]repository.State if d.env.RemotePolicies { - configStates, err = d.installer.ConfigStates() + configStates, err = d.installer(d.env).ConfigStates() if err != nil { return nil, err } } - res := make(map[string]repository.State) - for pkg, state := range states { - res[pkg] = state - } - for pkg, state := range configStates { - if _, ok := res[pkg]; !ok { - res[pkg] = repository.State{ - Stable: "", - Experiment: "", - StablePoliciesState: state.StablePoliciesState, - ExperimentPoliciesState: state.ExperimentPoliciesState, - } + res := make(map[string]PackageState) + for pkg := range states { + res[pkg] = PackageState{ + Version: states[pkg], + Config: configStates[pkg], } } return res, nil @@ -250,7 +251,7 @@ func (d *daemonImpl) Start(_ context.Context) error { select { case <-gcTicker.C: d.m.Lock() - err := d.installer.GarbageCollect(context.Background()) + err := d.installer(d.env).GarbageCollect(context.Background()) d.m.Unlock() if err != nil { log.Errorf("Daemon: could not run GC: %v", err) @@ -269,11 +270,7 @@ func (d *daemonImpl) Start(_ context.Context) error { } } }() - if !d.env.RemoteUpdates { - log.Infof("Daemon: Remote updates are disabled") - return nil - } - d.rc.Start(d.handleCatalogUpdate, d.scheduleRemoteAPIRequest) + d.rc.Start(d.handleConfigsUpdate, d.handleCatalogUpdate, d.scheduleRemoteAPIRequest) return nil } @@ -283,7 +280,6 @@ func (d *daemonImpl) Stop(_ context.Context) error { defer d.m.Unlock() d.rc.Close() close(d.stopChan) - d.cdn.Close() d.requestsWG.Wait() return nil } @@ -292,17 +288,17 @@ func (d *daemonImpl) Stop(_ context.Context) error { func (d *daemonImpl) Install(ctx context.Context, url string, args []string) error { d.m.Lock() defer d.m.Unlock() - return d.install(ctx, url, args) + return d.install(ctx, d.env, url, args) } -func (d *daemonImpl) install(ctx context.Context, url string, args []string) (err error) { +func (d *daemonImpl) install(ctx context.Context, env *env.Env, url string, args []string) (err error) { span, ctx := telemetry.StartSpanFromContext(ctx, "install") defer func() { span.Finish(err) }() d.refreshState(ctx) defer d.refreshState(ctx) log.Infof("Daemon: Installing package from %s", url) - err = d.installer.Install(ctx, url, args) + err = d.installer(env).Install(ctx, url, args) if err != nil { return fmt.Errorf("could not install: %w", err) } @@ -310,6 +306,27 @@ func (d *daemonImpl) install(ctx context.Context, url string, args []string) (er return nil } +func (d *daemonImpl) Remove(ctx context.Context, pkg string) error { + d.m.Lock() + defer d.m.Unlock() + return d.remove(ctx, pkg) +} + +func (d *daemonImpl) remove(ctx context.Context, pkg string) (err error) { + span, ctx := telemetry.StartSpanFromContext(ctx, "remove") + defer func() { span.Finish(err) }() + d.refreshState(ctx) + defer d.refreshState(ctx) + + log.Infof("Daemon: Removing package %s", pkg) + err = d.installer(d.env).Remove(ctx, pkg) + if err != nil { + return fmt.Errorf("could not remove: %w", err) + } + log.Infof("Daemon: Successfully removed package %s", pkg) + return nil +} + // StartExperiment starts an experiment with the given package. 
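Remove is also exposed over the local API as POST /{package}/remove (registered further below, next to the curl examples that now use /opt/datadog-packages/run/installer.sock). A hedged client sketch using only the standard library (the empty JSON body and status handling are assumptions):

package installerctl // hypothetical local-API client

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"strings"
)

// removePackage drives the new remove endpoint over the installer's unix socket.
func removePackage(pkg string) error {
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", "/opt/datadog-packages/run/installer.sock")
			},
		},
	}
	resp, err := client.Post(fmt.Sprintf("http://installer/%s/remove", pkg), "application/json", strings.NewReader("{}"))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("remove %s: unexpected status %s", pkg, resp.Status)
	}
	return nil
}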
func (d *daemonImpl) StartExperiment(ctx context.Context, url string) error { d.m.Lock() @@ -324,7 +341,7 @@ func (d *daemonImpl) startExperiment(ctx context.Context, url string) (err error defer d.refreshState(ctx) log.Infof("Daemon: Starting experiment for package from %s", url) - err = d.installer.InstallExperiment(ctx, url) + err = d.installer(d.env).InstallExperiment(ctx, url) if err != nil { return fmt.Errorf("could not install experiment: %w", err) } @@ -340,7 +357,7 @@ func (d *daemonImpl) startInstallerExperiment(ctx context.Context, url string) ( log.Infof("Daemon: Starting installer experiment for package from %s", url) if runtime.GOOS == "windows" { - err = d.installer.InstallExperiment(ctx, url) + err = d.installer(d.env).InstallExperiment(ctx, url) } else { err = bootstrap.InstallExperiment(ctx, d.env, url) } @@ -365,7 +382,7 @@ func (d *daemonImpl) promoteExperiment(ctx context.Context, pkg string) (err err defer d.refreshState(ctx) log.Infof("Daemon: Promoting experiment for package %s", pkg) - err = d.installer.PromoteExperiment(ctx, pkg) + err = d.installer(d.env).PromoteExperiment(ctx, pkg) if err != nil { return fmt.Errorf("could not promote experiment: %w", err) } @@ -387,7 +404,7 @@ func (d *daemonImpl) stopExperiment(ctx context.Context, pkg string) (err error) defer d.refreshState(ctx) log.Infof("Daemon: Stopping experiment for package %s", pkg) - err = d.installer.RemoveExperiment(ctx, pkg) + err = d.installer(d.env).RemoveExperiment(ctx, pkg) if err != nil { return fmt.Errorf("could not stop experiment: %w", err) } @@ -402,18 +419,22 @@ func (d *daemonImpl) StartConfigExperiment(ctx context.Context, url string, vers return d.startConfigExperiment(ctx, url, version) } -func (d *daemonImpl) startConfigExperiment(ctx context.Context, url string, version string) (err error) { +func (d *daemonImpl) startConfigExperiment(ctx context.Context, pkg string, version string) (err error) { span, ctx := telemetry.StartSpanFromContext(ctx, "start_config_experiment") defer func() { span.Finish(err) }() d.refreshState(ctx) defer d.refreshState(ctx) - log.Infof("Daemon: Starting config experiment for package from %s", url) - err = d.installer.InstallConfigExperiment(ctx, url, version) + log.Infof("Daemon: Starting config experiment version %s for package %s", version, pkg) + config, ok := d.configs[version] + if !ok { + return fmt.Errorf("could not find config version %s", version) + } + err = d.installer(d.env).InstallConfigExperiment(ctx, pkg, version, config.Configs) if err != nil { return fmt.Errorf("could not start config experiment: %w", err) } - log.Infof("Daemon: Successfully started config experiment for package from %s", url) + log.Infof("Daemon: Successfully started config experiment version %s for package %s", version, pkg) return nil } @@ -431,7 +452,7 @@ func (d *daemonImpl) promoteConfigExperiment(ctx context.Context, pkg string) (e defer d.refreshState(ctx) log.Infof("Daemon: Promoting config experiment for package %s", pkg) - err = d.installer.PromoteConfigExperiment(ctx, pkg) + err = d.installer(d.env).PromoteConfigExperiment(ctx, pkg) if err != nil { return fmt.Errorf("could not promote config experiment: %w", err) } @@ -453,7 +474,7 @@ func (d *daemonImpl) stopConfigExperiment(ctx context.Context, pkg string) (err defer d.refreshState(ctx) log.Infof("Daemon: Stopping config experiment for package %s", pkg) - err = d.installer.RemoveConfigExperiment(ctx, pkg) + err = d.installer(d.env).RemoveConfigExperiment(ctx, pkg) if err != nil { return fmt.Errorf("could not 
stop config experiment: %w", err) } @@ -461,6 +482,14 @@ func (d *daemonImpl) stopConfigExperiment(ctx context.Context, pkg string) (err return nil } +func (d *daemonImpl) handleConfigsUpdate(configs map[string]installerConfig) error { + d.m.Lock() + defer d.m.Unlock() + log.Infof("Installer: Received configs update") + d.configs = configs + return nil +} + func (d *daemonImpl) handleCatalogUpdate(c catalog) error { d.m.Lock() defer d.m.Unlock() @@ -484,35 +513,53 @@ func (d *daemonImpl) handleRemoteAPIRequest(request remoteAPIRequest) (err error d.refreshState(ctx) defer d.refreshState(ctx) - s, err := d.installer.State(request.Package) + err = d.verifyState(ctx, request) if err != nil { - return fmt.Errorf("could not get installer state: %w", err) - } - - c, err := d.installer.ConfigState(request.Package) - if err != nil { - return fmt.Errorf("could not get installer config state: %w", err) + if errors.Is(err, errStateDoesntMatch) { + return nil // Error already reported to RC + } + return fmt.Errorf("couldn't verify state: %w", err) } - versionEqual := request.ExpectedState.InstallerVersion == "" || version.AgentVersion == request.ExpectedState.InstallerVersion - if versionEqual && - (s.Stable != request.ExpectedState.Stable || - s.Experiment != request.ExpectedState.Experiment || - c.Stable != request.ExpectedState.StableConfig || - c.Experiment != request.ExpectedState.ExperimentConfig) { - log.Infof( - "remote request %s not executed as state does not match: expected %v, got package: %v, config: %v", - request.ID, request.ExpectedState, s, c, - ) - setRequestInvalid(ctx) - d.refreshState(ctx) - return nil - } defer func() { setRequestDone(ctx, err) }() switch request.Method { + case methodInstallPackage: + var params installPackageTaskParams + err = json.Unmarshal(request.Params, ¶ms) + if err != nil { + return fmt.Errorf("could not unmarshal install package params: %w", err) + } + log.Infof("Installer: Received remote request %s to install package %s version %s", request.ID, request.Package, params.Version) + + // Handle install args + newEnv := *d.env + if params.ApmInstrumentation != "" { + if err := env.ValidateAPMInstrumentationEnabled(params.ApmInstrumentation); err != nil { + return fmt.Errorf("invalid APM instrumentation value: %w", err) + } + newEnv.InstallScript.APMInstrumentationEnabled = params.ApmInstrumentation + } + + pkg, ok := d.catalog.getPackage(request.Package, params.Version, runtime.GOARCH, runtime.GOOS) + if !ok { + return installerErrors.Wrap( + installerErrors.ErrPackageNotFound, + fmt.Errorf("could not get package %s, %s for %s, %s", request.Package, params.Version, runtime.GOARCH, runtime.GOOS), + ) + } + return d.install(ctx, &newEnv, pkg.URL, nil) + + case methodUninstallPackage: + log.Infof("Installer: Received remote request %s to uninstall package %s", request.ID, request.Package) + if request.Package == "datadog-installer" || request.Package == "datadog-agent" { + log.Infof("Installer: Can't uninstall the package %s", request.Package) + return nil + } + return d.remove(ctx, request.Package) + case methodStartExperiment: - var params taskWithVersionParams + var params experimentTaskParams err = json.Unmarshal(request.Params, ¶ms) if err != nil { return fmt.Errorf("could not unmarshal start experiment params: %w", err) @@ -530,24 +577,28 @@ func (d *daemonImpl) handleRemoteAPIRequest(request remoteAPIRequest) (err error return d.startInstallerExperiment(ctx, experimentPackage.URL) } return d.startExperiment(ctx, experimentPackage.URL) + case 
methodStopExperiment: log.Infof("Installer: Received remote request %s to stop experiment for package %s", request.ID, request.Package) return d.stopExperiment(ctx, request.Package) + case methodPromoteExperiment: log.Infof("Installer: Received remote request %s to promote experiment for package %s", request.ID, request.Package) return d.promoteExperiment(ctx, request.Package) case methodStartConfigExperiment: - var params taskWithVersionParams + var params experimentTaskParams err = json.Unmarshal(request.Params, ¶ms) if err != nil { return fmt.Errorf("could not unmarshal start experiment params: %w", err) } log.Infof("Installer: Received remote request %s to start config experiment for package %s", request.ID, request.Package) return d.startConfigExperiment(ctx, request.Package, params.Version) + case methodStopConfigExperiment: log.Infof("Installer: Received remote request %s to stop config experiment for package %s", request.ID, request.Package) return d.stopConfigExperiment(ctx, request.Package) + case methodPromoteConfigExperiment: log.Infof("Installer: Received remote request %s to promote config experiment for package %s", request.ID, request.Package) return d.promoteConfigExperiment(ctx, request.Package) @@ -557,6 +608,40 @@ func (d *daemonImpl) handleRemoteAPIRequest(request remoteAPIRequest) (err error } } +func (d *daemonImpl) verifyState(ctx context.Context, request remoteAPIRequest) error { + if request.Method == methodInstallPackage { + // No state verification if the method is to install a package, as the package may + // not be installed yet. + return nil + } + + s, err := d.installer(d.env).State(request.Package) + if err != nil { + return fmt.Errorf("could not get installer state: %w", err) + } + + c, err := d.installer(d.env).ConfigState(request.Package) + if err != nil { + return fmt.Errorf("could not get installer config state: %w", err) + } + + installerVersionEqual := request.ExpectedState.InstallerVersion == "" || version.AgentVersion == request.ExpectedState.InstallerVersion + packageVersionEqual := s.Stable == request.ExpectedState.Stable && s.Experiment == request.ExpectedState.Experiment + configVersionEqual := c.Stable == request.ExpectedState.StableConfig && c.Experiment == request.ExpectedState.ExperimentConfig + + if installerVersionEqual && (!packageVersionEqual || !configVersionEqual) { + log.Infof( + "remote request %s not executed as state does not match: expected %v, got package: %v, config: %v", + request.ID, request.ExpectedState, s, c, + ) + setRequestInvalid(ctx) + d.refreshState(ctx) + return errStateDoesntMatch + } + + return nil +} + type requestKey int var requestStateKey requestKey @@ -594,63 +679,35 @@ func setRequestDone(ctx context.Context, err error) { } } -func (d *daemonImpl) resolveRemoteConfigVersion(ctx context.Context, pkg string) (*pbgo.PoliciesState, error) { - if !d.env.RemotePolicies { - return nil, nil - } - config, err := d.cdn.Get(ctx, pkg) - if err != nil { - return nil, err - } - return config.State(), nil -} - func (d *daemonImpl) refreshState(ctx context.Context) { request, ok := ctx.Value(requestStateKey).(*requestState) if ok { d.requestsState[request.Package] = *request } - state, err := d.installer.States() + state, err := d.installer(d.env).States() if err != nil { // TODO: we should report this error through RC in some way log.Errorf("could not get installer state: %v", err) return } - configState, err := d.installer.ConfigStates() + configState, err := d.installer(d.env).ConfigStates() if err != nil { 
log.Errorf("could not get installer config state: %v", err) return } - availableSpace, err := d.installer.AvailableDiskSpace() + availableSpace, err := d.installer(d.env).AvailableDiskSpace() if err != nil { log.Errorf("could not get available size: %v", err) } - for pkg, configState := range configState { - if _, ok := state[pkg]; !ok { - state[pkg] = repository.State{} - } - tmp := state[pkg] - tmp.StablePoliciesState = configState.StablePoliciesState - tmp.ExperimentPoliciesState = configState.ExperimentPoliciesState - state[pkg] = tmp - } - var packages []*pbgo.PackageState for pkg, s := range state { p := &pbgo.PackageState{ - Package: pkg, - StableVersion: s.Stable, - ExperimentVersion: s.Experiment, - StableConfigState: s.StablePoliciesState, - ExperimentConfigState: s.ExperimentPoliciesState, - } - - configState, err := d.resolveRemoteConfigVersion(ctx, pkg) - if err == nil && configState != nil { - p.RemoteConfigState = configState - } else if err != cdn.ErrProductNotSupported { - log.Warnf("could not get remote config version: %v", err) + Package: pkg, + StableVersion: s.Stable, + ExperimentVersion: s.Experiment, + StableConfigVersion: configState[pkg].Stable, + ExperimentConfigVersion: configState[pkg].Experiment, } requestState, ok := d.requestsState[pkg] diff --git a/pkg/fleet/daemon/daemon_test.go b/pkg/fleet/daemon/daemon_test.go index 228b37089845f..28c6d5c61abec 100644 --- a/pkg/fleet/daemon/daemon_test.go +++ b/pkg/fleet/daemon/daemon_test.go @@ -20,9 +20,9 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/fleet/installer" "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" "github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" - "github.com/DataDog/datadog-agent/pkg/fleet/internal/cdn" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/remoteconfig/state" "github.com/DataDog/datadog-agent/pkg/version" @@ -91,8 +91,8 @@ func (m *testPackageManager) PromoteExperiment(ctx context.Context, pkg string) return args.Error(0) } -func (m *testPackageManager) InstallConfigExperiment(ctx context.Context, url string, hash string) error { - args := m.Called(ctx, url, hash) +func (m *testPackageManager) InstallConfigExperiment(ctx context.Context, pkg string, version string, rawConfig []byte) error { + args := m.Called(ctx, pkg, version, rawConfig) return args.Error(0) } @@ -213,11 +213,11 @@ func newTestInstaller(t *testing.T) *testInstaller { pm.On("ConfigStates").Return(map[string]repository.State{}, nil) rcc := newTestRemoteConfigClient(t) rc := &remoteConfig{client: rcc} - env := &env.Env{RemoteUpdates: true} - cdn, err := cdn.New(env, t.TempDir()) - require.NoError(t, err) - daemon := newDaemon(rc, pm, env, cdn) - require.NoError(t, err) + daemon := newDaemon( + rc, + func(_ *env.Env) installer.Installer { return pm }, + &env.Env{RemoteUpdates: true}, + ) i := &testInstaller{ daemonImpl: daemon, rcc: rcc, @@ -332,7 +332,7 @@ func TestRemoteRequest(t *testing.T) { c := catalog{ Packages: []Package{testExperimentPackage}, } - versionParams := taskWithVersionParams{ + versionParams := experimentTaskParams{ Version: testExperimentPackage.Version, } versionParamsJSON, _ := json.Marshal(versionParams) diff --git a/pkg/fleet/daemon/local_api.go b/pkg/fleet/daemon/local_api.go index 80c4e0c8eae5f..2031345c7c714 100644 --- a/pkg/fleet/daemon/local_api.go +++ b/pkg/fleet/daemon/local_api.go @@ -13,7 +13,6 @@ import ( "net" "net/http" - 
"github.com/DataDog/datadog-agent/pkg/fleet/installer/repository" pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" @@ -23,10 +22,10 @@ import ( // StatusResponse is the response to the status endpoint. type StatusResponse struct { APIResponse - Version string `json:"version"` - Packages map[string]repository.State `json:"packages"` - ApmInjectionStatus APMInjectionStatus `json:"apm_injection_status"` - RemoteConfigState []*pbgo.PackageState `json:"remote_config_state"` + Version string `json:"version"` + Packages map[string]PackageState `json:"packages"` + ApmInjectionStatus APMInjectionStatus `json:"apm_injection_status"` + RemoteConfigState []*pbgo.PackageState `json:"remote_config_state"` } // APMInjectionStatus contains the instrumentation status of the APM injection. @@ -87,6 +86,7 @@ func (l *localAPIImpl) handler() http.Handler { r.HandleFunc("/{package}/config_experiment/stop", l.stopConfigExperiment).Methods(http.MethodPost) r.HandleFunc("/{package}/config_experiment/promote", l.promoteConfigExperiment).Methods(http.MethodPost) r.HandleFunc("/{package}/install", l.install).Methods(http.MethodPost) + r.HandleFunc("/{package}/remove", l.remove).Methods(http.MethodPost) return r } @@ -133,11 +133,11 @@ func (l *localAPIImpl) setCatalog(w http.ResponseWriter, r *http.Request) { l.daemon.SetCatalog(catalog) } -// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/experiment/start -d '{"version":"1.21.5"}' +// example: curl -X POST --unix-socket /opt/datadog-packages/run/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/experiment/start -d '{"version":"1.21.5"}' func (l *localAPIImpl) startExperiment(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") - var request taskWithVersionParams + var request experimentTaskParams var response APIResponse defer func() { _ = json.NewEncoder(w).Encode(response) @@ -163,7 +163,7 @@ func (l *localAPIImpl) startExperiment(w http.ResponseWriter, r *http.Request) { } } -// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/experiment/stop -d '{}' +// example: curl -X POST --unix-socket /opt/datadog-packages/run/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/experiment/stop -d '{}' func (l *localAPIImpl) stopExperiment(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") @@ -180,7 +180,7 @@ func (l *localAPIImpl) stopExperiment(w http.ResponseWriter, r *http.Request) { } } -// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/experiment/promote -d '{}' +// example: curl -X POST --unix-socket /opt/datadog-packages/run/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/experiment/promote -d '{}' func (l *localAPIImpl) promoteExperiment(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") @@ -197,11 +197,11 @@ func (l *localAPIImpl) promoteExperiment(w http.ResponseWriter, r *http.Request) } } -// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: 
application/json' http://installer/datadog-agent/config_experiment/start -d '{"version":"1.21.5"}' +// example: curl -X POST --unix-socket /opt/datadog-packages/run/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/config_experiment/start -d '{"version":"1.21.5"}' func (l *localAPIImpl) startConfigExperiment(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") - var request taskWithVersionParams + var request experimentTaskParams var response APIResponse defer func() { _ = json.NewEncoder(w).Encode(response) @@ -220,7 +220,7 @@ func (l *localAPIImpl) startConfigExperiment(w http.ResponseWriter, r *http.Requ } } -// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/config_experiment/stop -d '{}' +// example: curl -X POST --unix-socket /opt/datadog-packages/run/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/config_experiment/stop -d '{}' func (l *localAPIImpl) stopConfigExperiment(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") @@ -237,7 +237,7 @@ func (l *localAPIImpl) stopConfigExperiment(w http.ResponseWriter, r *http.Reque } } -// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/config_experiment/promote -d '{}' +// example: curl -X POST --unix-socket /opt/datadog-packages/run/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/config_experiment/promote -d '{}' func (l *localAPIImpl) promoteConfigExperiment(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") @@ -254,11 +254,11 @@ func (l *localAPIImpl) promoteConfigExperiment(w http.ResponseWriter, r *http.Re } } -// example: curl -X POST --unix-socket /opt/datadog-packages/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/install -d '{"version":"1.21.5"}' +// example: curl -X POST --unix-socket /opt/datadog-packages/run/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/install -d '{"version":"1.21.5"}' func (l *localAPIImpl) install(w http.ResponseWriter, r *http.Request) { pkg := mux.Vars(r)["package"] w.Header().Set("Content-Type", "application/json") - var request taskWithVersionParams + var request experimentTaskParams var response APIResponse defer func() { _ = json.NewEncoder(w).Encode(response) @@ -289,12 +289,41 @@ func (l *localAPIImpl) install(w http.ResponseWriter, r *http.Request) { } } +// example: curl -X POST --unix-socket /opt/datadog-packages/run/installer.sock -H 'Content-Type: application/json' http://installer/datadog-agent/remove -d '{}' +func (l *localAPIImpl) remove(w http.ResponseWriter, r *http.Request) { + pkg := mux.Vars(r)["package"] + w.Header().Set("Content-Type", "application/json") + var request experimentTaskParams + var response APIResponse + defer func() { + _ = json.NewEncoder(w).Encode(response) + }() + var err error + if r.ContentLength > 0 { + err = json.NewDecoder(r.Body).Decode(&request) + if err != nil { + w.WriteHeader(http.StatusBadRequest) + response.Error = &APIError{Message: err.Error()} + return + } + } + + log.Infof("Received local request to remove package %s", pkg) + err = l.daemon.Remove(r.Context(), pkg) + if err != nil { + 
w.WriteHeader(http.StatusInternalServerError) + response.Error = &APIError{Message: err.Error()} + return + } +} + // LocalAPIClient is a client to interact with the locally exposed daemon API. type LocalAPIClient interface { Status() (StatusResponse, error) SetCatalog(catalog string) error Install(pkg, version string) error + Remove(pkg string) error StartExperiment(pkg, version string) error StopExperiment(pkg string) error PromoteExperiment(pkg string) error @@ -359,7 +388,7 @@ func (c *localAPIClientImpl) SetCatalog(catalog string) error { // StartExperiment starts an experiment for a package. func (c *localAPIClientImpl) StartExperiment(pkg, version string) error { - params := taskWithVersionParams{ + params := experimentTaskParams{ Version: version, } body, err := json.Marshal(params) @@ -438,7 +467,7 @@ func (c *localAPIClientImpl) PromoteExperiment(pkg string) error { // StartConfigExperiment starts a config experiment for a package. func (c *localAPIClientImpl) StartConfigExperiment(pkg, version string) error { - params := taskWithVersionParams{ + params := experimentTaskParams{ Version: version, } body, err := json.Marshal(params) @@ -517,7 +546,7 @@ func (c *localAPIClientImpl) PromoteConfigExperiment(pkg string) error { // Install installs a package with a specific version. func (c *localAPIClientImpl) Install(pkg, version string) error { - params := taskWithVersionParams{ + params := experimentTaskParams{ Version: version, } body, err := json.Marshal(params) @@ -545,3 +574,27 @@ func (c *localAPIClientImpl) Install(pkg, version string) error { } return nil } + +// Remove removes a package +func (c *localAPIClientImpl) Remove(pkg string) error { + req, err := http.NewRequest(http.MethodPost, fmt.Sprintf("http://%s/%s/remove", c.addr, pkg), nil) + if err != nil { + return err + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + resp, err := c.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + var response APIResponse + err = json.NewDecoder(resp.Body).Decode(&response) + if err != nil { + return err + } + if response.Error != nil { + return fmt.Errorf("error removing: %s", response.Error.Message) + } + return nil +} diff --git a/pkg/fleet/daemon/local_api_test.go b/pkg/fleet/daemon/local_api_test.go index fa7cf3713848c..806a72aca838c 100644 --- a/pkg/fleet/daemon/local_api_test.go +++ b/pkg/fleet/daemon/local_api_test.go @@ -41,6 +41,11 @@ func (m *testDaemon) Install(ctx context.Context, url string, installArgs []stri return args.Error(0) } +func (m *testDaemon) Remove(ctx context.Context, pkg string) error { + args := m.Called(ctx, pkg) + return args.Error(0) +} + func (m *testDaemon) StartExperiment(ctx context.Context, url string) error { args := m.Called(ctx, url) return args.Error(0) @@ -76,9 +81,9 @@ func (m *testDaemon) GetPackage(pkg string, version string) (Package, error) { return args.Get(0).(Package), args.Error(1) } -func (m *testDaemon) GetState() (map[string]repository.State, error) { +func (m *testDaemon) GetState() (map[string]PackageState, error) { args := m.Called() - return args.Get(0).(map[string]repository.State), args.Error(1) + return args.Get(0).(map[string]PackageState), args.Error(1) } func (m *testDaemon) GetRemoteConfigState() *pbgo.ClientUpdater { @@ -126,10 +131,12 @@ func TestAPIStatus(t *testing.T) { api := newTestLocalAPI(t) defer api.Stop() - installerState := map[string]repository.State{ + installerState := map[string]PackageState{ "pkg1": { - Stable: "1.0.0", - 
Experiment: "2.0.0", + Version: repository.State{ + Stable: "1.0.0", + Experiment: "2.0.0", + }, }, } api.i.On("GetState").Return(installerState, nil) diff --git a/pkg/fleet/daemon/remote_config.go b/pkg/fleet/daemon/remote_config.go index 1b34301343cf1..b7ad0c98e4a38 100644 --- a/pkg/fleet/daemon/remote_config.go +++ b/pkg/fleet/daemon/remote_config.go @@ -46,7 +46,7 @@ func newRemoteConfig(rcFetcher client.ConfigFetcher) (*remoteConfig, error) { } // Start starts the remote config client. -func (rc *remoteConfig) Start(handleCatalogUpdate handleCatalogUpdate, handleRemoteAPIRequest handleRemoteAPIRequest) { +func (rc *remoteConfig) Start(handleConfigsUpdate handleConfigsUpdate, handleCatalogUpdate handleCatalogUpdate, handleRemoteAPIRequest handleRemoteAPIRequest) { if rc.client == nil { return } @@ -55,6 +55,7 @@ func (rc *remoteConfig) Start(handleCatalogUpdate handleCatalogUpdate, handleRem // subscribe in a goroutine to avoid deadlocking the client go rc.client.Subscribe(state.ProductUpdaterTask, handleUpdaterTaskUpdate(handleRemoteAPIRequest)) } + rc.client.Subscribe(state.ProductInstallerConfig, handleInstallerConfigUpdate(handleConfigsUpdate)) rc.client.Subscribe(state.ProductUpdaterCatalogDD, handleUpdaterCatalogDDUpdate(handleCatalogUpdate, subscribeToTask)) rc.client.Start() } @@ -74,6 +75,40 @@ func (rc *remoteConfig) SetState(state *pbgo.ClientUpdater) { rc.client.SetInstallerState(state) } +type installerConfig struct { + ID string `json:"id"` + Configs json.RawMessage `json:"configs"` +} + +type handleConfigsUpdate func(configs map[string]installerConfig) error + +func handleInstallerConfigUpdate(h handleConfigsUpdate) func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) { + return func(configs map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)) { + installerConfigs := map[string]installerConfig{} + for id, config := range configs { + var installerConfig installerConfig + err := json.Unmarshal(config.Config, &installerConfig) + if err != nil { + log.Errorf("could not unmarshal installer config: %s", err) + applyStateCallback(id, state.ApplyStatus{State: state.ApplyStateError, Error: err.Error()}) + return + } + installerConfigs[installerConfig.ID] = installerConfig + } + err := h(installerConfigs) + if err != nil { + log.Errorf("could not update installer configs: %s", err) + for id := range configs { + applyStateCallback(id, state.ApplyStatus{State: state.ApplyStateError, Error: err.Error()}) + } + return + } + for id := range configs { + applyStateCallback(id, state.ApplyStatus{State: state.ApplyStateAcknowledged}) + } + } +} + // Package represents a downloadable package. 
type Package struct { Name string `json:"package"` @@ -164,6 +199,9 @@ func validatePackage(pkg Package) error { } const ( + methodInstallPackage = "install_package" + methodUninstallPackage = "uninstall_package" + methodStartExperiment = "start_experiment" methodStopExperiment = "stop_experiment" methodPromoteExperiment = "promote_experiment" @@ -191,11 +229,16 @@ type expectedState struct { ExperimentConfig string `json:"experiment_config"` } -type taskWithVersionParams struct { +type experimentTaskParams struct { Version string `json:"version"` InstallArgs []string `json:"install_args"` } +type installPackageTaskParams struct { + Version string `json:"version"` + ApmInstrumentation string `json:"apm_instrumentation"` +} + type handleRemoteAPIRequest func(request remoteAPIRequest) error func handleUpdaterTaskUpdate(h handleRemoteAPIRequest) func(map[string]state.RawConfig, func(cfgPath string, status state.ApplyStatus)) { diff --git a/pkg/fleet/installer/env/env.go b/pkg/fleet/installer/env/env.go index 094003754ddb3..31dd619a85b41 100644 --- a/pkg/fleet/installer/env/env.go +++ b/pkg/fleet/installer/env/env.go @@ -35,8 +35,6 @@ const ( envAgentMajorVersion = "DD_AGENT_MAJOR_VERSION" envAgentMinorVersion = "DD_AGENT_MINOR_VERSION" envApmLanguages = "DD_APM_INSTRUMENTATION_LANGUAGES" - envCDNLocalDirPath = "DD_INSTALLER_DEBUG_CDN_LOCAL_DIR_PATH" - envCDNEnabled = "DD_INSTALLER_CDN_ENABLED" envAgentUserName = "DD_AGENT_USER_NAME" // envAgentUserNameCompat provides compatibility with the original MSI parameter name envAgentUserNameCompat = "DDAGENTUSER_NAME" @@ -127,9 +125,6 @@ type Env struct { InstallScript InstallScriptEnv - CDNEnabled bool - CDNLocalDirPath string - Tags []string Hostname string @@ -199,9 +194,6 @@ func FromEnv() *Env { APMInstrumentationEnabled: getEnvOrDefault(envApmInstrumentationEnabled, APMInstrumentationNotSet), }, - CDNEnabled: strings.ToLower(os.Getenv(envCDNEnabled)) == "true", - CDNLocalDirPath: getEnvOrDefault(envCDNLocalDirPath, ""), - Tags: append( strings.FieldsFunc(os.Getenv(envTags), splitFunc), strings.FieldsFunc(os.Getenv(envExtraTags), splitFunc)..., @@ -244,6 +236,9 @@ func (e *Env) ToEnv() []string { if e.RegistryPassword != "" { env = append(env, envRegistryPassword+"="+e.RegistryPassword) } + if e.InstallScript.APMInstrumentationEnabled != "" { + env = append(env, envApmInstrumentationEnabled+"="+e.InstallScript.APMInstrumentationEnabled) + } if len(e.ApmLibraries) > 0 { libraries := []string{} for l, v := range e.ApmLibraries { @@ -354,3 +349,11 @@ func getProxySetting(ddEnv string, env string) string { ), ) } + +// ValidateAPMInstrumentationEnabled validates the value of the DD_APM_INSTRUMENTATION_ENABLED environment variable. 
+func ValidateAPMInstrumentationEnabled(value string) error { + if value != APMInstrumentationEnabledAll && value != APMInstrumentationEnabledDocker && value != APMInstrumentationEnabledHost && value != APMInstrumentationNotSet { + return fmt.Errorf("invalid value for %s: %s", envApmInstrumentationEnabled, value) + } + return nil +} diff --git a/pkg/fleet/installer/installer.go b/pkg/fleet/installer/installer.go index 179cad03a2834..6faa5599347fe 100644 --- a/pkg/fleet/installer/installer.go +++ b/pkg/fleet/installer/installer.go @@ -8,6 +8,7 @@ package installer import ( "context" + "encoding/json" "errors" "fmt" "os" @@ -17,9 +18,9 @@ import ( "sync" "time" - "github.com/DataDog/datadog-agent/pkg/fleet/internal/cdn" "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" "github.com/DataDog/datadog-agent/pkg/fleet/telemetry" + "gopkg.in/yaml.v3" "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" installerErrors "github.com/DataDog/datadog-agent/pkg/fleet/installer/errors" @@ -56,7 +57,7 @@ type Installer interface { RemoveExperiment(ctx context.Context, pkg string) error PromoteExperiment(ctx context.Context, pkg string) error - InstallConfigExperiment(ctx context.Context, pkg string, version string) error + InstallConfigExperiment(ctx context.Context, pkg string, version string, rawConfig []byte) error RemoveConfigExperiment(ctx context.Context, pkg string) error PromoteConfigExperiment(ctx context.Context, pkg string) error @@ -73,7 +74,6 @@ type installerImpl struct { m sync.Mutex env *env.Env - cdn *cdn.CDN db *db.PackagesDB downloader *oci.Downloader packages *repository.Repositories @@ -93,13 +93,8 @@ func NewInstaller(env *env.Env) (Installer, error) { if err != nil { return nil, fmt.Errorf("could not create packages db: %w", err) } - cdn, err := cdn.New(env, filepath.Join(paths.RunPath, "rc_cmd")) - if err != nil { - return nil, fmt.Errorf("could not create CDN client: %w", err) - } return &installerImpl{ env: env, - cdn: cdn, db: db, downloader: oci.NewDownloader(env, env.HTTPClient()), packages: repository.NewRepositories(paths.PackagesPath, paths.LocksPath), @@ -169,10 +164,6 @@ func (i *installerImpl) Install(ctx context.Context, url string, args []string) span.SetResourceName(pkg.Name) span.SetTag("package_version", pkg.Version) } - err = i.preparePackage(ctx, pkg.Name, args) // Preinst - if err != nil { - return fmt.Errorf("could not prepare package: %w", err) - } dbPkg, err := i.db.GetPackage(pkg.Name) if err != nil && !errors.Is(err, db.ErrPackageNotFound) { return fmt.Errorf("could not get package: %w", err) @@ -181,6 +172,10 @@ func (i *installerImpl) Install(ctx context.Context, url string, args []string) log.Infof("package %s version %s is already installed", pkg.Name, pkg.Version) return nil } + err = i.preparePackage(ctx, pkg.Name, args) // Preinst + if err != nil { + return fmt.Errorf("could not prepare package: %w", err) + } err = checkAvailableDiskSpace(i.packages, pkg) if err != nil { return fmt.Errorf("not enough disk space: %w", err) @@ -339,24 +334,10 @@ func (i *installerImpl) PromoteExperiment(ctx context.Context, pkg string) error } // InstallConfigExperiment installs an experiment on top of an existing package. 
-func (i *installerImpl) InstallConfigExperiment(ctx context.Context, pkg string, version string) error { +func (i *installerImpl) InstallConfigExperiment(ctx context.Context, pkg string, version string, rawConfig []byte) error { i.m.Lock() defer i.m.Unlock() - config, err := i.cdn.Get(ctx, pkg) - if err != nil { - return installerErrors.Wrap( - installerErrors.ErrDownloadFailed, - fmt.Errorf("could not get cdn config: %w", err), - ) - } - if config.State().GetVersion() != version { - return installerErrors.Wrap( - installerErrors.ErrDownloadFailed, - fmt.Errorf("version mismatch: expected %s, got %s", config.State().GetVersion(), version), - ) - } - tmpDir, err := i.packages.MkdirTemp() if err != nil { return installerErrors.Wrap( @@ -366,7 +347,7 @@ func (i *installerImpl) InstallConfigExperiment(ctx context.Context, pkg string, } defer os.RemoveAll(tmpDir) - err = config.Write(tmpDir) + err = i.writeConfig(tmpDir, rawConfig) if err != nil { return installerErrors.Wrap( installerErrors.ErrFilesystemIssue, @@ -574,18 +555,9 @@ func (i *installerImpl) close() error { } i.db = nil } - if i.cdn != nil { - if cdnErr := i.cdn.Close(); cdnErr != nil { - cdnErr = fmt.Errorf("failed to close Remote Config cdn: %w", cdnErr) - errs = append(errs, cdnErr) - } - i.cdn = nil - } - if len(errs) > 0 { return errors.Join(errs...) } - return nil } @@ -667,37 +639,67 @@ func (i *installerImpl) removePackage(ctx context.Context, pkg string) error { } func (i *installerImpl) configurePackage(ctx context.Context, pkg string) (err error) { - if !i.env.RemotePolicies { + span, _ := telemetry.StartSpanFromContext(ctx, "configure_package") + defer func() { span.Finish(err) }() + // TODO: Windows support + if runtime.GOOS == "windows" { return nil } + tmpDir, err := i.configs.MkdirTemp() + if err != nil { + return fmt.Errorf("could not create temporary directory: %w", err) + } + defer os.RemoveAll(tmpDir) + err = i.configs.Create(pkg, "empty", tmpDir) + if err != nil { + return fmt.Errorf("could not create %s repository: %w", pkg, err) + } + return nil +} - span, _ := telemetry.StartSpanFromContext(ctx, "configure_package") - defer func() { span.Finish(err) }() +var ( + allowedConfigFiles = []string{ + "datadog.yaml", + "security-agent.yaml", + "system-probe.yaml", + "libraries_config.yaml", + "conf.d/*.yaml", + } +) - switch pkg { - case packageDatadogAgent, packageAPMInjector, packageAPMLibraries: - config, err := i.cdn.Get(ctx, pkg) +func configNameAllowed(file string) bool { + for _, allowedFile := range allowedConfigFiles { + match, err := filepath.Match(allowedFile, file) if err != nil { - return fmt.Errorf("could not get %s CDN config: %w", pkg, err) + return false } - tmpDir, err := i.configs.MkdirTemp() - if err != nil { - return fmt.Errorf("could not create temporary directory: %w", err) + if match { + return true } - defer os.RemoveAll(tmpDir) + } + return false +} - err = config.Write(tmpDir) +func (i *installerImpl) writeConfig(dir string, rawConfig []byte) error { + var configs map[string]interface{} + err := json.Unmarshal(rawConfig, &configs) + if err != nil { + return fmt.Errorf("could not unmarshal config: %w", err) + } + for file, config := range configs { + if !configNameAllowed(file) { + return fmt.Errorf("config file %s is not allowed", file) + } + serializedConfig, err := yaml.Marshal(config) if err != nil { - return fmt.Errorf("could not write %s config: %w", pkg, err) + return fmt.Errorf("could not marshal config: %w", err) } - err = i.configs.Create(pkg, config.State().GetVersion(), 
tmpDir) + err = os.WriteFile(filepath.Join(dir, file), serializedConfig, 0644) if err != nil { - return fmt.Errorf("could not create %s repository: %w", pkg, err) + return fmt.Errorf("could not write config file: %w", err) } - return nil - default: - return nil } + return nil } const ( diff --git a/pkg/fleet/installer/installer_test.go b/pkg/fleet/installer/installer_test.go index a98deece97ee0..1527993014605 100644 --- a/pkg/fleet/installer/installer_test.go +++ b/pkg/fleet/installer/installer_test.go @@ -31,6 +31,7 @@ type testPackageManager struct { func newTestPackageManager(t *testing.T, s *fixtures.Server, rootPath string, locksPath string) *testPackageManager { packages := repository.NewRepositories(rootPath, locksPath) + configs := repository.NewRepositories(t.TempDir(), t.TempDir()) db, err := db.New(filepath.Join(rootPath, "packages.db")) assert.NoError(t, err) return &testPackageManager{ @@ -39,6 +40,7 @@ func newTestPackageManager(t *testing.T, s *fixtures.Server, rootPath string, lo db: db, downloader: oci.NewDownloader(&env.Env{}, s.Client()), packages: packages, + configs: configs, userConfigsDir: t.TempDir(), packagesDir: rootPath, }, @@ -222,5 +224,4 @@ func TestPurge(t *testing.T) { assert.NoFileExists(t, filepath.Join(rootPath, "packages.db"), "purge should remove the packages database") assert.NoDirExists(t, rootPath, "purge should remove the packages directory") assert.Nil(t, installer.db, "purge should close the packages database") - assert.Nil(t, installer.cdn, "purge should close the CDN client") } diff --git a/pkg/fleet/installer/packages/datadog_agent.go b/pkg/fleet/installer/packages/datadog_agent.go index 7a07b99389ee0..d391b7a0cc44d 100644 --- a/pkg/fleet/installer/packages/datadog_agent.go +++ b/pkg/fleet/installer/packages/datadog_agent.go @@ -11,6 +11,7 @@ import ( "context" "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -19,6 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/fleet/telemetry" "github.com/DataDog/datadog-agent/pkg/util/installinfo" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/version" ) const ( @@ -75,11 +77,6 @@ func PrepareAgent(ctx context.Context) (err error) { span, ctx := telemetry.StartSpanFromContext(ctx, "prepare_agent") defer func() { span.Finish(err) }() - err = removeDebRPMPackage(ctx, "datadog-agent") - if err != nil { - return fmt.Errorf("failed to remove deb/rpm datadog-agent package: %w", err) - } - for _, unit := range stableUnits { if err := stopUnit(ctx, unit); err != nil { log.Warnf("Failed to stop %s: %s", unit, err) @@ -129,6 +126,13 @@ func SetupAgent(ctx context.Context, _ []string) (err error) { if err = chownRecursive("/opt/datadog-packages/datadog-agent/stable/", ddAgentUID, ddAgentGID, rootOwnedAgentPaths); err != nil { return fmt.Errorf("failed to chown /opt/datadog-packages/datadog-agent/stable/: %v", err) } + // Give root:datadog-agent permissions to system-probe and security-agent config files if they exist + if err = os.Chown("/etc/datadog-agent/system-probe.yaml", 0, ddAgentGID); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to chown /etc/datadog-agent/system-probe.yaml: %v", err) + } + if err = os.Chown("/etc/datadog-agent/security-agent.yaml", 0, ddAgentGID); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("failed to chown /etc/datadog-agent/security-agent.yaml: %v", err) + } if err = systemdReload(ctx); err != nil { return fmt.Errorf("failed to reload systemd daemon: %v", err) @@ -143,8 +147,7 @@ func SetupAgent(ctx 
context.Context, _ []string) (err error) { } // write installinfo before start, or the agent could write it - // TODO: add installer version properly - if err = installinfo.WriteInstallInfo("installer_package", "manual_update"); err != nil { + if err = installinfo.WriteInstallInfo("installer", fmt.Sprintf("installer-%s", version.AgentVersion), "manual_update"); err != nil { return fmt.Errorf("failed to write install info: %v", err) } @@ -210,8 +213,11 @@ func RemoveAgent(ctx context.Context) error { } func chownRecursive(path string, uid int, gid int, ignorePaths []string) error { - return filepath.Walk(path, func(p string, _ os.FileInfo, err error) error { + return filepath.WalkDir(path, func(p string, _ fs.DirEntry, err error) error { if err != nil { + if os.IsNotExist(err) { + return nil + } return err } relPath, err := filepath.Rel(path, p) @@ -223,7 +229,11 @@ func chownRecursive(path string, uid int, gid int, ignorePaths []string) error { return nil } } - return os.Chown(p, uid, gid) + err = os.Chown(p, uid, gid) + if err != nil && os.IsNotExist(err) { + return nil + } + return err }) } diff --git a/pkg/fleet/installer/packages/datadog_installer.go b/pkg/fleet/installer/packages/datadog_installer.go index 1e36bb462c1fd..9d527c11f0757 100644 --- a/pkg/fleet/installer/packages/datadog_installer.go +++ b/pkg/fleet/installer/packages/datadog_installer.go @@ -151,14 +151,6 @@ func SetupInstaller(ctx context.Context) (err error) { return fmt.Errorf("error creating %s: %w", systemdPath, err) } - // FIXME(Arthur): enable the daemon unit by default and use the same strategy as the system probe - if os.Getenv("DD_REMOTE_UPDATES") != "true" { - if err = systemdReload(ctx); err != nil { - return err - } - return nil - } - for _, unit := range installerUnits { if err = loadUnit(ctx, unit); err != nil { return err diff --git a/pkg/fleet/installer/packages/docker.go b/pkg/fleet/installer/packages/docker.go index 2b264fb7f9e02..dff8496927c1e 100644 --- a/pkg/fleet/installer/packages/docker.go +++ b/pkg/fleet/installer/packages/docker.go @@ -148,7 +148,9 @@ func (a *apmInjectorInstaller) verifyDockerRuntime(ctx context.Context) (err err return nil } - for i := 0; i < 3; i++ { + currentRuntime := "" + maxRetries := 10 + for i := 0; i < maxRetries; i++ { if i > 0 { time.Sleep(time.Second) } @@ -157,17 +159,26 @@ func (a *apmInjectorInstaller) verifyDockerRuntime(ctx context.Context) (err err cmd.Stdout = &outb err = cmd.Run() if err != nil { - if i < 2 { + if i < maxRetries { log.Debug("failed to verify docker runtime, retrying: ", err) } else { log.Warn("failed to verify docker runtime: ", err) } + // Reload Docker daemon again in case the signal was lost + if reloadErr := reloadDockerConfig(ctx); reloadErr != nil { + log.Warn("failed to reload docker daemon: ", reloadErr) + } } if strings.TrimSpace(outb.String()) == "dd-shim" { + span.SetTag("retries", i) + span.SetTag("docker_runtime", "dd-shim") return nil } + currentRuntime = strings.TrimSpace(outb.String()) } - err = fmt.Errorf("docker default runtime has not been set to injector docker runtime") + span.SetTag("retries", maxRetries) + span.SetTag("docker_runtime", currentRuntime) + err = fmt.Errorf("docker default runtime has not been set to injector docker runtime (is \"%s\")", currentRuntime) return err } @@ -209,13 +220,18 @@ func isDockerInstalled(ctx context.Context) bool { defer span.Finish(nil) // Docker is installed if the docker binary is in the PATH - _, err := exec.LookPath("docker") + dockerPath, err := exec.LookPath("docker") if err 
!= nil && errors.Is(err, exec.ErrNotFound) { return false } else if err != nil { log.Warn("installer: failed to check if docker is installed, assuming it isn't: ", err) return false } + span.SetTag("docker_path", dockerPath) + if strings.Contains(dockerPath, "/snap/") { + log.Warn("installer: docker is installed via snap, skipping docker instrumentation") + return false + } return true } diff --git a/pkg/fleet/installer/packages/embedded/datadog-agent-security-exp.service b/pkg/fleet/installer/packages/embedded/datadog-agent-security-exp.service index b593ece47463d..1c692d61bf6a9 100644 --- a/pkg/fleet/installer/packages/embedded/datadog-agent-security-exp.service +++ b/pkg/fleet/installer/packages/embedded/datadog-agent-security-exp.service @@ -11,7 +11,7 @@ PIDFile=/opt/datadog-packages/datadog-agent/experiment/run/security-agent.pid Restart=on-failure EnvironmentFile=-/etc/datadog-agent/environment Environment="DD_FLEET_POLICIES_DIR=/etc/datadog-agent/managed/datadog-agent/experiment" -ExecStart=/opt/datadog-packages/datadog-agent/experiment/embedded/bin/security-agent start -c /etc/datadog-agent/datadog.yaml --pidfile /opt/datadog-packages/datadog-agent/experiment/run/security-agent.pid +ExecStart=/opt/datadog-packages/datadog-agent/experiment/embedded/bin/security-agent start -c /etc/datadog-agent/datadog.yaml -c /etc/datadog-agent/security-agent.yaml --sysprobe-config /etc/datadog-agent/system-probe.yaml --pidfile /opt/datadog-packages/datadog-agent/experiment/run/security-agent.pid # Since systemd 229, should be in [Unit] but in order to support systemd <229, # it is also supported to have it here. StartLimitInterval=10 diff --git a/pkg/fleet/installer/packages/embedded/datadog-agent-security.service b/pkg/fleet/installer/packages/embedded/datadog-agent-security.service index 12c51f65446bc..fc0e18861aef5 100644 --- a/pkg/fleet/installer/packages/embedded/datadog-agent-security.service +++ b/pkg/fleet/installer/packages/embedded/datadog-agent-security.service @@ -11,7 +11,7 @@ PIDFile=/opt/datadog-packages/datadog-agent/stable/run/security-agent.pid Restart=on-failure EnvironmentFile=-/etc/datadog-agent/environment Environment="DD_FLEET_POLICIES_DIR=/etc/datadog-agent/managed/datadog-agent/stable" -ExecStart=/opt/datadog-packages/datadog-agent/stable/embedded/bin/security-agent start -c /etc/datadog-agent/datadog.yaml --pidfile /opt/datadog-packages/datadog-agent/stable/run/security-agent.pid +ExecStart=/opt/datadog-packages/datadog-agent/stable/embedded/bin/security-agent start -c /etc/datadog-agent/datadog.yaml -c /etc/datadog-agent/security-agent.yaml --sysprobe-config /etc/datadog-agent/system-probe.yaml --pidfile /opt/datadog-packages/datadog-agent/stable/run/security-agent.pid # Since systemd 229, should be in [Unit] but in order to support systemd <229, # it is also supported to have it here. 
StartLimitInterval=10 diff --git a/pkg/fleet/installer/repository/repository.go b/pkg/fleet/installer/repository/repository.go index b83a03d4e81d7..559bf88854641 100644 --- a/pkg/fleet/installer/repository/repository.go +++ b/pkg/fleet/installer/repository/repository.go @@ -7,7 +7,6 @@ package repository import ( - "encoding/json" "errors" "fmt" "io/fs" @@ -17,7 +16,6 @@ import ( "github.com/DataDog/gopsutil/process" - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -64,9 +62,6 @@ type Repository struct { type State struct { Stable string Experiment string - - StablePoliciesState *pbgo.PoliciesState - ExperimentPoliciesState *pbgo.PoliciesState } // HasStable returns true if the repository has a stable package. @@ -103,51 +98,12 @@ func (r *Repository) GetState() (State, error) { if experiment == stable { experiment = "" } - - // Load the policies state - stablePoliciesState, err := r.loadPoliciesMetadata(stable) - if err != nil { - return State{}, fmt.Errorf("could not load stable policies state: %w", err) - } - - experimentPoliciesState, err := r.loadPoliciesMetadata(experiment) - if err != nil { - return State{}, fmt.Errorf("could not load experiment policies state: %w", err) - } - return State{ Stable: stable, Experiment: experiment, - - StablePoliciesState: stablePoliciesState, - ExperimentPoliciesState: experimentPoliciesState, }, nil } -func (r *Repository) loadPoliciesMetadata(version string) (*pbgo.PoliciesState, error) { - if version == "" { - return nil, nil - } - - statePath := filepath.Join(r.rootPath, version, "policy.metadata") - stateFile, err := os.ReadFile(statePath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return nil, nil - } - return nil, fmt.Errorf("could not read policies state: %w", err) - } - - state := &pbgo.PoliciesState{} - err = json.Unmarshal(stateFile, state) - if err != nil { - return nil, fmt.Errorf("could not unmarshal policies state: %w", err) - } - state.Version = version - - return state, nil -} - // Create creates a fresh new repository at the given root path // and moves the given stable source path to the repository as the first stable. // If a repository already exists at the given path, it is fully removed. 
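
Taken together, the installer.go and repository.go changes above replace the CDN-backed config fetch with raw config payloads delivered over remote config: the daemon passes the payload to InstallConfigExperiment, and the installer only writes it to disk if every target file name passes an allowlist before being serialized to YAML. The sketch below reproduces that allowlist-plus-YAML-write step in isolation so it can be exercised on its own; the payload, directory handling (the MkdirAll call), and the main function are illustrative additions for the standalone demo, not part of the patched code.

```go
// Minimal, standalone sketch of the file-name allowlist and YAML write step
// introduced in pkg/fleet/installer/installer.go. The payload below is
// hypothetical; real payloads arrive via the installer config RC product.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"

	"gopkg.in/yaml.v3"
)

var allowedConfigFiles = []string{
	"datadog.yaml",
	"security-agent.yaml",
	"system-probe.yaml",
	"libraries_config.yaml",
	"conf.d/*.yaml",
}

// configNameAllowed reports whether a target file name matches the allowlist.
func configNameAllowed(file string) bool {
	for _, pattern := range allowedConfigFiles {
		if ok, err := filepath.Match(pattern, file); err == nil && ok {
			return true
		}
	}
	return false
}

// writeRawConfig unmarshals a JSON payload of {file name -> config object}
// and writes each entry as YAML under dir, rejecting non-allowlisted names.
func writeRawConfig(dir string, rawConfig []byte) error {
	var configs map[string]interface{}
	if err := json.Unmarshal(rawConfig, &configs); err != nil {
		return fmt.Errorf("could not unmarshal config: %w", err)
	}
	for file, config := range configs {
		if !configNameAllowed(file) {
			return fmt.Errorf("config file %s is not allowed", file)
		}
		out, err := yaml.Marshal(config)
		if err != nil {
			return fmt.Errorf("could not marshal config: %w", err)
		}
		path := filepath.Join(dir, file)
		// MkdirAll is a convenience for this demo only (conf.d/ entries);
		// the real installer writes into a prepared temporary directory.
		if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
			return fmt.Errorf("could not create directory: %w", err)
		}
		if err := os.WriteFile(path, out, 0644); err != nil {
			return fmt.Errorf("could not write config file: %w", err)
		}
	}
	return nil
}

func main() {
	dir, err := os.MkdirTemp("", "installer-config-sketch")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// Hypothetical payload: sets two fields in datadog.yaml. A name outside
	// the allowlist (for example "../datadog.yaml") would be rejected.
	raw := []byte(`{"datadog.yaml": {"log_level": "debug", "logs_enabled": true}}`)
	if err := writeRawConfig(dir, raw); err != nil {
		panic(err)
	}
	written, _ := os.ReadFile(filepath.Join(dir, "datadog.yaml"))
	fmt.Printf("wrote datadog.yaml:\n%s", written)
}
```

In the patched code the write happens into a temporary directory created by the config repositories, which is then promoted as a config experiment; the sketch only demonstrates the validation and serialization, not the repository promotion.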
diff --git a/pkg/fleet/installer/setup/common/config.go b/pkg/fleet/installer/setup/common/config.go index fb4550edebb65..871bf1ded8ada 100644 --- a/pkg/fleet/installer/setup/common/config.go +++ b/pkg/fleet/installer/setup/common/config.go @@ -27,6 +27,18 @@ func writeConfigs(config Config, configDir string) error { if err != nil { return fmt.Errorf("could not write datadog.yaml: %w", err) } + if config.SecurityAgentYAML != nil { + err = writeConfig(filepath.Join(configDir, "security-agent.yaml"), config.SecurityAgentYAML, 0640, true) + if err != nil { + return fmt.Errorf("could not write security-agent.yaml: %w", err) + } + } + if config.SystemProbeYAML != nil { + err = writeConfig(filepath.Join(configDir, "system-probe.yaml"), config.SystemProbeYAML, 0640, true) + if err != nil { + return fmt.Errorf("could not write system-probe.yaml: %w", err) + } + } err = writeConfig(filepath.Join(configDir, injectTracerConfigFile), config.InjectTracerYAML, 0644, false) if err != nil { return fmt.Errorf("could not write tracer.yaml: %w", err) @@ -92,6 +104,10 @@ func writeConfig(path string, config any, perms os.FileMode, merge bool) error { type Config struct { // DatadogYAML is the content of the datadog.yaml file DatadogYAML DatadogConfig + // SecurityAgentYAML is the content of the security-agent.yaml file + SecurityAgentYAML *SecurityAgentConfig + // SystemProbeYAML is the content of the system-probe.yaml file + SystemProbeYAML *SystemProbeConfig // InjectTracerYAML is the content of the inject/tracer.yaml file InjectTracerYAML InjectTracerConfig // IntegrationConfigs is the content of the integration configuration files under conf.d/ @@ -110,6 +126,10 @@ type DatadogConfig struct { DJM DatadogConfigDJM `yaml:"djm,omitempty"` ProcessConfig DatadogConfigProcessConfig `yaml:"process_config,omitempty"` ExpectedTagsDuration string `yaml:"expected_tags_duration,omitempty"` + RemoteUpdates bool `yaml:"remote_updates,omitempty"` + RemotePolicies bool `yaml:"remote_policies,omitempty"` + Installer DatadogConfigInstaller `yaml:"installer,omitempty"` + DDURL string `yaml:"dd_url,omitempty"` } // DatadogConfigProxy represents the configuration for the proxy @@ -129,6 +149,17 @@ type DatadogConfigProcessConfig struct { ExpvarPort int `yaml:"expvar_port,omitempty"` } +// DatadogConfigInstaller represents the configuration for the installer +type DatadogConfigInstaller struct { + Registry DatadogConfigInstallerRegistry `yaml:"registry,omitempty"` +} + +// DatadogConfigInstallerRegistry represents the configuration for the installer registry +type DatadogConfigInstallerRegistry struct { + URL string `yaml:"url,omitempty"` + Auth string `yaml:"auth,omitempty"` +} + // IntegrationConfig represents the configuration for an integration under conf.d/ type IntegrationConfig struct { InitConfig []any `yaml:"init_config"` @@ -171,6 +202,27 @@ type InjectTracerConfigEnvVar struct { Value string `yaml:"value"` } +// SystemProbeConfig represents the configuration to write in /etc/datadog-agent/system-probe.yaml +type SystemProbeConfig struct { + RuntimeSecurityConfig RuntimeSecurityConfig `yaml:"runtime_security_config,omitempty"` +} + +// RuntimeSecurityConfig represents the configuration for the runtime security +type RuntimeSecurityConfig struct { + Enabled bool `yaml:"enabled,omitempty"` +} + +// SecurityAgentConfig represents the configuration to write in /etc/datadog-agent/security-agent.yaml +type SecurityAgentConfig struct { + ComplianceConfig SecurityAgentComplianceConfig `yaml:"compliance_config,omitempty"` + 
RuntimeSecurityConfig RuntimeSecurityConfig `yaml:"runtime_security_config,omitempty"` +} + +// SecurityAgentComplianceConfig represents the configuration for the compliance +type SecurityAgentComplianceConfig struct { + Enabled bool `yaml:"enabled,omitempty"` +} + // mergeConfig merges the current config with the setup config. // // The values are merged as follows: diff --git a/pkg/fleet/installer/setup/common/packages.go b/pkg/fleet/installer/setup/common/packages.go index c4c0c091ea048..8f74abc687021 100644 --- a/pkg/fleet/installer/setup/common/packages.go +++ b/pkg/fleet/installer/setup/common/packages.go @@ -40,6 +40,16 @@ var ( DatadogAPMLibraryDotNetPackage, DatadogAPMLibraryPHPPackage, } + + // ApmLibraries is a list of all the apm libraries + ApmLibraries = []string{ + DatadogAPMLibraryJavaPackage, + DatadogAPMLibraryPythonPackage, + DatadogAPMLibraryRubyPackage, + DatadogAPMLibraryJSPackage, + DatadogAPMLibraryDotNetPackage, + DatadogAPMLibraryPHPPackage, + } ) func resolvePackages(packages Packages) []packageWithVersion { diff --git a/pkg/fleet/installer/setup/common/services_nix.go b/pkg/fleet/installer/setup/common/services_nix.go new file mode 100644 index 0000000000000..3edd9e3bbde4e --- /dev/null +++ b/pkg/fleet/installer/setup/common/services_nix.go @@ -0,0 +1,44 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !windows + +package common + +import ( + "bytes" + "fmt" + "os/exec" +) + +// restartServices restarts the services that need to be restarted after a package upgrade or +// an install script re-run; because the configuration may have changed. +func (s *Setup) restartServices(pkgs []packageWithVersion) error { + if s.Config.DatadogYAML.RemotePolicies || s.Config.DatadogYAML.RemoteUpdates { + if err := restartService("datadog-installer.service"); err != nil { + return err + } + } + for _, pkg := range pkgs { + switch pkg.name { + case DatadogAgentPackage: + if err := restartService("datadog-agent.service"); err != nil { + return err + } + } + } + return nil +} + +func restartService(unit string) error { + cmd := exec.Command("systemctl", "restart", unit) + stderr := bytes.Buffer{} + cmd.Stderr = &stderr + err := cmd.Run() + if err != nil { + return fmt.Errorf("failed to restart %s (%s): %s", unit, err.Error(), stderr.String()) + } + return nil +} diff --git a/pkg/fleet/installer/setup/common/services_windows.go b/pkg/fleet/installer/setup/common/services_windows.go new file mode 100644 index 0000000000000..727cd49fb5b06 --- /dev/null +++ b/pkg/fleet/installer/setup/common/services_windows.go @@ -0,0 +1,13 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build windows + +package common + +func (s *Setup) restartServices(_ []packageWithVersion) error { + // Not implemented yet on Windows + return nil +} diff --git a/pkg/fleet/installer/setup/common/setup.go b/pkg/fleet/installer/setup/common/setup.go index b6e512d7dc5d6..5970f6bad40bc 100644 --- a/pkg/fleet/installer/setup/common/setup.go +++ b/pkg/fleet/installer/setup/common/setup.go @@ -19,6 +19,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" "github.com/DataDog/datadog-agent/pkg/fleet/installer/oci" "github.com/DataDog/datadog-agent/pkg/fleet/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/installinfo" "github.com/DataDog/datadog-agent/pkg/version" ) @@ -114,6 +115,10 @@ func (s *Setup) Run() (err error) { if err != nil { return fmt.Errorf("failed to install installer: %w", err) } + err = installinfo.WriteInstallInfo("installer", fmt.Sprintf("installer-%s", version.AgentVersion), fmt.Sprintf("install-%s.sh", s.flavor)) + if err != nil { + return fmt.Errorf("failed to write install info: %w", err) + } for _, p := range packages { url := oci.PackageURL(s.Env, p.name, p.version) err = s.installPackage(p.name, url) @@ -121,6 +126,10 @@ func (s *Setup) Run() (err error) { return fmt.Errorf("failed to install package %s: %w", url, err) } } + err = s.restartServices(packages) + if err != nil { + return fmt.Errorf("failed to restart services: %w", err) + } s.Out.WriteString(fmt.Sprintf("Successfully ran the %s install script in %s!\n", s.flavor, time.Since(s.start).Round(time.Second))) return nil } diff --git a/pkg/fleet/installer/setup/defaultscript/default_script.go b/pkg/fleet/installer/setup/defaultscript/default_script.go new file mode 100644 index 0000000000000..b6c29a67002b9 --- /dev/null +++ b/pkg/fleet/installer/setup/defaultscript/default_script.go @@ -0,0 +1,243 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package defaultscript contains default standard installation logic +package defaultscript + +import ( + "fmt" + "os" + "regexp" + "strings" + + "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" + "github.com/DataDog/datadog-agent/pkg/fleet/installer/setup/common" +) + +const ( + defaultAgentVersion = "7.60.1-1" + defaultInjectorVersion = "0.26.0-1" +) + +var ( + defaultLibraryVersions = map[string]string{ + common.DatadogAPMLibraryJavaPackage: "1.44.1-1", + common.DatadogAPMLibraryRubyPackage: "2.8.0-1", + common.DatadogAPMLibraryJSPackage: "5.30.0-1", + common.DatadogAPMLibraryDotNetPackage: "3.7.0-1", + common.DatadogAPMLibraryPythonPackage: "2.9.2-1", + common.DatadogAPMLibraryPHPPackage: "1.5.1-1", + } + + fullSemverRe = regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+`) + + // unsupportedEnvVars are the environment variables that are not supported by the default script + unsupportedEnvVars = []string{ + "DD_INSTALLER", + "DD_AGENT_FLAVOR", + "DD_UPGRADE", + "DD_INSTALL_ONLY", + "DD_FIPS_MODE", + } + + // supportedEnvVars are the environment variables that are supported by the default script to be reported + // in the span + supportedEnvVars = []string{ + "DD_ENV", + "DD_SITE", + "DD_TAGS", + "DD_HOST_TAGS", + "DD_URL", + "DD_REMOTE_UPDATES", + "DD_REMOTE_POLICIES", + "DD_FIPS_MODE", + "DD_SYSTEM_PROBE_ENSURE_CONFIG", + "DD_RUNTIME_SECURITY_CONFIG_ENABLED", + "DD_COMPLIANCE_CONFIG_ENABLED", + "DD_APM_INSTRUMENTATION_ENABLED", + "DD_APM_LIBRARIES", + "DD_NO_AGENT_INSTALL", + "DD_INSTALLER_REGISTRY_URL", + "DD_INSTALLER_REGISTRY_AUTH", + "DD_HOSTNAME", + "DD_PROXY_HTTP", + "DD_PROXY_HTTPS", + "DD_PROXY_NO_PROXY", + } +) + +// SetupDefaultScript sets up the default installation +func SetupDefaultScript(s *common.Setup) error { + // Telemetry + telemetrySupportedEnvVars(s, supportedEnvVars...) 
+ if err := exitOnUnsupportedEnvVars(unsupportedEnvVars...); err != nil { + return err + } + + // Installer management + setConfigInstallerDaemon(s) + setConfigInstallerRegistries(s) + + // Config management + setConfigTags(s) + setConfigSecurityProducts(s) + + if url, ok := os.LookupEnv("DD_URL"); ok { + s.Config.DatadogYAML.DDURL = url + } + + // Install packages + installAgentPackage(s) + installAPMPackages(s) + + return nil +} + +// setConfigSecurityProducts sets the configuration for the security products +func setConfigSecurityProducts(s *common.Setup) { + runtimeSecurityConfigEnabled, runtimeSecurityConfigEnabledOk := os.LookupEnv("DD_RUNTIME_SECURITY_CONFIG_ENABLED") + complianceConfigEnabled, complianceConfigEnabledOk := os.LookupEnv("DD_COMPLIANCE_CONFIG_ENABLED") + if runtimeSecurityConfigEnabledOk || complianceConfigEnabledOk { + s.Config.SecurityAgentYAML = &common.SecurityAgentConfig{} + s.Config.SystemProbeYAML = &common.SystemProbeConfig{} + } + if complianceConfigEnabledOk && strings.ToLower(complianceConfigEnabled) != "false" { + s.Config.SecurityAgentYAML.ComplianceConfig = common.SecurityAgentComplianceConfig{ + Enabled: true, + } + } + if runtimeSecurityConfigEnabledOk && strings.ToLower(runtimeSecurityConfigEnabled) != "false" { + s.Config.SecurityAgentYAML.RuntimeSecurityConfig = common.RuntimeSecurityConfig{ + Enabled: true, + } + s.Config.SystemProbeYAML.RuntimeSecurityConfig = common.RuntimeSecurityConfig{ + Enabled: true, + } + } +} + +// setConfigInstallerDaemon sets the daemon in the configuration +func setConfigInstallerDaemon(s *common.Setup) { + s.Config.DatadogYAML.RemoteUpdates = true + s.Config.DatadogYAML.RemotePolicies = true + if val, ok := os.LookupEnv("DD_REMOTE_UPDATES"); ok && strings.ToLower(val) == "false" { + s.Config.DatadogYAML.RemoteUpdates = false + } + if val, ok := os.LookupEnv("DD_REMOTE_POLICIES"); ok && strings.ToLower(val) == "false" { + s.Config.DatadogYAML.RemotePolicies = false + } +} + +// setConfigInstallerRegistries sets the registries in the configuration +func setConfigInstallerRegistries(s *common.Setup) { + registryURL, registryURLOk := os.LookupEnv("DD_INSTALLER_REGISTRY_URL") + registryAuth, registryAuthOk := os.LookupEnv("DD_INSTALLER_REGISTRY_AUTH") + if registryURLOk || registryAuthOk { + s.Config.DatadogYAML.Installer = common.DatadogConfigInstaller{ + Registry: common.DatadogConfigInstallerRegistry{ + URL: registryURL, + Auth: registryAuth, + }, + } + } +} + +// setConfigTags sets the tags in the configuration +func setConfigTags(s *common.Setup) { + if tags, ok := os.LookupEnv("DD_TAGS"); ok { + s.Config.DatadogYAML.Tags = strings.Split(tags, ",") + } else { + if tags, ok := os.LookupEnv("DD_HOST_TAGS"); ok { + s.Config.DatadogYAML.Tags = strings.Split(tags, ",") + } + } +} + +// installAgentPackage installs the agent package +func installAgentPackage(s *common.Setup) { + // Agent install + if _, ok := os.LookupEnv("DD_NO_AGENT_INSTALL"); !ok { + s.Packages.Install(common.DatadogAgentPackage, agentVersion(s.Env)) + } +} + +// installAPMPackages installs the APM packages +func installAPMPackages(s *common.Setup) { + // Injector install + _, apmInstrumentationEnabled := os.LookupEnv("DD_APM_INSTRUMENTATION_ENABLED") + if apmInstrumentationEnabled { + s.Packages.Install(common.DatadogAPMInjectPackage, defaultInjectorVersion) + } + + // Libraries install + _, installAllAPMLibraries := s.Env.ApmLibraries["all"] + for _, library := range common.ApmLibraries { + lang := packageToLanguage(library) + _, installLibrary := 
s.Env.ApmLibraries[lang] + if (installAllAPMLibraries || len(s.Env.ApmLibraries) == 0 && apmInstrumentationEnabled) && library != common.DatadogAPMLibraryPHPPackage || installLibrary { + s.Packages.Install(library, getLibraryVersion(s.Env, library)) + } + } +} + +// packageToLanguage returns the language of an APM package +func packageToLanguage(packageName string) env.ApmLibLanguage { + lang, found := strings.CutPrefix(packageName, "datadog-apm-library-") + if !found { + return "" + } + return env.ApmLibLanguage(lang) +} + +// getLibraryVersion returns the version of the library to install +// It uses the version from the environment if available, otherwise it uses the default version. +// Maybe we should only use the default version? +func getLibraryVersion(env *env.Env, library string) string { + version := "latest" + if defaultVersion, ok := defaultLibraryVersions[library]; ok { + version = defaultVersion + } + + apmLibVersion := env.ApmLibraries[packageToLanguage(library)] + if apmLibVersion == "" { + return version + } + + versionTag, _ := strings.CutPrefix(string(apmLibVersion), "v") + if fullSemverRe.MatchString(versionTag) { + return versionTag + "-1" + } + return versionTag +} + +func exitOnUnsupportedEnvVars(envVars ...string) error { + var unsupported []string + for _, envVar := range envVars { + if _, ok := os.LookupEnv(envVar); ok { + unsupported = append(unsupported, envVar) + } + } + if len(unsupported) > 0 { + return fmt.Errorf("unsupported environment variables: %s, exiting setup", strings.Join(unsupported, ", ")) + } + return nil +} + +func telemetrySupportedEnvVars(s *common.Setup, envVars ...string) { + for _, envVar := range envVars { + s.Span.SetTag(fmt.Sprintf("env.%s", envVar), os.Getenv(envVar)) + } +} + +func agentVersion(e *env.Env) string { + minorVersion := e.AgentMinorVersion + if strings.Contains(minorVersion, ".") && !strings.HasSuffix(minorVersion, "-1") { + minorVersion = minorVersion + "-1" + } + if minorVersion != "" { + return "7." 
+ minorVersion + } + return defaultAgentVersion +} diff --git a/pkg/fleet/installer/setup/djm/databricks.go b/pkg/fleet/installer/setup/djm/databricks.go index e1e699f65a2de..7d868b585339e 100644 --- a/pkg/fleet/installer/setup/djm/databricks.go +++ b/pkg/fleet/installer/setup/djm/databricks.go @@ -121,6 +121,7 @@ func setupCommonHostTags(s *common.Setup) { return clusterNameRegex.ReplaceAllString(v, "_") }) setIfExists(s, "DB_CLUSTER_ID", "databricks_cluster_id", nil) + setIfExists(s, "DATABRICKS_WORKSPACE", "databricks_workspace", nil) // dupes for backward compatibility setIfExists(s, "DB_CLUSTER_ID", "cluster_id", nil) @@ -133,6 +134,7 @@ func setupCommonHostTags(s *common.Setup) { setHostTag(s, "jobid", jobID) setHostTag(s, "runid", runID) } + setHostTag(s, "data_workload_monitoring_trial", "true") } func getJobAndRunIDs() (jobID, runID string, ok bool) { diff --git a/pkg/fleet/installer/setup/djm/databricks_test.go b/pkg/fleet/installer/setup/djm/databricks_test.go index 0bd7d183122cf..a57ec60fb3163 100644 --- a/pkg/fleet/installer/setup/djm/databricks_test.go +++ b/pkg/fleet/installer/setup/djm/databricks_test.go @@ -27,14 +27,16 @@ func TestSetupCommonHostTags(t *testing.T) { { name: "basic fields with formatting", env: map[string]string{ - "DB_DRIVER_IP": "192.168.1.100", - "DB_INSTANCE_TYPE": "m4.xlarge", - "DB_IS_JOB_CLUSTER": "true", - "DD_JOB_NAME": "example,'job,name", - "DB_CLUSTER_NAME": "example[,'job]name", - "DB_CLUSTER_ID": "cluster123", + "DB_DRIVER_IP": "192.168.1.100", + "DB_INSTANCE_TYPE": "m4.xlarge", + "DB_IS_JOB_CLUSTER": "true", + "DD_JOB_NAME": "example,'job,name", + "DB_CLUSTER_NAME": "example[,'job]name", + "DB_CLUSTER_ID": "cluster123", + "DATABRICKS_WORKSPACE": "example_workspace", }, wantTags: []string{ + "data_workload_monitoring_trial:true", "spark_host_ip:192.168.1.100", "databricks_instance_type:m4.xlarge", "databricks_is_job_cluster:true", @@ -43,6 +45,7 @@ func TestSetupCommonHostTags(t *testing.T) { "databricks_cluster_id:cluster123", "cluster_id:cluster123", "cluster_name:example___job_name", + "databricks_workspace:example_workspace", }, }, { @@ -51,6 +54,7 @@ func TestSetupCommonHostTags(t *testing.T) { "DB_CLUSTER_NAME": "job-123-run-456", }, wantTags: []string{ + "data_workload_monitoring_trial:true", "databricks_cluster_name:job-123-run-456", "cluster_name:job-123-run-456", "jobid:123", @@ -58,9 +62,11 @@ func TestSetupCommonHostTags(t *testing.T) { }, }, { - name: "Missing env vars results in no tags", - env: map[string]string{}, - wantTags: []string{}, + name: "Missing env vars results in no tags", + env: map[string]string{}, + wantTags: []string{ + "data_workload_monitoring_trial:true", + }, }, } diff --git a/pkg/fleet/installer/setup/djm/dataproc.go b/pkg/fleet/installer/setup/djm/dataproc.go new file mode 100644 index 0000000000000..26bfb0babe713 --- /dev/null +++ b/pkg/fleet/installer/setup/djm/dataproc.go @@ -0,0 +1,106 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package djm contains data-jobs-monitoring installation logic +package djm + +import ( + "cloud.google.com/go/compute/metadata" + "context" + "fmt" + "os" + + "github.com/DataDog/datadog-agent/pkg/fleet/installer/setup/common" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +const ( + dataprocInjectorVersion = "0.26.0-1" + dataprocJavaTracerVersion = "1.42.2-1" + dataprocAgentVersion = "7.58.2-1" +) + +var ( + tracerEnvConfigDataproc = []common.InjectTracerConfigEnvVar{ + { + Key: "DD_DATA_JOBS_ENABLED", + Value: "true", + }, + { + Key: "DD_INTEGRATIONS_ENABLED", + Value: "false", + }, + { + Key: "DD_DATA_JOBS_COMMAND_PATTERN", + Value: ".*org.apache.spark.deploy.*", + }, + { + Key: "DD_SPARK_APP_NAME_AS_SERVICE", + Value: "true", + }, + } +) + +// SetupDataproc sets up the DJM environment on Dataproc +func SetupDataproc(s *common.Setup) error { + + metadataClient := metadata.NewClient(nil) + s.Packages.Install(common.DatadogAgentPackage, dataprocAgentVersion) + s.Packages.Install(common.DatadogAPMInjectPackage, dataprocInjectorVersion) + s.Packages.Install(common.DatadogAPMLibraryJavaPackage, dataprocJavaTracerVersion) + + os.Setenv("DD_APM_INSTRUMENTATION_ENABLED", "host") + + hostname, err := os.Hostname() + if err != nil { + return fmt.Errorf("failed to get hostname: %w", err) + } + s.Config.DatadogYAML.Hostname = hostname + s.Config.DatadogYAML.DJM.Enabled = true + s.Config.InjectTracerYAML.AdditionalEnvironmentVariables = tracerEnvConfigDataproc + + // Ensure tags are always attached with the metrics + s.Config.DatadogYAML.ExpectedTagsDuration = "10m" + isMaster, clusterName, err := setupCommonDataprocHostTags(s, metadataClient) + if err != nil { + return fmt.Errorf("failed to set tags: %w", err) + } + if isMaster == "true" { + setupResourceManager(s, clusterName) + } + return nil +} + +func setupCommonDataprocHostTags(s *common.Setup, metadataClient *metadata.Client) (string, string, error) { + ctx := context.Background() + + clusterID, err := metadataClient.InstanceAttributeValueWithContext(ctx, "dataproc-cluster-uuid") + if err != nil { + return "", "", err + } + setHostTag(s, "cluster_id", clusterID) + setHostTag(s, "dataproc_cluster_id", clusterID) + setHostTag(s, "data_workload_monitoring_trial", "true") + + dataprocRole, err := metadataClient.InstanceAttributeValueWithContext(ctx, "dataproc-role") + if err != nil { + return "", "", err + } + isMaster := "false" + if dataprocRole == "Master" { + isMaster = "true" + } + setHostTag(s, "is_master_node", isMaster) + s.Span.SetTag("host."+"is_master_node", isMaster) + + clusterName, err := metadataClient.InstanceAttributeValueWithContext(ctx, "dataproc-cluster-name") + if err != nil { + log.Warn("failed to get clusterName, using clusterID instead") + return isMaster, clusterID, nil + } + setHostTag(s, "cluster_name", clusterName) + + return isMaster, clusterName, nil +} diff --git a/pkg/fleet/installer/setup/djm/dataproc_test.go b/pkg/fleet/installer/setup/djm/dataproc_test.go new file mode 100644 index 0000000000000..d60ef86a0ef15 --- /dev/null +++ b/pkg/fleet/installer/setup/djm/dataproc_test.go @@ -0,0 +1,89 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
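// Illustrative sketch of the GCE metadata lookups that setupCommonDataprocHostTags
// performs above; a hypothetical standalone program, not code from the diff. The
// attribute names match the ones queried there.
package main

import (
	"context"
	"fmt"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	ctx := context.Background()
	// A nil *http.Client makes the library fall back to its default client.
	client := metadata.NewClient(nil)

	// Dataproc publishes cluster metadata as instance attributes on every node.
	for _, attr := range []string{"dataproc-cluster-uuid", "dataproc-role", "dataproc-cluster-name"} {
		value, err := client.InstanceAttributeValueWithContext(ctx, attr)
		if err != nil {
			fmt.Printf("%s: lookup failed: %v\n", attr, err)
			continue
		}
		fmt.Printf("%s: %s\n", attr, value)
	}
}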
+ +// Package djm contains data-jobs-monitoring installation logic +package djm + +import ( + "cloud.google.com/go/compute/metadata" + "context" + "github.com/DataDog/datadog-agent/pkg/fleet/telemetry" + "io" + "net/http" + "strings" + "testing" + + "github.com/DataDog/datadog-agent/pkg/fleet/installer/setup/common" + "github.com/stretchr/testify/assert" +) + +type DynamicRoundTripper struct { + Handler func(req *http.Request) (*http.Response, error) +} + +func (d *DynamicRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return d.Handler(req) +} + +func TestSetupDataproc(t *testing.T) { + + mockResponses := map[string]string{ + "/computeMetadata/v1/instance/attributes/dataproc-cluster-uuid": "test-cluster-uuid", + "/computeMetadata/v1/instance/attributes/dataproc-role": "Master", + "/computeMetadata/v1/instance/attributes/dataproc-cluster-name": "test-cluster-name", + } + + mockRoundTripper := &DynamicRoundTripper{ + Handler: func(req *http.Request) (*http.Response, error) { + if value, found := mockResponses[req.URL.Path]; found { + return &http.Response{ + StatusCode: 200, + Body: io.NopCloser(strings.NewReader(value)), + Header: make(http.Header), + }, nil + } + return &http.Response{ + StatusCode: 404, + Body: io.NopCloser(strings.NewReader("")), + Header: make(http.Header), + }, nil + }, + } + + mockHTTPClient := &http.Client{Transport: mockRoundTripper} + + // Create a metadata client with the mocked HTTP client + mockMetadataClient := metadata.NewClient(mockHTTPClient) + + tests := []struct { + name string + wantTags []string + }{ + { + name: "master node", + wantTags: []string{ + "data_workload_monitoring_trial:true", + "cluster_id:test-cluster-uuid", + "dataproc_cluster_id:test-cluster-uuid", + "cluster_name:test-cluster-name", + "is_master_node:true", + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + span, _ := telemetry.StartSpanFromContext(context.Background(), "test") + s := &common.Setup{ + Span: span, + Ctx: context.Background(), + } + + _, _, err := setupCommonDataprocHostTags(s, mockMetadataClient) + assert.Nil(t, err) + assert.ElementsMatch(t, tt.wantTags, s.Config.DatadogYAML.Tags) + }) + } +} diff --git a/pkg/fleet/installer/setup/djm/emr.go b/pkg/fleet/installer/setup/djm/emr.go index c8003f4d70866..a449215dd54d6 100644 --- a/pkg/fleet/installer/setup/djm/emr.go +++ b/pkg/fleet/installer/setup/djm/emr.go @@ -96,7 +96,7 @@ func SetupEmr(s *common.Setup) error { return fmt.Errorf("failed to set tags: %w", err) } if isMaster { - setupEmrResourceManager(s, clusterName) + setupResourceManager(s, clusterName) } return nil } @@ -115,7 +115,7 @@ func setupCommonEmrHostTags(s *common.Setup) (bool, string, error) { setHostTag(s, "instance_group_id", info.InstanceGroupID) setHostTag(s, "is_master_node", strconv.FormatBool(info.IsMaster)) - s.Span.SetTag("host_tag."+"is_master_node", info.IsMaster) + s.Span.SetTag("host."+"is_master_node", info.IsMaster) extraInstanceInfoRaw, err := os.ReadFile(filepath.Join(emrInfoPath, "extraInstanceData.json")) if err != nil { @@ -130,14 +130,15 @@ func setupCommonEmrHostTags(s *common.Setup) (bool, string, error) { setHostTag(s, "cluster_id", extraInfo.JobFlowID) setHostTag(s, "emr_version", extraInfo.ReleaseLabel) s.Span.SetTag("emr_version", extraInfo.ReleaseLabel) + setHostTag(s, "data_workload_monitoring_trial", "true") - clusterName := resolveClusterName(s, extraInfo.JobFlowID) + clusterName := resolveEmrClusterName(s, extraInfo.JobFlowID) setHostTag(s, "cluster_name", clusterName) 
return info.IsMaster, clusterName, nil } -func setupEmrResourceManager(s *common.Setup, clusterName string) { +func setupResourceManager(s *common.Setup, clusterName string) { var sparkIntegration common.IntegrationConfig var yarnIntegration common.IntegrationConfig @@ -179,7 +180,7 @@ var executeCommandWithTimeout = func(s *common.Setup, command string, args ...st return output, nil } -func resolveClusterName(s *common.Setup, jobFlowID string) string { +func resolveEmrClusterName(s *common.Setup, jobFlowID string) string { var err error span, _ := telemetry.StartSpanFromContext(s.Ctx, "resolve.cluster_name") defer func() { span.Finish(err) }() diff --git a/pkg/fleet/installer/setup/djm/emr_test.go b/pkg/fleet/installer/setup/djm/emr_test.go index f5931399475fc..7fef6ab36eb26 100644 --- a/pkg/fleet/installer/setup/djm/emr_test.go +++ b/pkg/fleet/installer/setup/djm/emr_test.go @@ -21,10 +21,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/fleet/telemetry" ) -//go:embed testdata/instance.json +//go:embed testdata/emrInstance.json var instanceJSON string -//go:embed testdata/extraInstanceData.json +//go:embed testdata/emrExtraInstanceData.json var extraInstanceJSON string //go:embed testdata/emrDescribeClusterResponse.json @@ -60,6 +60,7 @@ func TestSetupEmr(t *testing.T) { { name: "basic fields json", wantTags: []string{ + "data_workload_monitoring_trial:true", "instance_group_id:ig-123", "is_master_node:true", "job_flow_id:j-456", diff --git a/pkg/fleet/installer/setup/djm/testdata/extraInstanceData.json b/pkg/fleet/installer/setup/djm/testdata/emrExtraInstanceData.json similarity index 100% rename from pkg/fleet/installer/setup/djm/testdata/extraInstanceData.json rename to pkg/fleet/installer/setup/djm/testdata/emrExtraInstanceData.json diff --git a/pkg/fleet/installer/setup/djm/testdata/instance.json b/pkg/fleet/installer/setup/djm/testdata/emrInstance.json similarity index 100% rename from pkg/fleet/installer/setup/djm/testdata/instance.json rename to pkg/fleet/installer/setup/djm/testdata/emrInstance.json diff --git a/pkg/fleet/installer/setup/setup.go b/pkg/fleet/installer/setup/setup.go index b435f3c76ae18..4a356182137bd 100644 --- a/pkg/fleet/installer/setup/setup.go +++ b/pkg/fleet/installer/setup/setup.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" "github.com/DataDog/datadog-agent/pkg/fleet/installer/setup/common" + "github.com/DataDog/datadog-agent/pkg/fleet/installer/setup/defaultscript" "github.com/DataDog/datadog-agent/pkg/fleet/installer/setup/djm" "github.com/DataDog/datadog-agent/pkg/fleet/internal/exec" "github.com/DataDog/datadog-agent/pkg/fleet/internal/paths" @@ -24,15 +25,17 @@ type flavor struct { } var flavors = map[string]flavor{ + "default": {path: "defaultscript/default_script.go", run: defaultscript.SetupDefaultScript}, "databricks": {path: "djm/databricks.go", run: djm.SetupDatabricks}, "emr": {path: "djm/emr.go", run: djm.SetupEmr}, + "dataproc": {path: "djm/dataproc.go", run: djm.SetupDataproc}, } // Setup installs Datadog. 
func Setup(ctx context.Context, env *env.Env, flavor string) error { f, ok := flavors[flavor] if !ok { - return fmt.Errorf("unknown flavor %s", flavor) + return fmt.Errorf("unknown flavor \"%s\"", flavor) } s, err := common.NewSetup(ctx, env, flavor, f.path, os.Stdout) if err != nil { diff --git a/pkg/fleet/internal/cdn/cdn.go b/pkg/fleet/internal/cdn/cdn.go deleted file mode 100644 index dcb57bf0ee17d..0000000000000 --- a/pkg/fleet/internal/cdn/cdn.go +++ /dev/null @@ -1,196 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package cdn provides access to the Remote Config CDN. -package cdn - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - - "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" - "github.com/DataDog/datadog-agent/pkg/fleet/telemetry" - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - "gopkg.in/yaml.v2" -) - -const ( - policyMetadataFilename = "policy.metadata" - doNotEditDisclaimer = `# This configuration was generated by Datadog's Fleet Automation. DO NOT EDIT.` -) - -var ( - // ErrProductNotSupported is returned when the product is not supported. - ErrProductNotSupported = errors.New("product not supported") -) - -// Config represents a configuration. -type Config interface { - State() *pbgo.PoliciesState - Write(dir string) error -} - -// fetcher provides access to the Remote Config CDN. -type fetcher interface { - get(ctx context.Context) ([][]byte, error) - close() error -} - -// CDN provides access to the Remote Config CDN. -type CDN struct { - fetcher fetcher - hostTagsGetter hostTagsGetter -} - -// New creates a new CDN and chooses the implementation depending -// on the environment -func New(env *env.Env, configDBPath string) (*CDN, error) { - cdn := CDN{ - hostTagsGetter: newHostTagsGetter(env), - } - - if runtime.GOOS == "windows" { - // There's an assumption on windows that some directories are already there - // but they are in fact created by the regular CDN implementation. Until - // there is a fix on windows we keep the previous CDN behaviour for them - fetcher, err := newHTTPFetcher(env, configDBPath) - if err != nil { - return nil, err - } - cdn.fetcher = fetcher - return &cdn, nil - } - - if !env.RemotePolicies { - // Remote policies are not enabled -- we don't need the CDN - // and we don't want to create the directories that the CDN - // implementation would create. We return a no-op CDN to avoid - // nil pointer dereference. - fetcher, err := newNoopFetcher() - if err != nil { - return nil, err - } - cdn.fetcher = fetcher - return &cdn, nil - } - - if env.CDNLocalDirPath != "" { - // Mock the CDN for local development or testing - fetcher, err := newLocalFetcher(env) - if err != nil { - return nil, err - } - cdn.fetcher = fetcher - return &cdn, nil - } - - if !env.CDNEnabled { - // Remote policies are enabled but we don't want to use the CDN - // as it's still in development. We use standard remote config calls - // instead (dubbed "direct" CDN). 
- fetcher, err := newRCFetcher(env, configDBPath) - if err != nil { - return nil, err - } - cdn.fetcher = fetcher - return &cdn, nil - } - - // Regular CDN with the cloudfront distribution - fetcher, err := newHTTPFetcher(env, configDBPath) - if err != nil { - return nil, err - } - cdn.fetcher = fetcher - return &cdn, nil -} - -// Get fetches the configuration for the given package. -func (c *CDN) Get(ctx context.Context, pkg string) (cfg Config, err error) { - span, _ := telemetry.StartSpanFromContext(ctx, "cdn.Get") - defer func() { - spanErr := err - if spanErr == ErrProductNotSupported { - spanErr = nil - } - span.Finish(spanErr) - }() - - switch pkg { - case "datadog-agent": - orderedLayers, err := c.fetcher.get(ctx) - if err != nil { - return nil, err - } - cfg, err = newAgentConfig(orderedLayers...) - if err != nil { - return nil, err - } - case "datadog-apm-inject": - orderedLayers, err := c.fetcher.get(ctx) - if err != nil { - return nil, err - } - cfg, err = newAPMSSIConfig(c.hostTagsGetter.get(), orderedLayers...) - if err != nil { - return nil, err - } - case "datadog-apm-libraries": - orderedLayers, err := c.fetcher.get(ctx) - if err != nil { - return nil, err - } - cfg, err = newAPMLibrariesConfig(c.hostTagsGetter.get(), orderedLayers...) - if err != nil { - return nil, err - } - default: - return nil, ErrProductNotSupported - } - - return cfg, nil -} - -// Close closes the CDN. -func (c *CDN) Close() error { - return c.fetcher.close() -} - -// writePolicyMetadata writes the policy metadata to the given directory -// and makes it world-readable -func writePolicyMetadata(config Config, dir string) error { - state := config.State() - stateBytes, err := json.Marshal(state) - if err != nil { - return fmt.Errorf("could not marshal state: %w", err) - } - err = os.WriteFile(filepath.Join(dir, policyMetadataFilename), stateBytes, 0444) - if err != nil { - return fmt.Errorf("could not write %s: %w", policyMetadataFilename, err) - } - return nil -} - -// marshalYAMLConfig marshals the config as YAML. -func marshalYAMLConfig(c map[string]interface{}) ([]byte, error) { - if len(c) == 0 { - return nil, nil - } - var b bytes.Buffer - b.WriteString(doNotEditDisclaimer) - b.WriteString("\n") - rawConfig, err := yaml.Marshal(c) - if err != nil { - return nil, err - } - b.Write(rawConfig) - return b.Bytes(), nil -} diff --git a/pkg/fleet/internal/cdn/cdn_http.go b/pkg/fleet/internal/cdn/cdn_http.go deleted file mode 100644 index 1e0c90b36d69a..0000000000000 --- a/pkg/fleet/internal/cdn/cdn_http.go +++ /dev/null @@ -1,102 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package cdn provides access to the Remote Config CDN. 
-package cdn - -import ( - "context" - "encoding/json" - - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" - "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/version" - "github.com/DataDog/go-tuf/data" -) - -type fetcherHTTP struct { - client *remoteconfig.HTTPClient - currentRootsVersion uint64 - hostTagsGetter hostTagsGetter - env *env.Env -} - -func newHTTPFetcher(env *env.Env, configDBPath string) (fetcher, error) { - client, err := remoteconfig.NewHTTPClient( - configDBPath, - env.Site, - env.APIKey, - version.AgentVersion, - ) - if err != nil { - return nil, err - } - return &fetcherHTTP{ - client: client, - currentRootsVersion: 1, - hostTagsGetter: newHostTagsGetter(env), - env: env, - }, nil -} - -// Close cleans up the CDN's resources -func (c *fetcherHTTP) close() error { - return c.client.Close() -} - -// get calls the Remote Config service to get the ordered layers. -func (c *fetcherHTTP) get(ctx context.Context) ([][]byte, error) { - agentConfigUpdate, err := c.client.GetCDNConfigUpdate( - ctx, - []string{"AGENT_CONFIG"}, - // Always send 0 since we are relying on the CDN cache state instead of our own tracer cache. This will fetch the latest configs from the cache/CDN everytime. - 0, - // Not using the roots; send the highest seen version of roots so don't received them all on every request - c.currentRootsVersion, - // Not using a client cache; fetch all the applicable target files every time. - []*pbgo.TargetFileMeta{}, - ) - if err != nil { - return nil, err - } - - if agentConfigUpdate == nil { - return nil, nil - } - - // Update CDN root versions - for _, root := range agentConfigUpdate.TUFRoots { - var signedRoot data.Signed - err = json.Unmarshal(root, &signedRoot) - if err != nil { - continue - } - var r data.Root - err = json.Unmarshal(signedRoot.Signed, &r) - if err != nil { - continue - } - if uint64(r.Version) > c.currentRootsVersion { - c.currentRootsVersion = uint64(r.Version) - } - } - - files := map[string][]byte{} - for path, content := range agentConfigUpdate.TargetFiles { - pathMatches := datadogConfigIDRegexp.FindStringSubmatch(path) - if len(pathMatches) != 2 { - log.Warnf("invalid config path: %s", path) - continue - } - files[pathMatches[1]] = content - } - - return getOrderedScopedLayers( - files, - getScopeExprVars(c.env, c.hostTagsGetter), - ) -} diff --git a/pkg/fleet/internal/cdn/cdn_local.go b/pkg/fleet/internal/cdn/cdn_local.go deleted file mode 100644 index f657d870ab7d3..0000000000000 --- a/pkg/fleet/internal/cdn/cdn_local.go +++ /dev/null @@ -1,53 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package cdn - -import ( - "context" - "fmt" - "os" - "path/filepath" - - "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" -) - -type fetcherLocal struct { - dirPath string -} - -// newfetcherLocal creates a new local CDN. 
-func newLocalFetcher(env *env.Env) (fetcher, error) { - return &fetcherLocal{ - dirPath: env.CDNLocalDirPath, - }, nil -} - -func (c *fetcherLocal) get(_ context.Context) (orderedLayers [][]byte, err error) { - f, err := os.ReadDir(c.dirPath) - if err != nil { - return nil, fmt.Errorf("couldn't read directory %s: %w", c.dirPath, err) - } - - files := map[string][]byte{} - for _, file := range f { - if file.IsDir() { - continue - } - - contents, err := os.ReadFile(filepath.Join(c.dirPath, file.Name())) - if err != nil { - return nil, fmt.Errorf("couldn't read file %s: %w", file.Name(), err) - } - - files[file.Name()] = contents - } - - return getOrderedScopedLayers(files, nil) -} - -func (c *fetcherLocal) close() error { - return nil -} diff --git a/pkg/fleet/internal/cdn/cdn_noop.go b/pkg/fleet/internal/cdn/cdn_noop.go deleted file mode 100644 index 9be9fb1ae3264..0000000000000 --- a/pkg/fleet/internal/cdn/cdn_noop.go +++ /dev/null @@ -1,30 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package cdn - -import ( - "context" - - "github.com/DataDog/datadog-agent/pkg/util/log" -) - -type fetcherNoop struct { -} - -// newNoopFetcher creates a new noop CDN. -func newNoopFetcher() (fetcher, error) { - return &fetcherNoop{}, nil -} - -func (c *fetcherNoop) get(_ context.Context) ([][]byte, error) { - log.Debug("Noop CDN get") - return nil, nil -} - -func (c *fetcherNoop) close() error { - log.Debug("Noop CDN close") - return nil -} diff --git a/pkg/fleet/internal/cdn/cdn_rc.go b/pkg/fleet/internal/cdn/cdn_rc.go deleted file mode 100644 index b0713047b586d..0000000000000 --- a/pkg/fleet/internal/cdn/cdn_rc.go +++ /dev/null @@ -1,169 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -package cdn - -import ( - "context" - "encoding/json" - "fmt" - "os" - "time" - - "github.com/DataDog/datadog-agent/comp/remote-config/rctelemetryreporter/rctelemetryreporterimpl" - remoteconfig "github.com/DataDog/datadog-agent/pkg/config/remote/service" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - pkghostname "github.com/DataDog/datadog-agent/pkg/util/hostname" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/version" - "github.com/DataDog/go-tuf/data" - "github.com/google/uuid" -) - -type fetcherRC struct { - rcService *remoteconfig.CoreAgentService - currentRootsVersion uint64 - clientUUID string - configDBPath string - firstRequest bool - hostTagsGetter hostTagsGetter - env *env.Env -} - -// newRCFetcher creates a new CDN fetcher with RC: it fetches the configuration from the remote config service instead of cloudfront -// note: naming is a bit misleading, it's not really a cdn, but we're following the convention -func newRCFetcher(env *env.Env, configDBPath string) (fetcher, error) { - ctx := context.Background() - ctx, cc := context.WithTimeout(ctx, 10*time.Second) - defer cc() - - ht := newHostTagsGetter(env) - hostname, err := pkghostname.Get(ctx) - if err != nil { - hostname = "unknown" - } - - // ensures the config db path exists - err = os.MkdirAll(configDBPath, 0755) - if err != nil { - return nil, err - } - - configDBPathTemp, err := os.MkdirTemp(configDBPath, "direct-*") - if err != nil { - return nil, err - } - - options := []remoteconfig.Option{ - remoteconfig.WithAPIKey(env.APIKey), - remoteconfig.WithConfigRootOverride(env.Site, ""), - remoteconfig.WithDirectorRootOverride(env.Site, ""), - remoteconfig.WithDatabaseFileName("remote-config.db"), - remoteconfig.WithDatabasePath(configDBPathTemp), - } - - service, err := remoteconfig.NewService( - pkgconfigsetup.Datadog(), // May not be filled as we don't read the config when we're not in the daemon, in which case we'll use the defaults - "Datadog Installer", - fmt.Sprintf("https://config.%s", env.Site), - hostname, - ht.get, - &rctelemetryreporterimpl.DdRcTelemetryReporter{}, // No telemetry for this client - version.AgentVersion, - options..., - ) - if err != nil { - return nil, err - } - cdn := &fetcherRC{ - rcService: service, - currentRootsVersion: 1, - clientUUID: uuid.New().String(), - configDBPath: configDBPathTemp, - firstRequest: true, - hostTagsGetter: ht, - env: env, - } - service.Start() - return cdn, nil -} - -// get calls the Remote Config service to get the ordered layers. 
-func (c *fetcherRC) get(ctx context.Context) ([][]byte, error) { - if c.firstRequest { - // A first request is made to the remote config service at service startup, - // so if we do another request too close to the first one (in the same second) - // we'll get the same director version (== timestamp) with different contents, - // which will cause the response to be rejected silently and we won't get - // the configurations - time.Sleep(1500 * time.Millisecond) - c.firstRequest = false - } - - agentConfigUpdate, err := c.rcService.ClientGetConfigs(ctx, &pbgo.ClientGetConfigsRequest{ - Client: &pbgo.Client{ - Id: c.clientUUID, - Products: []string{"AGENT_CONFIG"}, - IsUpdater: true, - ClientUpdater: &pbgo.ClientUpdater{ - Tags: []string{"installer:true"}, - }, - State: &pbgo.ClientState{ - RootVersion: c.currentRootsVersion, - TargetsVersion: 0, - }, - }, - }) - if err != nil { - return nil, err - } - - if agentConfigUpdate == nil { - return nil, nil - } - - // Update root versions - for _, root := range agentConfigUpdate.Roots { - var signedRoot data.Signed - err = json.Unmarshal(root, &signedRoot) - if err != nil { - continue - } - var r data.Root - err = json.Unmarshal(signedRoot.Signed, &r) - if err != nil { - continue - } - if uint64(r.Version) > c.currentRootsVersion { - c.currentRootsVersion = uint64(r.Version) - } - } - - // Unmarshal RC results - files := map[string][]byte{} - for _, file := range agentConfigUpdate.TargetFiles { - path := file.GetPath() - pathMatches := datadogConfigIDRegexp.FindStringSubmatch(path) - if len(pathMatches) != 2 { - log.Warnf("invalid config path: %s", path) - continue - } - files[pathMatches[1]] = file.GetRaw() - } - return getOrderedScopedLayers( - files, - getScopeExprVars(c.env, c.hostTagsGetter), - ) -} - -func (c *fetcherRC) close() error { - err := c.rcService.Stop() - if err != nil { - return err - } - return os.RemoveAll(c.configDBPath) -} diff --git a/pkg/fleet/internal/cdn/config_datadog_agent.go b/pkg/fleet/internal/cdn/config_datadog_agent.go deleted file mode 100644 index 71e96d0d2655e..0000000000000 --- a/pkg/fleet/internal/cdn/config_datadog_agent.go +++ /dev/null @@ -1,201 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package cdn - -import ( - "crypto/sha256" - "encoding/json" - "fmt" - "os" - "os/user" - "path/filepath" - "runtime" - "strconv" - - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - "github.com/DataDog/datadog-agent/pkg/util/log" -) - -const ( - layerKeys = "fleet_layers" - - configDatadogYAML = "datadog.yaml" - configSecurityAgentYAML = "security-agent.yaml" - configSystemProbeYAML = "system-probe.yaml" -) - -// agentConfig represents the agent configuration from the CDN. -type agentConfig struct { - version string - policyIDs []string - - datadog []byte - securityAgent []byte - systemProbe []byte -} - -// agentConfigLayer is a config layer that can be merged with other layers into a config. 
-type agentConfigLayer struct { - ID string `json:"name"` - AgentConfig map[string]interface{} `json:"config"` - SecurityAgentConfig map[string]interface{} `json:"security_agent"` - SystemProbeConfig map[string]interface{} `json:"system_probe"` -} - -// State returns the agent policies state -func (a *agentConfig) State() *pbgo.PoliciesState { - return &pbgo.PoliciesState{ - MatchedPolicies: a.policyIDs, - Version: a.version, - } -} - -func newAgentConfig(orderedLayers ...[]byte) (*agentConfig, error) { - // Compile ordered layers into a single config - policyIDs := []string{} - compiledLayer := &agentConfigLayer{ - AgentConfig: map[string]interface{}{}, - SecurityAgentConfig: map[string]interface{}{}, - SystemProbeConfig: map[string]interface{}{}, - } - for _, rawLayer := range orderedLayers { - layer := &agentConfigLayer{} - if err := json.Unmarshal(rawLayer, layer); err != nil { - log.Warnf("Failed to unmarshal layer: %v", err) - continue - } - if layer.AgentConfig == nil && layer.SecurityAgentConfig == nil && layer.SystemProbeConfig == nil { - // Only add layers that have at least one config that matches the agent - continue - } - - policyIDs = append(policyIDs, layer.ID) - - if layer.AgentConfig != nil { - agentConfig, err := merge(compiledLayer.AgentConfig, layer.AgentConfig) - if err != nil { - return nil, err - } - compiledLayer.AgentConfig = agentConfig.(map[string]interface{}) - } - - if layer.SecurityAgentConfig != nil { - securityAgentConfig, err := merge(compiledLayer.SecurityAgentConfig, layer.SecurityAgentConfig) - if err != nil { - return nil, err - } - compiledLayer.SecurityAgentConfig = securityAgentConfig.(map[string]interface{}) - } - - if layer.SystemProbeConfig != nil { - systemProbeAgentConfig, err := merge(compiledLayer.SystemProbeConfig, layer.SystemProbeConfig) - if err != nil { - return nil, err - } - compiledLayer.SystemProbeConfig = systemProbeAgentConfig.(map[string]interface{}) - } - } - - // Report applied layers - compiledLayer.AgentConfig[layerKeys] = policyIDs - - // Marshal into YAML configs - config, err := marshalYAMLConfig(compiledLayer.AgentConfig) - if err != nil { - return nil, err - } - securityAgentConfig, err := marshalYAMLConfig(compiledLayer.SecurityAgentConfig) - if err != nil { - return nil, err - } - systemProbeConfig, err := marshalYAMLConfig(compiledLayer.SystemProbeConfig) - if err != nil { - return nil, err - } - - hash := sha256.New() - version, err := json.Marshal(compiledLayer) - if err != nil { - return nil, err - } - hash.Write(version) - - return &agentConfig{ - version: fmt.Sprintf("%x", hash.Sum(nil)), - policyIDs: policyIDs, - - datadog: config, - securityAgent: securityAgentConfig, - systemProbe: systemProbeConfig, - }, nil -} - -// Write writes the agent configuration to the given directory. 
-func (a *agentConfig) Write(dir string) error { - ddAgentUID, ddAgentGID, err := getAgentIDs() - if err != nil { - return fmt.Errorf("error getting dd-agent user and group IDs: %w", err) - } - - if a.datadog != nil { - err = os.WriteFile(filepath.Join(dir, configDatadogYAML), []byte(a.datadog), 0640) - if err != nil { - return fmt.Errorf("could not write %s: %w", configDatadogYAML, err) - } - if runtime.GOOS != "windows" { - err = os.Chown(filepath.Join(dir, configDatadogYAML), ddAgentUID, ddAgentGID) - if err != nil { - return fmt.Errorf("could not chown %s: %w", configDatadogYAML, err) - } - } - } - if a.securityAgent != nil { - err = os.WriteFile(filepath.Join(dir, configSecurityAgentYAML), []byte(a.securityAgent), 0440) - if err != nil { - return fmt.Errorf("could not write %s: %w", configSecurityAgentYAML, err) - } - if runtime.GOOS != "windows" { - err = os.Chown(filepath.Join(dir, configSecurityAgentYAML), 0, ddAgentGID) // root:dd-agent - if err != nil { - return fmt.Errorf("could not chown %s: %w", configSecurityAgentYAML, err) - } - } - } - if a.systemProbe != nil { - err = os.WriteFile(filepath.Join(dir, configSystemProbeYAML), []byte(a.systemProbe), 0440) - if err != nil { - return fmt.Errorf("could not write %s: %w", configSecurityAgentYAML, err) - } - if runtime.GOOS != "windows" { - err = os.Chown(filepath.Join(dir, configSystemProbeYAML), 0, ddAgentGID) // root:dd-agent - if err != nil { - return fmt.Errorf("could not chown %s: %w", configSecurityAgentYAML, err) - } - } - } - return writePolicyMetadata(a, dir) -} - -// getAgentIDs returns the UID and GID of the dd-agent user and group. -func getAgentIDs() (uid, gid int, err error) { - ddAgentUser, err := user.Lookup("dd-agent") - if err != nil { - return -1, -1, fmt.Errorf("dd-agent user not found: %w", err) - } - ddAgentGroup, err := user.LookupGroup("dd-agent") - if err != nil { - return -1, -1, fmt.Errorf("dd-agent group not found: %w", err) - } - ddAgentUID, err := strconv.Atoi(ddAgentUser.Uid) - if err != nil { - return -1, -1, fmt.Errorf("error converting dd-agent UID to int: %w", err) - } - ddAgentGID, err := strconv.Atoi(ddAgentGroup.Gid) - if err != nil { - return -1, -1, fmt.Errorf("error converting dd-agent GID to int: %w", err) - } - return ddAgentUID, ddAgentGID, nil -} diff --git a/pkg/fleet/internal/cdn/config_datadog_agent_test.go b/pkg/fleet/internal/cdn/config_datadog_agent_test.go deleted file mode 100644 index 50fbad87879b7..0000000000000 --- a/pkg/fleet/internal/cdn/config_datadog_agent_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -package cdn - -import ( - "encoding/json" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAgentConfig(t *testing.T) { - baseLayer := &agentConfigLayer{ - ID: "base", - AgentConfig: map[string]interface{}{ - "api_key": "1234", - "apm": map[string]interface{}{ - "enabled": true, - "sampling_rate": 0.5, - }, - }, - } - baseLayerRaw, err := json.Marshal(baseLayer) - assert.NoError(t, err) - - overrideLayer := &agentConfigLayer{ - ID: "override", - AgentConfig: map[string]interface{}{ - "apm": map[string]interface{}{ - "sampling_rate": 0.7, - "env": "prod", - }, - }, - } - overrideLayerRaw, err := json.Marshal(overrideLayer) - assert.NoError(t, err) - - config, err := newAgentConfig(baseLayerRaw, overrideLayerRaw) - assert.NoError(t, err) - expectedConfig := doNotEditDisclaimer + ` -api_key: "1234" -apm: - enabled: true - env: prod - sampling_rate: 0.7 -fleet_layers: -- base -- override -` - assert.Equal(t, expectedConfig, string(config.datadog)) -} diff --git a/pkg/fleet/internal/cdn/config_datadog_apm_inject.go b/pkg/fleet/internal/cdn/config_datadog_apm_inject.go deleted file mode 100644 index f5cc04894b7f7..0000000000000 --- a/pkg/fleet/internal/cdn/config_datadog_apm_inject.go +++ /dev/null @@ -1,104 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package cdn - -import ( - "crypto/sha256" - "encoding/json" - "fmt" - "os" - "path/filepath" - - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/vmihailenco/msgpack/v5" -) - -const ( - injectorConfigFilename = "injector.msgpack" -) - -// apmSSIConfig represents the injector configuration from the CDN. -type apmSSIConfig struct { - version string - policyIDs []string - - injectorConfig []byte -} - -// apmSSIConfigLayer is a config layer that can be merged with other layers into a config. -type apmSSIConfigLayer struct { - ID string `json:"name"` - InjectorConfig map[string]interface{} `json:"apm_ssi_config"` -} - -// State returns the APM configs state -func (i *apmSSIConfig) State() *pbgo.PoliciesState { - return &pbgo.PoliciesState{ - MatchedPolicies: i.policyIDs, - Version: i.version, - } -} - -func newAPMSSIConfig(hostTags []string, orderedLayers ...[]byte) (*apmSSIConfig, error) { - // Compile ordered layers into a single config - // TODO: maybe we don't want that and we should reject if there are more than one config? 
- policyIDs := []string{} - compiledLayer := &apmSSIConfigLayer{ - InjectorConfig: map[string]interface{}{}, - } - for _, rawLayer := range orderedLayers { - layer := &apmSSIConfigLayer{} - if err := json.Unmarshal(rawLayer, layer); err != nil { - log.Warnf("Failed to unmarshal layer: %v", err) - continue - } - - // Only add layers that match the injector - if layer.InjectorConfig != nil { - injectorConfig, err := merge(compiledLayer.InjectorConfig, layer.InjectorConfig) - if err != nil { - return nil, err - } - compiledLayer.InjectorConfig = injectorConfig.(map[string]interface{}) - policyIDs = append(policyIDs, layer.ID) - } - } - - hash := sha256.New() - version, err := json.Marshal(compiledLayer) - if err != nil { - return nil, err - } - hash.Write(version) - - // Add host tags AFTER compiling the version -- we don't want to trigger noop updates - compiledLayer.InjectorConfig["host_tags"] = hostTags - - // Marshal into msgpack configs - injectorConfig, err := msgpack.Marshal(compiledLayer.InjectorConfig) - if err != nil { - return nil, err - } - - return &apmSSIConfig{ - version: fmt.Sprintf("%x", hash.Sum(nil)), - policyIDs: policyIDs, - - injectorConfig: injectorConfig, - }, nil -} - -// Write writes the agent configuration to the given directory. -func (i *apmSSIConfig) Write(dir string) error { - if i.injectorConfig != nil { - err := os.WriteFile(filepath.Join(dir, injectorConfigFilename), []byte(i.injectorConfig), 0644) // Must be world readable - if err != nil { - return fmt.Errorf("could not write %s: %w", injectorConfigFilename, err) - } - } - return writePolicyMetadata(i, dir) -} diff --git a/pkg/fleet/internal/cdn/config_datadog_apm_libraries.go b/pkg/fleet/internal/cdn/config_datadog_apm_libraries.go deleted file mode 100644 index bf31d18bd69a3..0000000000000 --- a/pkg/fleet/internal/cdn/config_datadog_apm_libraries.go +++ /dev/null @@ -1,101 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package cdn - -import ( - "crypto/sha256" - "encoding/json" - "fmt" - "os" - "path/filepath" - - pbgo "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core" - "github.com/DataDog/datadog-agent/pkg/util/log" -) - -const ( - apmLibrariesConfigPath = "libraries_config.yaml" -) - -// apmLibrariesConfig represents the injector configuration from the CDN. -type apmLibrariesConfig struct { - version string - policyIDs []string - - apmLibrariesConfig []byte -} - -// apmLibrariesConfigLayer is a config layer that can be merged with other layers into a config. 
-type apmLibrariesConfigLayer struct { - ID string `json:"name"` - APMLibrariesConfig map[string]interface{} `json:"apm_libraries_config"` -} - -// State returns the APM configs state -func (i *apmLibrariesConfig) State() *pbgo.PoliciesState { - return &pbgo.PoliciesState{ - MatchedPolicies: i.policyIDs, - Version: i.version, - } -} - -func newAPMLibrariesConfig(hostTags []string, orderedLayers ...[]byte) (*apmLibrariesConfig, error) { - // Compile ordered layers into a single config - policyIDs := []string{} - compiledLayer := &apmLibrariesConfigLayer{ - APMLibrariesConfig: map[string]interface{}{}, - } - for _, rawLayer := range orderedLayers { - layer := &apmLibrariesConfigLayer{} - if err := json.Unmarshal(rawLayer, layer); err != nil { - log.Warnf("Failed to unmarshal layer: %v", err) - continue - } - - if layer.APMLibrariesConfig != nil { - cfg, err := merge(compiledLayer.APMLibrariesConfig, layer.APMLibrariesConfig) - if err != nil { - return nil, err - } - compiledLayer.APMLibrariesConfig = cfg.(map[string]interface{}) - policyIDs = append(policyIDs, layer.ID) - } - } - - hash := sha256.New() - version, err := json.Marshal(compiledLayer) - if err != nil { - return nil, err - } - hash.Write(version) - - // Add host tags AFTER compiling the version -- we don't want to trigger noop updates - compiledLayer.APMLibrariesConfig["host_tags"] = hostTags - - // Marshal into msgpack configs - yamlCfg, err := marshalYAMLConfig(compiledLayer.APMLibrariesConfig) - if err != nil { - return nil, err - } - - return &apmLibrariesConfig{ - version: fmt.Sprintf("%x", hash.Sum(nil)), - policyIDs: policyIDs, - - apmLibrariesConfig: yamlCfg, - }, nil -} - -// Write writes the agent configuration to the given directory. -func (i *apmLibrariesConfig) Write(dir string) error { - if i.apmLibrariesConfig != nil { - err := os.WriteFile(filepath.Join(dir, apmLibrariesConfigPath), []byte(i.apmLibrariesConfig), 0644) // Must be world readable - if err != nil { - return fmt.Errorf("could not write %s: %w", apmLibrariesConfigPath, err) - } - } - return writePolicyMetadata(i, dir) -} diff --git a/pkg/fleet/internal/cdn/merge.go b/pkg/fleet/internal/cdn/merge.go deleted file mode 100644 index 05a4b1a08b01b..0000000000000 --- a/pkg/fleet/internal/cdn/merge.go +++ /dev/null @@ -1,65 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package cdn - -import ( - "fmt" -) - -func isList(i interface{}) bool { - _, ok := i.([]interface{}) - return ok -} - -func isMap(i interface{}) bool { - _, ok := i.(map[string]interface{}) - return ok -} - -func isScalar(i interface{}) bool { - return !isList(i) && !isMap(i) -} - -// merge merges two layers into a single layer -// -// The override layer takes precedence over the base layer. 
The values are merged as follows: -// - Scalars: the override value is used -// - Lists: the override list is used -// - Maps: the override map is recursively merged into the base map -func merge(base interface{}, override interface{}) (interface{}, error) { - if base == nil { - return override, nil - } - if override == nil { - // this allows to override a value with nil - return nil, nil - } - if isScalar(base) && isScalar(override) { - return override, nil - } - if isList(base) && isList(override) { - return override, nil - } - if isMap(base) && isMap(override) { - return mergeMap(base.(map[string]interface{}), override.(map[string]interface{})) - } - return nil, fmt.Errorf("could not merge %T with %T", base, override) -} - -func mergeMap(base, override map[string]interface{}) (map[string]interface{}, error) { - merged := make(map[string]interface{}) - for k, v := range base { - merged[k] = v - } - for k := range override { - v, err := merge(base[k], override[k]) - if err != nil { - return nil, fmt.Errorf("could not merge key %v: %w", k, err) - } - merged[k] = v - } - return merged, nil -} diff --git a/pkg/fleet/internal/cdn/merge_test.go b/pkg/fleet/internal/cdn/merge_test.go deleted file mode 100644 index 8af4b7f81d42b..0000000000000 --- a/pkg/fleet/internal/cdn/merge_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package cdn - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestMergeScalar(t *testing.T) { - tests := []struct { - name string - base interface{} - override interface{} - expected interface{} - expectedErr bool - }{ - { - name: "nil base and override", - base: nil, - override: nil, - expected: nil, - }, - { - name: "nil base", - base: nil, - override: "override", - expected: "override", - }, - { - name: "nil override", - base: "base", - override: nil, - expected: nil, - }, - { - name: "override", - base: "base", - override: "override", - expected: "override", - }, - { - name: "scalar and list error", - base: "base", - override: []interface{}{"override"}, - expectedErr: true, - }, - { - name: "scalar and map error", - base: "base", - override: map[string]interface{}{"key": "value"}, - expectedErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - merged, err := merge(tt.base, tt.override) - if tt.expectedErr { - assert.Error(t, err, "expected an error") - } else { - assert.Equal(t, tt.expected, merged) - } - }) - } -} - -func TestMergeList(t *testing.T) { - tests := []struct { - name string - base interface{} - override interface{} - expected interface{} - expectedErr bool - }{ - { - name: "nil override", - base: []interface{}{"base"}, - override: nil, - expected: nil, - }, - { - name: "override", - base: []interface{}{"base"}, - override: []interface{}{"override"}, - expected: []interface{}{"override"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - merged, err := merge(tt.base, tt.override) - if tt.expectedErr { - assert.Error(t, err, "expected an error") - } else { - assert.Equal(t, tt.expected, merged) - } - }) - } -} - -func TestMergeMap(t *testing.T) { - tests := []struct { - name string - base interface{} - override interface{} - expected interface{} - expectedErr bool - }{ - { - name: "nil override", - base: map[string]interface{}{ 
- "base": "value", - }, - override: nil, - expected: nil, - }, - { - name: "override", - base: map[string]interface{}{ - "base": "value", - }, - override: map[string]interface{}{ - "base": "override", - }, - expected: map[string]interface{}{ - "base": "override", - }, - }, - { - name: "add key", - base: map[string]interface{}{ - "base": "value", - }, - override: map[string]interface{}{ - "override": "value", - }, - expected: map[string]interface{}{ - "base": "value", - "override": "value", - }, - }, - { - name: "nested", - base: map[string]interface{}{ - "base": map[string]interface{}{ - "key": "value", - }, - }, - override: map[string]interface{}{ - "base": map[string]interface{}{ - "key": "override", - }, - }, - expected: map[string]interface{}{ - "base": map[string]interface{}{ - "key": "override", - }, - }, - }, - { - name: "nested scalar and list error", - base: map[string]interface{}{ - "base": map[string]interface{}{ - "key": []interface{}{"value"}, - }, - }, - override: map[string]interface{}{ - "base": map[string]interface{}{ - "key": "override", - }, - }, - expectedErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - merged, err := merge(tt.base, tt.override) - if tt.expectedErr { - assert.Error(t, err, "expected an error") - } else { - assert.Equal(t, tt.expected, merged) - } - }) - } -} diff --git a/pkg/fleet/internal/cdn/scope_expression.go b/pkg/fleet/internal/cdn/scope_expression.go deleted file mode 100644 index cc86626fcf9c0..0000000000000 --- a/pkg/fleet/internal/cdn/scope_expression.go +++ /dev/null @@ -1,125 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -package cdn - -import ( - "encoding/json" - "fmt" - "regexp" - - "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/utils" - "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" - "github.com/DataDog/datadog-agent/pkg/gohai/platform" - "github.com/DataDog/datadog-agent/pkg/version" - "github.com/expr-lang/expr" -) - -var ( - datadogConfigIDRegexp = regexp.MustCompile(`^datadog/\d+/AGENT_CONFIG/([^/]+)/[^/]+$`) -) - -const configOrderID = "configuration_order" - -type orderConfig struct { - Order []string `json:"order"` - ScopeExpressions []scopeExpression `json:"scope_expressions"` -} -type scopeExpression struct { - Expression string `json:"expression"` - PolicyID string `json:"config_id"` -} - -// Match returns true if the given policy ID matches its scope expression -func (o *orderConfig) Match(policyID string, env map[string]interface{}) (bool, error) { - var scopeExpression string - for _, scope := range o.ScopeExpressions { - if scope.PolicyID == policyID { - scopeExpression = scope.Expression - break - } - } - if scopeExpression == "" { - return false, fmt.Errorf("no scope expression found for policy ID %s", policyID) - } - - program, err := expr.Compile(scopeExpression, expr.Env(env), expr.AsBool()) - if err != nil { - return false, err - } - - output, err := expr.Run(program, env) - if err != nil { - return false, err - } - - boolOutput, ok := output.(bool) - if !ok { - return false, fmt.Errorf("scope expression %s did not evaluate to a boolean", scopeExpression) - } - - return boolOutput, nil -} - -// getOrderedScopedLayers takes in a Remote Config response and returns the ordered layers -// that match the current scope -// Layers are ordered from the lowest priority to the highest priority so that -// a simple loop can merge them in order -func getOrderedScopedLayers(configs map[string][]byte, env map[string]interface{}) ([][]byte, error) { - // First unmarshal the order configuration - var configOrder *orderConfig - for configID, content := range configs { - if configID == configOrderID { - configOrder = &orderConfig{} - err := json.Unmarshal(content, configOrder) - if err != nil { - return nil, err - } - break - } - } - if configOrder == nil { - return nil, fmt.Errorf("no order found in the remote config response") - } - - // Match layers against the scope expressions - scopedLayers := map[string][]byte{} - for configID, content := range configs { - if configID == configOrderID { - continue - } - - scopeMatch, err := configOrder.Match(configID, env) - if err != nil { - // Don't apply anything if there is an error parsing scope expressions - return nil, fmt.Errorf("error matching scope expressions: %w", err) - } - if scopeMatch { - scopedLayers[configID] = content - } - } - - // Order layers - layers := make([][]byte, 0) - for i := len(configOrder.Order) - 1; i >= 0; i-- { - content, matched := scopedLayers[configOrder.Order[i]] - if matched { - layers = append(layers, content) - } - } - - return layers, nil -} - -func getScopeExprVars(env *env.Env, hostTagsGetter hostTagsGetter) map[string]interface{} { - return map[string]interface{}{ - "hostname": env.Hostname, - "installer_version": version.AgentVersion, // AgentVersion evaluates to the installer version here - "os": platform.CollectInfo().KernelName.ValueOrDefault(), - "os_version": utils.GetOSVersion(), - - "tags": hostTagsGetter.get(), - } -} diff --git a/pkg/fleet/internal/cdn/scope_expression_test.go b/pkg/fleet/internal/cdn/scope_expression_test.go deleted file mode 100644 index 
4101f39c02b09..0000000000000 --- a/pkg/fleet/internal/cdn/scope_expression_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -package cdn - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestMatchScopeExpression(t *testing.T) { - type test struct { - name string - policyID string - scopeExpressions []scopeExpression - env map[string]interface{} - match bool - err bool - } - - tests := []test{ - { - name: "Match scope expression", - policyID: "policy1", - scopeExpressions: []scopeExpression{ - { - Expression: "'env:test' in tags", - PolicyID: "policy1", - }, - }, - env: map[string]interface{}{ - "tags": []string{"env:test"}, - }, - match: true, - err: false, - }, - { - name: "Match true", - scopeExpressions: []scopeExpression{ - { - Expression: "true", - PolicyID: "policy1", - }, - }, - policyID: "policy1", - env: nil, - match: true, - err: false, - }, - { - name: "Policy not present", - policyID: "policy2", - scopeExpressions: []scopeExpression{}, - env: map[string]interface{}{ - "tags": []string{"env:test"}, - }, - match: false, - err: true, - }, - { - name: "Policy not matching", - policyID: "policy1", - scopeExpressions: []scopeExpression{ - { - Expression: "'foo:bar' in tags", - PolicyID: "policy1", - }, - }, - env: map[string]interface{}{ - "tags": []string{"env:test"}, - }, - match: false, - err: false, - }, - { - name: "Multiple policies -- one matching", - policyID: "policy1", - scopeExpressions: []scopeExpression{ - { - Expression: "'env:test' in tags", - PolicyID: "policy1", - }, - { - Expression: "'foo:bar' in tags", - PolicyID: "policy2", - }, - }, - env: map[string]interface{}{ - "tags": []string{"env:test"}, - }, - match: true, - err: false, - }, - { - name: "Multiple tags in expression", - policyID: "policy1", - scopeExpressions: []scopeExpression{ - { - Expression: "any(['env:test', 'env:prod'], {# in tags})", - PolicyID: "policy1", - }, - }, - env: map[string]interface{}{ - "tags": []string{"env:test", "foo:bar"}, - }, - match: true, - err: false, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - orderConfig := orderConfig{ - ScopeExpressions: test.scopeExpressions, - } - - match, err := orderConfig.Match(test.policyID, test.env) - assert.Equal(t, err != nil, test.err) - assert.Equal(t, test.match, match) - }) - } -} diff --git a/pkg/fleet/internal/cdn/tags.go b/pkg/fleet/internal/cdn/tags.go deleted file mode 100644 index 101c3afec8ce3..0000000000000 --- a/pkg/fleet/internal/cdn/tags.go +++ /dev/null @@ -1,55 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
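// Illustrative sketch of how the deleted Match method evaluates a scope
// expression with github.com/expr-lang/expr; a hypothetical standalone example
// reusing the same Compile/Run calls shown above.
package main

import (
	"fmt"

	"github.com/expr-lang/expr"
)

func main() {
	env := map[string]interface{}{
		"tags": []string{"env:test", "foo:bar"},
	}

	// Compile against the environment's shape and require a boolean result.
	program, err := expr.Compile(`'env:test' in tags`, expr.Env(env), expr.AsBool())
	if err != nil {
		panic(err)
	}

	output, err := expr.Run(program, env)
	if err != nil {
		panic(err)
	}
	fmt.Println(output.(bool)) // true
}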
- -package cdn - -import ( - "context" - "time" - - "github.com/DataDog/datadog-agent/comp/metadata/host/hostimpl/hosttags" - detectenv "github.com/DataDog/datadog-agent/pkg/config/env" - "github.com/DataDog/datadog-agent/pkg/config/model" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/fleet/installer/env" -) - -type hostTagsGetter struct { - config model.Config - staticTags []string -} - -func newHostTagsGetter(env *env.Env) hostTagsGetter { - config := pkgconfigsetup.Datadog() - detectenv.DetectFeatures(config) - return hostTagsGetter{ - config: config, - staticTags: env.Tags, - } -} - -func (h *hostTagsGetter) get() []string { - // Host tags are cached on host, but we add a timeout to avoid blocking the request - // if the host tags are not available yet and need to be fetched - ctx, cc := context.WithTimeout(context.Background(), time.Second) - defer cc() - hostTags := hosttags.Get(ctx, true, h.config) - - tags := []string{} - tags = append(tags, h.staticTags...) - tags = append(tags, hostTags.System...) - tags = append(tags, hostTags.GoogleCloudPlatform...) - tagSet := make(map[string]struct{}) - for _, tag := range tags { - tagSet[tag] = struct{}{} - } - deduplicatedTags := make([]string, 0, len(tagSet)) - for tag := range tagSet { - deduplicatedTags = append(deduplicatedTags, tag) - } - tags = deduplicatedTags - - return tags -} diff --git a/pkg/fleet/internal/exec/installer_exec.go b/pkg/fleet/internal/exec/installer_exec.go index 92f5880723fa9..87f8cce841f30 100644 --- a/pkg/fleet/internal/exec/installer_exec.go +++ b/pkg/fleet/internal/exec/installer_exec.go @@ -109,8 +109,8 @@ func (i *InstallerExec) PromoteExperiment(ctx context.Context, pkg string) (err } // InstallConfigExperiment installs an experiment. 
-func (i *InstallerExec) InstallConfigExperiment(ctx context.Context, url string, version string) (err error) { - cmd := i.newInstallerCmd(ctx, "install-config-experiment", url, version) +func (i *InstallerExec) InstallConfigExperiment(ctx context.Context, pkg string, version string, rawConfig []byte) (err error) { + cmd := i.newInstallerCmd(ctx, "install-config-experiment", pkg, version, string(rawConfig)) defer func() { cmd.span.Finish(err) }() return cmd.Run() } diff --git a/pkg/gohai/go.mod b/pkg/gohai/go.mod index e7dee142ec26f..f9d3ef921ca24 100644 --- a/pkg/gohai/go.mod +++ b/pkg/gohai/go.mod @@ -7,9 +7,9 @@ go 1.23.0 require ( github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 github.com/moby/sys/mountinfo v0.7.2 - github.com/shirou/gopsutil/v4 v4.24.11 + github.com/shirou/gopsutil/v4 v4.24.12 github.com/stretchr/testify v1.10.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 ) require ( @@ -19,9 +19,9 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/ebitengine/purego v0.8.1 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect diff --git a/pkg/gohai/go.sum b/pkg/gohai/go.sum index 828633a9495c7..c970895704858 100644 --- a/pkg/gohai/go.sum +++ b/pkg/gohai/go.sum @@ -7,25 +7,24 @@ github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/moby/sys/mountinfo v0.7.2 h1:1shs6aH5s4o5H2zQLn796ADW1wMrIwHsyJ2v9KouLrg= github.com/moby/sys/mountinfo v0.7.2/go.mod h1:1YOa8w8Ih7uW0wALDUgT1dTTSBrZ+HiBLGws92L2RU4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= @@ -39,8 +38,8 @@ go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/gpu/consumer.go b/pkg/gpu/consumer.go index 55148350b12e3..5fb10d6ca1733 100644 --- a/pkg/gpu/consumer.go +++ b/pkg/gpu/consumer.go @@ -17,6 +17,7 @@ import ( "github.com/NVIDIA/go-nvml/pkg/nvml" "golang.org/x/sys/unix" + "github.com/DataDog/datadog-agent/comp/core/telemetry" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/gpu/config" gpuebpf "github.com/DataDog/datadog-agent/pkg/gpu/ebpf" @@ -27,6 +28,11 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) +const telemetryEventErrorMismatch = "size_mismatch" +const telemetryEventErrorUnknownType = "unknown_type" +const telemetryEventTypeUnknown = "unknown" +const telemetryEventHeader = "header" + // cudaEventConsumer is responsible for consuming CUDA events from the eBPF probe, and delivering them // to the appropriate stream handler. 
type cudaEventConsumer struct { @@ -38,16 +44,40 @@ type cudaEventConsumer struct { running atomic.Bool sysCtx *systemContext cfg *config.Config + telemetry *cudaEventConsumerTelemetry +} + +type cudaEventConsumerTelemetry struct { + activeHandlers telemetry.Gauge + removedHandlers telemetry.Counter + events telemetry.Counter + eventErrors telemetry.Counter + finalizedProcesses telemetry.Counter + missingContainers telemetry.Counter } // newCudaEventConsumer creates a new CUDA event consumer. -func newCudaEventConsumer(sysCtx *systemContext, eventHandler ddebpf.EventHandler, cfg *config.Config) *cudaEventConsumer { +func newCudaEventConsumer(sysCtx *systemContext, eventHandler ddebpf.EventHandler, cfg *config.Config, telemetry telemetry.Component) *cudaEventConsumer { return &cudaEventConsumer{ eventHandler: eventHandler, closed: make(chan struct{}), streamHandlers: make(map[streamKey]*StreamHandler), cfg: cfg, sysCtx: sysCtx, + telemetry: newCudaEventConsumerTelemetry(telemetry), + } +} + +func newCudaEventConsumerTelemetry(tm telemetry.Component) *cudaEventConsumerTelemetry { + subsystem := gpuTelemetryModule + "__consumer" + + return &cudaEventConsumerTelemetry{ + activeHandlers: tm.NewGauge(subsystem, "active_handlers", nil, "Number of active stream handlers"), + removedHandlers: tm.NewCounter(subsystem, "removed_handlers", nil, "Number of removed stream handlers"), + events: tm.NewCounter(subsystem, "events", []string{"event_type"}, "Number of processed CUDA events received by the consumer"), + eventErrors: tm.NewCounter(subsystem, "events__errors", []string{"event_type", "error"}, "Number of CUDA events that couldn't be processed due to an error"), + finalizedProcesses: tm.NewCounter(subsystem, "finalized_processes", nil, "Number of finalized processes"), + missingContainers: tm.NewCounter(subsystem, "missing_containers", []string{"reason"}, "Number of missing containers"), } } @@ -105,6 +135,7 @@ func (c *cudaEventConsumer) Start() { dataLen := len(batchData.Data) if dataLen < gpuebpf.SizeofCudaEventHeader { log.Errorf("Not enough data to parse header, data size=%d, expecting at least %d", dataLen, gpuebpf.SizeofCudaEventHeader) + c.telemetry.eventErrors.Inc(telemetryEventHeader, telemetryEventErrorMismatch) continue } @@ -112,7 +143,9 @@ func (c *cudaEventConsumer) Start() { dataPtr := unsafe.Pointer(&batchData.Data[0]) var err error - if isStreamSpecificEvent(gpuebpf.CudaEventType(header.Type)) { + eventType := gpuebpf.CudaEventType(header.Type) + c.telemetry.events.Inc(eventType.String()) + if isStreamSpecificEvent(eventType) { err = c.handleStreamEvent(header, dataPtr, dataLen) } else { err = c.handleGlobalEvent(header, dataPtr, dataLen) @@ -140,27 +173,29 @@ func isStreamSpecificEvent(eventType gpuebpf.CudaEventType) bool { func (c *cudaEventConsumer) handleStreamEvent(header *gpuebpf.CudaEventHeader, data unsafe.Pointer, dataLen int) error { streamHandler := c.getStreamHandler(header) + eventType := gpuebpf.CudaEventType(header.Type) - switch header.Type { + switch eventType { case gpuebpf.CudaEventTypeKernelLaunch: if dataLen != gpuebpf.SizeofCudaKernelLaunch { + c.telemetry.eventErrors.Inc(eventType.String(), telemetryEventErrorMismatch) return fmt.Errorf("Not enough data to parse kernel launch event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaKernelLaunch) - } streamHandler.handleKernelLaunch((*gpuebpf.CudaKernelLaunch)(data)) case gpuebpf.CudaEventTypeMemory: if dataLen != gpuebpf.SizeofCudaMemEvent { + c.telemetry.eventErrors.Inc(eventType.String(), 
telemetryEventErrorMismatch) return fmt.Errorf("Not enough data to parse memory event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaMemEvent) - } streamHandler.handleMemEvent((*gpuebpf.CudaMemEvent)(data)) case gpuebpf.CudaEventTypeSync: if dataLen != gpuebpf.SizeofCudaSync { + c.telemetry.eventErrors.Inc(eventType.String(), telemetryEventErrorMismatch) return fmt.Errorf("Not enough data to parse sync event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaSync) - } streamHandler.handleSync((*gpuebpf.CudaSync)(data)) default: + c.telemetry.eventErrors.Inc(telemetryEventTypeUnknown, telemetryEventErrorUnknownType) return fmt.Errorf("Unknown event type: %d", header.Type) } @@ -174,17 +209,19 @@ func getPidTidFromHeader(header *gpuebpf.CudaEventHeader) (uint32, uint32) { } func (c *cudaEventConsumer) handleGlobalEvent(header *gpuebpf.CudaEventHeader, data unsafe.Pointer, dataLen int) error { - switch header.Type { + eventType := gpuebpf.CudaEventType(header.Type) + switch eventType { case gpuebpf.CudaEventTypeSetDevice: if dataLen != gpuebpf.SizeofCudaSetDeviceEvent { + c.telemetry.eventErrors.Inc(eventType.String(), telemetryEventErrorMismatch) return fmt.Errorf("Not enough data to parse set device event, data size=%d, expecting %d", dataLen, gpuebpf.SizeofCudaSetDeviceEvent) - } csde := (*gpuebpf.CudaSetDeviceEvent)(data) pid, tid := getPidTidFromHeader(header) c.sysCtx.setDeviceSelection(int(pid), int(tid), csde.Device) default: + c.telemetry.eventErrors.Inc(telemetryEventTypeUnknown, telemetryEventErrorUnknownType) return fmt.Errorf("Unknown event type: %d", header.Type) } @@ -197,6 +234,7 @@ func (c *cudaEventConsumer) handleProcessExit(pid uint32) { log.Debugf("Process %d ended, marking stream %d as ended", pid, key.stream) // the probe is responsible for deleting the stream handler _ = handler.markEnd() + c.telemetry.finalizedProcesses.Inc() } } } @@ -209,6 +247,9 @@ func (c *cudaEventConsumer) getStreamKey(header *gpuebpf.CudaEventHeader) stream if err != nil { // We don't want to return an error here, as we can still process the event without the container ID log.Warnf("error getting container ID for cgroup %s: %s", cgroup, err) + c.telemetry.missingContainers.Inc("error") + } else if containerID == "" { + c.telemetry.missingContainers.Inc("missing") } key := streamKey{ @@ -238,6 +279,7 @@ func (c *cudaEventConsumer) getStreamHandler(header *gpuebpf.CudaEventHeader) *S key := c.getStreamKey(header) if _, ok := c.streamHandlers[key]; !ok { c.streamHandlers[key] = newStreamHandler(key.pid, key.containerID, c.sysCtx) + c.telemetry.activeHandlers.Set(float64(len(c.streamHandlers))) } return c.streamHandlers[key] @@ -264,4 +306,6 @@ func (c *cudaEventConsumer) cleanFinishedHandlers() { delete(c.streamHandlers, key) } } + + c.telemetry.activeHandlers.Set(float64(len(c.streamHandlers))) } diff --git a/pkg/gpu/consumer_test.go b/pkg/gpu/consumer_test.go index c7c7aa04837d7..6ab25e2e9a556 100644 --- a/pkg/gpu/consumer_test.go +++ b/pkg/gpu/consumer_test.go @@ -22,9 +22,9 @@ import ( func TestConsumerCanStartAndStop(t *testing.T) { handler := ddebpf.NewRingBufferHandler(consumerChannelSize) cfg := config.New() - ctx, err := getSystemContext(testutil.GetBasicNvmlMock(), kernel.ProcFSRoot(), testutil.GetWorkloadMetaMock(t)) + ctx, err := getSystemContext(testutil.GetBasicNvmlMock(), kernel.ProcFSRoot(), testutil.GetWorkloadMetaMock(t), testutil.GetTelemetryMock(t)) require.NoError(t, err) - consumer := newCudaEventConsumer(ctx, handler, cfg) + consumer := 
newCudaEventConsumer(ctx, handler, cfg, testutil.GetTelemetryMock(t)) consumer.Start() require.Eventually(t, func() bool { return consumer.running.Load() }, 100*time.Millisecond, 10*time.Millisecond) diff --git a/pkg/gpu/context.go b/pkg/gpu/context.go index 59d778bfea5c1..756a264761b98 100644 --- a/pkg/gpu/context.go +++ b/pkg/gpu/context.go @@ -16,6 +16,7 @@ import ( "github.com/NVIDIA/go-nvml/pkg/nvml" + "github.com/DataDog/datadog-agent/comp/core/telemetry" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/errors" "github.com/DataDog/datadog-agent/pkg/gpu/cuda" @@ -27,9 +28,6 @@ const nvidiaResourceName = "nvidia.com/gpu" // systemContext holds certain attributes about the system that are used by the GPU probe. type systemContext struct { - // maxGpuThreadsPerDevice maps each device index to the maximum number of threads it can run in parallel - maxGpuThreadsPerDevice map[int]int - // timeResolver allows to resolve kernel-time timestamps timeResolver *ktime.Resolver @@ -68,6 +66,26 @@ type systemContext struct { // workloadmeta is the workloadmeta component that we use to get necessary container metadata workloadmeta workloadmeta.Component + + // telemetry holds telemetry elements for the context + telemetry *contextTelemetry + + // fatbinTelemetry holds telemetry counters and histograms for the fatbin parsing process + fatbinTelemetry *fatbinTelemetry +} + +// contextTelemetry holds telemetry elements for the context +type contextTelemetry struct { + symbolCacheSize telemetry.Gauge + activePIDs telemetry.Gauge +} + +// fatbinTelemetry holds telemetry counters and histograms for the fatbin parsing process +type fatbinTelemetry struct { + readErrors telemetry.Counter + fatbinPayloads telemetry.Counter + kernelsPerFile telemetry.Histogram + kernelSizes telemetry.Histogram } // symbolsEntry embeds cuda.Symbols adding a field for keeping track of the last @@ -81,9 +99,8 @@ func (e *symbolsEntry) updateLastUsedTime() { e.lastUsedTime = time.Now() } -func getSystemContext(nvmlLib nvml.Interface, procRoot string, wmeta workloadmeta.Component) (*systemContext, error) { +func getSystemContext(nvmlLib nvml.Interface, procRoot string, wmeta workloadmeta.Component, tm telemetry.Component) (*systemContext, error) { ctx := &systemContext{ - maxGpuThreadsPerDevice: make(map[int]int), deviceSmVersions: make(map[int]int), cudaSymbols: make(map[string]*symbolsEntry), pidMaps: make(map[int][]*procfs.ProcMap), @@ -92,6 +109,8 @@ func getSystemContext(nvmlLib nvml.Interface, procRoot string, wmeta workloadmet selectedDeviceByPIDAndTID: make(map[int]map[int]int32), visibleDevicesCache: make(map[int][]nvml.Device), workloadmeta: wmeta, + telemetry: newContextTelemetry(tm), + fatbinTelemetry: newfatbinTelemetry(tm), } if err := ctx.fillDeviceInfo(); err != nil { @@ -112,6 +131,26 @@ func getSystemContext(nvmlLib nvml.Interface, procRoot string, wmeta workloadmet return ctx, nil } +func newContextTelemetry(tm telemetry.Component) *contextTelemetry { + subsystem := gpuTelemetryModule + "__context" + + return &contextTelemetry{ + symbolCacheSize: tm.NewGauge(subsystem, "symbol_cache_size", nil, "Number of CUDA symbols in the cache"), + activePIDs: tm.NewGauge(subsystem, "active_pids", nil, "Number of active PIDs being monitored"), + } +} + +func newfatbinTelemetry(tm telemetry.Component) *fatbinTelemetry { + subsystem := gpuTelemetryModule + "__fatbin_parser" + + return &fatbinTelemetry{ + readErrors: tm.NewCounter(subsystem, "read_errors", nil, 
"Number of errors reading fatbin data"), + fatbinPayloads: tm.NewCounter(subsystem, "fatbin_payloads", []string{"compression"}, "Number of fatbin payloads read"), + kernelsPerFile: tm.NewHistogram(subsystem, "kernels_per_file", nil, "Number of kernels per fatbin file", []float64{5, 10, 50, 100, 500}), + kernelSizes: tm.NewHistogram(subsystem, "kernel_sizes", nil, "Size of kernels in bytes", []float64{100, 1000, 10000, 100000, 1000000, 10000000}), + } +} + func getDeviceSmVersion(device nvml.Device) (int, error) { major, minor, ret := device.GetCudaComputeCapability() if ret != nvml.SUCCESS { @@ -137,13 +176,6 @@ func (ctx *systemContext) fillDeviceInfo() error { } ctx.deviceSmVersions[i] = smVersion - maxThreads, ret := dev.GetNumGpuCores() - if ret != nvml.SUCCESS { - return fmt.Errorf("error getting max threads for device %s: %s", dev, nvml.ErrorString(ret)) - } - - ctx.maxGpuThreadsPerDevice[i] = maxThreads - ctx.gpuDevices = append(ctx.gpuDevices, dev) } return nil @@ -157,13 +189,24 @@ func (ctx *systemContext) getCudaSymbols(path string) (*symbolsEntry, error) { data, err := cuda.GetSymbols(path) if err != nil { + ctx.fatbinTelemetry.readErrors.Inc() return nil, fmt.Errorf("error getting file data: %w", err) } + ctx.fatbinTelemetry.fatbinPayloads.Add(float64(data.Fatbin.CompressedPayloads), "compressed") + ctx.fatbinTelemetry.fatbinPayloads.Add(float64(data.Fatbin.UncompressedPayloads), "uncompressed") + ctx.fatbinTelemetry.kernelsPerFile.Observe(float64(data.Fatbin.NumKernels())) + + for kernel := range data.Fatbin.GetKernels() { + ctx.fatbinTelemetry.kernelSizes.Observe(float64(kernel.KernelSize)) + } + wrapper := &symbolsEntry{Symbols: data} wrapper.updateLastUsedTime() ctx.cudaSymbols[path] = wrapper + ctx.telemetry.symbolCacheSize.Set(float64(len(ctx.cudaSymbols))) + return wrapper, nil } @@ -195,6 +238,7 @@ func (ctx *systemContext) getProcessMemoryMaps(pid int) ([]*procfs.ProcMap, erro } ctx.pidMaps[pid] = maps + ctx.telemetry.activePIDs.Set(float64(len(ctx.pidMaps))) return maps, nil } @@ -203,6 +247,8 @@ func (ctx *systemContext) removeProcess(pid int) { delete(ctx.pidMaps, pid) delete(ctx.selectedDeviceByPIDAndTID, pid) delete(ctx.visibleDevicesCache, pid) + + ctx.telemetry.activePIDs.Set(float64(len(ctx.pidMaps))) } // cleanupOldEntries removes any old entries that have not been accessed in a while, to avoid @@ -216,6 +262,8 @@ func (ctx *systemContext) cleanupOldEntries() { delete(ctx.cudaSymbols, path) } } + + ctx.telemetry.symbolCacheSize.Set(float64(len(ctx.cudaSymbols))) } // filterDevicesForContainer filters the available GPU devices for the given diff --git a/pkg/gpu/context_test.go b/pkg/gpu/context_test.go index dbb6e1735df24..88a0ec59c7890 100644 --- a/pkg/gpu/context_test.go +++ b/pkg/gpu/context_test.go @@ -22,7 +22,7 @@ import ( func TestFilterDevicesForContainer(t *testing.T) { wmetaMock := testutil.GetWorkloadMetaMock(t) - sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), kernel.ProcFSRoot(), wmetaMock) + sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), kernel.ProcFSRoot(), wmetaMock, testutil.GetTelemetryMock(t)) require.NotNil(t, sysCtx) require.NoError(t, err) @@ -92,7 +92,7 @@ func TestGetCurrentActiveGpuDevice(t *testing.T) { }) wmetaMock := testutil.GetWorkloadMetaMock(t) - sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), procFs, wmetaMock) + sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), procFs, wmetaMock, testutil.GetTelemetryMock(t)) require.NotNil(t, sysCtx) require.NoError(t, err) diff --git
a/pkg/gpu/cuda/fatbin.go b/pkg/gpu/cuda/fatbin.go index 275e0978b5b25..cf1abb6c93bcd 100644 --- a/pkg/gpu/cuda/fatbin.go +++ b/pkg/gpu/cuda/fatbin.go @@ -20,6 +20,8 @@ import ( "encoding/binary" "fmt" "io" + "iter" + "maps" "unsafe" "github.com/pierrec/lz4/v4" @@ -45,6 +47,12 @@ type Fatbin struct { // kernelNames is a map of kernel names to make easy lookup for HasKernelWithName kernelNames map[string]struct{} + + // CompressedPayloads is the number of compressed payloads found in the fatbin + CompressedPayloads int + + // UncompressedPayloads is the number of uncompressed payloads found in the fatbin + UncompressedPayloads int } // NewFatbin creates a new Fatbin instance @@ -64,6 +72,16 @@ func (fb *Fatbin) GetKernel(name string, smVersion uint32) *CubinKernel { return fb.kernels[key] } +// GetKernels returns an iterator over the kernels in the fatbin +func (fb *Fatbin) GetKernels() iter.Seq[*CubinKernel] { + return maps.Values(fb.kernels) +} + +// NumKernels returns the number of kernels in the fatbin +func (fb *Fatbin) NumKernels() int { + return len(fb.kernels) +} + // HasKernelWithName returns true if the fatbin has a kernel with the given name func (fb *Fatbin) HasKernelWithName(name string) bool { _, ok := fb.kernelNames[name] @@ -244,6 +262,7 @@ func parseFatbinData(buffer io.ReadSeeker, fatbin *Fatbin) error { // have once uncompressed. If it's zero, the payload is not compressed. var payload []byte if fbData.UncompressedPayloadSize != 0 { + fatbin.CompressedPayloads++ compressedPayload := make([]byte, fbData.PaddedPayloadSize) _, err := io.ReadFull(buffer, compressedPayload) if err != nil { @@ -259,6 +278,7 @@ func parseFatbinData(buffer io.ReadSeeker, fatbin *Fatbin) error { return fmt.Errorf("failed to decompress fatbin payload: %w", err) } } else { + fatbin.UncompressedPayloads++ payload = make([]byte, fbData.PaddedPayloadSize) _, err := io.ReadFull(buffer, payload) if err != nil { diff --git a/pkg/gpu/ebpf/kprobe_types.go b/pkg/gpu/ebpf/kprobe_types.go index dcb5f6ac450c7..3227ac57d322a 100644 --- a/pkg/gpu/ebpf/kprobe_types.go +++ b/pkg/gpu/ebpf/kprobe_types.go @@ -3,7 +3,9 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2024-present Datadog, Inc. 
-//go:build ignore +//go:build ignore || generate + +//go:generate go run golang.org/x/tools/cmd/stringer@latest -output kprobe_types_string_linux.go -type=CudaEventType -linecomment package ebpf @@ -25,10 +27,10 @@ type CudaMemEventType C.cuda_memory_event_type_t type CudaSetDeviceEvent C.cuda_set_device_event_t -const CudaEventTypeKernelLaunch = C.cuda_kernel_launch -const CudaEventTypeMemory = C.cuda_memory_event -const CudaEventTypeSync = C.cuda_sync -const CudaEventTypeSetDevice = C.cuda_set_device +const CudaEventTypeKernelLaunch CudaEventType = C.cuda_kernel_launch +const CudaEventTypeMemory CudaEventType = C.cuda_memory_event +const CudaEventTypeSync CudaEventType = C.cuda_sync +const CudaEventTypeSetDevice CudaEventType = C.cuda_set_device const CudaMemAlloc = C.cudaMalloc const CudaMemFree = C.cudaFree diff --git a/pkg/gpu/ebpf/kprobe_types_linux.go b/pkg/gpu/ebpf/kprobe_types_linux.go index 1a32675a49d9f..7e13a812226db 100644 --- a/pkg/gpu/ebpf/kprobe_types_linux.go +++ b/pkg/gpu/ebpf/kprobe_types_linux.go @@ -45,10 +45,10 @@ type CudaSetDeviceEvent struct { Pad_cgo_0 [4]byte } -const CudaEventTypeKernelLaunch = 0x0 -const CudaEventTypeMemory = 0x1 -const CudaEventTypeSync = 0x2 -const CudaEventTypeSetDevice = 0x3 +const CudaEventTypeKernelLaunch CudaEventType = 0x0 +const CudaEventTypeMemory CudaEventType = 0x1 +const CudaEventTypeSync CudaEventType = 0x2 +const CudaEventTypeSetDevice CudaEventType = 0x3 const CudaMemAlloc = 0x0 const CudaMemFree = 0x1 diff --git a/pkg/gpu/ebpf/kprobe_types_string_linux.go b/pkg/gpu/ebpf/kprobe_types_string_linux.go new file mode 100644 index 0000000000000..031a257f856dd --- /dev/null +++ b/pkg/gpu/ebpf/kprobe_types_string_linux.go @@ -0,0 +1,26 @@ +// Code generated by "stringer -output kprobe_types_string_linux.go -type=CudaEventType -linecomment"; DO NOT EDIT. + +package ebpf + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[CudaEventTypeKernelLaunch-0] + _ = x[CudaEventTypeMemory-1] + _ = x[CudaEventTypeSync-2] + _ = x[CudaEventTypeSetDevice-3] +} + +const _CudaEventType_name = "CudaEventTypeKernelLaunchCudaEventTypeMemoryCudaEventTypeSyncCudaEventTypeSetDevice" + +var _CudaEventType_index = [...]uint8{0, 25, 44, 61, 83} + +func (i CudaEventType) String() string { + if i >= CudaEventType(len(_CudaEventType_index)-1) { + return "CudaEventType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _CudaEventType_name[_CudaEventType_index[i]:_CudaEventType_index[i+1]] +} diff --git a/pkg/gpu/probe.go b/pkg/gpu/probe.go index fafc90df19c28..a3246848e1303 100644 --- a/pkg/gpu/probe.go +++ b/pkg/gpu/probe.go @@ -33,8 +33,9 @@ import ( ) const ( - gpuAttacherName = "gpu" - gpuModuleName = gpuAttacherName + gpuAttacherName = "gpu" + gpuModuleName = gpuAttacherName + gpuTelemetryModule = gpuModuleName // consumerChannelSize controls the size of the go channel that buffers ringbuffer // events (*ddebpf.RingBufferHandler). 
@@ -98,6 +99,19 @@ type Probe struct { deps ProbeDependencies sysCtx *systemContext eventHandler ddebpf.EventHandler + telemetry *probeTelemetry +} + +type probeTelemetry struct { + sentEntries telemetry.Counter +} + +func newProbeTelemetry(tm telemetry.Component) *probeTelemetry { + subsystem := gpuTelemetryModule + "__probe" + + return &probeTelemetry{ + sentEntries: tm.NewCounter(subsystem, "sent_entries", nil, "Number of GPU events sent to the agent"), + } } // NewProbe creates and starts a GPU monitoring probe, containing relevant eBPF programs (uprobes), the @@ -114,15 +128,16 @@ func NewProbe(cfg *config.Config, deps ProbeDependencies) (*Probe, error) { } attachCfg := getAttacherConfig(cfg) - sysCtx, err := getSystemContext(deps.NvmlLib, cfg.ProcRoot, deps.WorkloadMeta) + sysCtx, err := getSystemContext(deps.NvmlLib, cfg.ProcRoot, deps.WorkloadMeta, deps.Telemetry) if err != nil { return nil, fmt.Errorf("error getting system context: %w", err) } p := &Probe{ - cfg: cfg, - deps: deps, - sysCtx: sysCtx, + cfg: cfg, + deps: deps, + sysCtx: sysCtx, + telemetry: newProbeTelemetry(deps.Telemetry), } allowRC := cfg.EnableRuntimeCompiler && cfg.AllowRuntimeCompiledFallback @@ -154,9 +169,9 @@ func NewProbe(cfg *config.Config, deps ProbeDependencies) (*Probe, error) { return nil, fmt.Errorf("error creating uprobes attacher: %w", err) } - p.consumer = newCudaEventConsumer(sysCtx, p.eventHandler, p.cfg) + p.consumer = newCudaEventConsumer(sysCtx, p.eventHandler, p.cfg, deps.Telemetry) //TODO: decouple this to avoid sharing streamHandlers between consumer and statsGenerator - p.statsGenerator = newStatsGenerator(sysCtx, p.consumer.streamHandlers) + p.statsGenerator = newStatsGenerator(sysCtx, p.consumer.streamHandlers, deps.Telemetry) if err = p.start(); err != nil { return nil, err @@ -195,6 +210,7 @@ func (p *Probe) GetAndFlush() (*model.GPUStats, error) { return nil, fmt.Errorf("error getting current time: %w", err) } stats := p.statsGenerator.getStats(now) + p.telemetry.sentEntries.Add(float64(len(stats.Metrics))) p.cleanupFinished() return stats, nil @@ -217,8 +233,7 @@ func (p *Probe) initRCGPU(cfg *config.Config) error { func (p *Probe) initCOREGPU(cfg *config.Config) error { asset := getAssetName("gpu", cfg.BPFDebug) - var err error //nolint:gosimple // TODO - err = ddebpf.LoadCOREAsset(asset, func(ar bytecode.AssetReader, o manager.Options) error { + err := ddebpf.LoadCOREAsset(asset, func(ar bytecode.AssetReader, o manager.Options) error { return p.setupManager(ar, o) }) return err diff --git a/pkg/gpu/probe_test.go b/pkg/gpu/probe_test.go index ff2834b0327bc..7727f30724bf5 100644 --- a/pkg/gpu/probe_test.go +++ b/pkg/gpu/probe_test.go @@ -48,6 +48,7 @@ func (s *probeTestSuite) getProbe() *Probe { NvmlLib: testutil.GetBasicNvmlMock(), ProcessMonitor: consumerstestutil.NewTestProcessConsumer(t), WorkloadMeta: testutil.GetWorkloadMetaMock(t), + Telemetry: testutil.GetTelemetryMock(t), } probe, err := NewProbe(cfg, deps) require.NoError(t, err) diff --git a/pkg/gpu/stats.go b/pkg/gpu/stats.go index ba50db6c5226a..16957c5fcac01 100644 --- a/pkg/gpu/stats.go +++ b/pkg/gpu/stats.go @@ -12,6 +12,7 @@ import ( "github.com/NVIDIA/go-nvml/pkg/nvml" + "github.com/DataDog/datadog-agent/comp/core/telemetry" "github.com/DataDog/datadog-agent/pkg/collector/corechecks/gpu/model" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -25,9 +26,14 @@ type statsGenerator struct { currGenerationKTime int64 // currGenerationTime is the kernel time of the 
current stats generation. aggregators map[model.StatsKey]*aggregator // aggregators contains the map of aggregators sysCtx *systemContext // sysCtx is the system context with global GPU-system data + telemetry *statsGeneratorTelemetry // telemetry contains the telemetry component for the stats generator } -func newStatsGenerator(sysCtx *systemContext, streamHandlers map[streamKey]*StreamHandler) *statsGenerator { +type statsGeneratorTelemetry struct { + aggregators telemetry.Gauge +} + +func newStatsGenerator(sysCtx *systemContext, streamHandlers map[streamKey]*StreamHandler, tm telemetry.Component) *statsGenerator { currKTime, _ := ddebpf.NowNanoseconds() return &statsGenerator{ streamHandlers: streamHandlers, @@ -35,6 +41,14 @@ func newStatsGenerator(sysCtx *systemContext, streamHandlers map[streamKey]*Stre lastGenerationKTime: currKTime, currGenerationKTime: currKTime, sysCtx: sysCtx, + telemetry: newStatsGeneratorTelemetry(tm), + } +} + +func newStatsGeneratorTelemetry(tm telemetry.Component) *statsGeneratorTelemetry { + subsystem := gpuTelemetryModule + "__stats_generator" + return &statsGeneratorTelemetry{ + aggregators: tm.NewGauge(subsystem, "aggregators", nil, "Number of active GPU stats aggregators"), } } @@ -81,6 +95,8 @@ func (g *statsGenerator) getStats(nowKtime int64) *model.GPUStats { stats.Metrics = append(stats.Metrics, entry) } + g.telemetry.aggregators.Set(float64(len(g.aggregators))) + g.lastGenerationKTime = g.currGenerationKTime return stats @@ -132,4 +148,6 @@ func (g *statsGenerator) cleanupFinishedAggregators() { delete(g.aggregators, pid) } } + + g.telemetry.aggregators.Set(float64(len(g.aggregators))) } diff --git a/pkg/gpu/stats_test.go b/pkg/gpu/stats_test.go index 5aa4b2304cf3a..7880a5ce8b146 100644 --- a/pkg/gpu/stats_test.go +++ b/pkg/gpu/stats_test.go @@ -31,7 +31,7 @@ func getMetricsEntry(key model.StatsKey, stats *model.GPUStats) *model.Utilizati } func getStatsGeneratorForTest(t *testing.T) (*statsGenerator, map[streamKey]*StreamHandler, int64) { - sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), kernel.ProcFSRoot(), testutil.GetWorkloadMetaMock(t)) + sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), kernel.ProcFSRoot(), testutil.GetWorkloadMetaMock(t), testutil.GetTelemetryMock(t)) require.NoError(t, err) require.NotNil(t, sysCtx) @@ -39,7 +39,7 @@ func getStatsGeneratorForTest(t *testing.T) (*statsGenerator, map[streamKey]*Str require.NoError(t, err) streamHandlers := make(map[streamKey]*StreamHandler) - statsGen := newStatsGenerator(sysCtx, streamHandlers) + statsGen := newStatsGenerator(sysCtx, streamHandlers, testutil.GetTelemetryMock(t)) statsGen.lastGenerationKTime = ktime statsGen.currGenerationKTime = ktime require.NotNil(t, statsGen) diff --git a/pkg/gpu/stream.go b/pkg/gpu/stream.go index 031ff228e8a3f..d82a787d17c52 100644 --- a/pkg/gpu/stream.go +++ b/pkg/gpu/stream.go @@ -52,7 +52,6 @@ type streamKey struct { // streamData contains kernel spans and allocations for a stream type streamData struct { - key streamKey //nolint:unused // TODO spans []*kernelSpan allocations []*memoryAllocation } diff --git a/pkg/gpu/stream_test.go b/pkg/gpu/stream_test.go index 0e4f0a5b734c8..422f55e6d9ac7 100644 --- a/pkg/gpu/stream_test.go +++ b/pkg/gpu/stream_test.go @@ -22,7 +22,7 @@ import ( ) func getSystemContextForTest(t *testing.T) *systemContext { - sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), kernel.ProcFSRoot(), testutil.GetWorkloadMetaMock(t)) + sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), 
kernel.ProcFSRoot(), testutil.GetWorkloadMetaMock(t), testutil.GetTelemetryMock(t)) require.NoError(t, err) require.NotNil(t, sysCtx) @@ -35,7 +35,7 @@ func TestKernelLaunchesHandled(t *testing.T) { kernStartTime := uint64(1) launch := &gpuebpf.CudaKernelLaunch{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeKernelLaunch, + Type: uint32(gpuebpf.CudaEventTypeKernelLaunch), Pid_tgid: 1, Ktime_ns: kernStartTime, Stream_id: 1, @@ -96,7 +96,7 @@ func TestMemoryAllocationsHandled(t *testing.T) { allocation := &gpuebpf.CudaMemEvent{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeMemory, + Type: uint32(gpuebpf.CudaEventTypeMemory), Pid_tgid: 1, Ktime_ns: memAllocTime, Stream_id: 1, @@ -108,7 +108,7 @@ func TestMemoryAllocationsHandled(t *testing.T) { free := &gpuebpf.CudaMemEvent{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeMemory, + Type: uint32(gpuebpf.CudaEventTypeMemory), Pid_tgid: 1, Ktime_ns: memFreeTime, Stream_id: 1, @@ -164,7 +164,7 @@ func TestMemoryAllocationsDetectLeaks(t *testing.T) { allocation := &gpuebpf.CudaMemEvent{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeMemory, + Type: uint32(gpuebpf.CudaEventTypeMemory), Pid_tgid: 1, Ktime_ns: memAllocTime, Stream_id: 1, @@ -202,7 +202,7 @@ func TestMemoryAllocationsNoCrashOnInvalidFree(t *testing.T) { allocation := &gpuebpf.CudaMemEvent{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeMemory, + Type: uint32(gpuebpf.CudaEventTypeMemory), Pid_tgid: 1, Ktime_ns: memAllocTime, Stream_id: 1, @@ -214,7 +214,7 @@ func TestMemoryAllocationsNoCrashOnInvalidFree(t *testing.T) { free := &gpuebpf.CudaMemEvent{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeMemory, + Type: uint32(gpuebpf.CudaEventTypeMemory), Pid_tgid: 1, Ktime_ns: memFreeTime, Stream_id: 1, @@ -240,7 +240,7 @@ func TestMemoryAllocationsMultipleAllocsHandled(t *testing.T) { allocation1 := &gpuebpf.CudaMemEvent{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeMemory, + Type: uint32(gpuebpf.CudaEventTypeMemory), Pid_tgid: 1, Ktime_ns: memAllocTime1, Stream_id: 1, @@ -252,7 +252,7 @@ func TestMemoryAllocationsMultipleAllocsHandled(t *testing.T) { free1 := &gpuebpf.CudaMemEvent{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeMemory, + Type: uint32(gpuebpf.CudaEventTypeMemory), Pid_tgid: 1, Ktime_ns: memFreeTime1, Stream_id: 1, @@ -263,7 +263,7 @@ func TestMemoryAllocationsMultipleAllocsHandled(t *testing.T) { allocation2 := &gpuebpf.CudaMemEvent{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeMemory, + Type: uint32(gpuebpf.CudaEventTypeMemory), Pid_tgid: 1, Ktime_ns: memAllocTime2, Stream_id: 1, @@ -275,7 +275,7 @@ func TestMemoryAllocationsMultipleAllocsHandled(t *testing.T) { free2 := &gpuebpf.CudaMemEvent{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeMemory, + Type: uint32(gpuebpf.CudaEventTypeMemory), Pid_tgid: 1, Ktime_ns: memFreeTime2, Stream_id: 1, @@ -322,7 +322,7 @@ func TestMemoryAllocationsMultipleAllocsHandled(t *testing.T) { func TestKernelLaunchesIncludeEnrichedKernelData(t *testing.T) { proc := kernel.ProcFSRoot() - sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), proc, testutil.GetWorkloadMetaMock(t)) + sysCtx, err := getSystemContext(testutil.GetBasicNvmlMock(), proc, testutil.GetWorkloadMetaMock(t), testutil.GetTelemetryMock(t)) require.NoError(t, err) // Set up the caches in system context so no actual queries are done @@ -364,7 +364,7 @@ func TestKernelLaunchesIncludeEnrichedKernelData(t *testing.T) { 
kernStartTime := uint64(1) launch := &gpuebpf.CudaKernelLaunch{ Header: gpuebpf.CudaEventHeader{ - Type: gpuebpf.CudaEventTypeKernelLaunch, + Type: uint32(gpuebpf.CudaEventTypeKernelLaunch), Pid_tgid: uint64(pid<<32 + tid), Ktime_ns: kernStartTime, Stream_id: 1, diff --git a/pkg/gpu/testutil/mocks.go b/pkg/gpu/testutil/mocks.go index 009385125600a..3ac12184aac2c 100644 --- a/pkg/gpu/testutil/mocks.go +++ b/pkg/gpu/testutil/mocks.go @@ -18,6 +18,8 @@ import ( "go.uber.org/fx" "github.com/DataDog/datadog-agent/comp/core" + "github.com/DataDog/datadog-agent/comp/core/telemetry" + "github.com/DataDog/datadog-agent/comp/core/telemetry/telemetryimpl" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" @@ -45,6 +47,23 @@ var GPUCores = []int{DefaultGpuCores, 20, 30, 40, 50, 60, 70} // DefaultGpuUUID is the UUID for the default device returned by the mock var DefaultGpuUUID = GPUUUIDs[0] +// DefaultGPUName is the name for the default device returned by the mock +var DefaultGPUName = "Tesla T4" + +// DefaultGPUComputeCapMajor is the major number for the compute capabilities for the default device returned by the mock +var DefaultGPUComputeCapMajor = 7 + +// DefaultGPUComputeCapMinor is the minor number for the compute capabilities for the default device returned by the mock +var DefaultGPUComputeCapMinor = 5 + +// DefaultGPUArch is the architecture for the default device returned by the mock +var DefaultGPUArch = nvml.DeviceArchitecture(nvml.DEVICE_ARCH_HOPPER) + +// DefaultGPUAttributes is the attributes for the default device returned by the mock +var DefaultGPUAttributes = nvml.DeviceAttributes{ + MultiprocessorCount: 10, +} + // GetDeviceMock returns a mock of the nvml.Device with the given UUID. func GetDeviceMock(deviceIdx int) *nvmlmock.Device { return &nvmlmock.Device{ @@ -57,6 +76,15 @@ func GetDeviceMock(deviceIdx int) *nvmlmock.Device { GetUUIDFunc: func() (string, nvml.Return) { return GPUUUIDs[deviceIdx], nvml.SUCCESS }, + GetNameFunc: func() (string, nvml.Return) { + return DefaultGPUName, nvml.SUCCESS + }, + GetArchitectureFunc: func() (nvml.DeviceArchitecture, nvml.Return) { + return DefaultGPUArch, nvml.SUCCESS + }, + GetAttributesFunc: func() (nvml.DeviceAttributes, nvml.Return) { + return DefaultGPUAttributes, nvml.SUCCESS + }, } } @@ -89,6 +117,11 @@ func GetWorkloadMetaMock(t *testing.T) workloadmetamock.Mock { )) } +// GetTelemetryMock returns a mock of the telemetry.Component. 
+func GetTelemetryMock(t *testing.T) telemetry.Component { + return fxutil.Test[telemetry.Component](t, telemetryimpl.MockModule()) +} + // RequireDevicesEqual checks that the two devices are equal by comparing their UUIDs, which gives a better // output than using require.Equal on the devices themselves func RequireDevicesEqual(t *testing.T, expected, actual nvml.Device, msgAndArgs ...interface{}) { diff --git a/pkg/gpu/testutil/samplebins.go b/pkg/gpu/testutil/samplebins.go index c38865a405909..d6bf97b2ad283 100644 --- a/pkg/gpu/testutil/samplebins.go +++ b/pkg/gpu/testutil/samplebins.go @@ -46,7 +46,8 @@ const ( MinimalDockerImage dockerImage = "alpine:3.20.3" ) -type SampleArgs struct { //nolint:revive // TODO +// SampleArgs holds arguments for the sample binary +type SampleArgs struct { // StartWaitTimeSec represents the time in seconds to wait before the binary starting the CUDA calls StartWaitTimeSec int diff --git a/pkg/jmxfetch/jmxfetch.go b/pkg/jmxfetch/jmxfetch.go index 11d8bdfc2a12b..852ff8ef20a04 100644 --- a/pkg/jmxfetch/jmxfetch.go +++ b/pkg/jmxfetch/jmxfetch.go @@ -44,6 +44,14 @@ const ( jmxAllowAttachSelf = " -Djdk.attach.allowAttachSelf=true" ) +type DSDStatus int + +const ( + DSDStatusRunningUDSDatagram DSDStatus = iota + 1 + DSDStatusRunningUDP + DSDStatusUnknown +) + var ( jmxLogLevelMap = map[string]string{ "trace": "TRACE", @@ -208,9 +216,16 @@ func (j *JMXFetch) Start(manage bool) error { case ReporterJSON: reporter = "json" default: - if j.DSD != nil && j.DSD.UdsListenerRunning() { + dsdStatus := j.getDSDStatus() + if dsdStatus == DSDStatusRunningUDSDatagram { reporter = fmt.Sprintf("statsd:unix://%s", pkgconfigsetup.Datadog().GetString("dogstatsd_socket")) } else { + // We always use UDP if we don't definitively detect UDS running, but we want to let the user know if we + // actually detected that UDP should be running, or if we're just in fallback mode. + if dsdStatus == DSDStatusUnknown { + log.Warnf("DogStatsD status is unknown, falling back to UDP. JMXFetch may not be able to report metrics.") + } + bindHost := pkgconfigsetup.GetBindHost(pkgconfigsetup.Datadog()) if bindHost == "" || bindHost == "0.0.0.0" { bindHost = "localhost" @@ -496,3 +511,24 @@ func (j *JMXFetch) ConfigureFromInstance(instance integration.Data) error { return nil } + +func (j *JMXFetch) getDSDStatus() DSDStatus { + // Three possible states: DSD is running in the Core Agent, DSD is running via ADP, or the DSD status is unknown. + // + // We detect these through the `use_dogstatsd` configuration and the `DD_ADP_ENABLED` environment variable, and we + // detect whether or not we're listening on UDS or UDP via the configuration settings that define their listening + // address. + dsdEnabledInternally := pkgconfigsetup.Datadog().GetBool("use_dogstatsd") + adpEnabled := os.Getenv("DD_ADP_ENABLED") == "true" + dsdEnabled := dsdEnabledInternally || adpEnabled + udsEnabled := pkgconfigsetup.Datadog().GetString("dogstatsd_socket") != "" + udpEnabled := pkgconfigsetup.Datadog().GetInt("dogstatsd_port") != 0 + + if dsdEnabled && udsEnabled { + return DSDStatusRunningUDSDatagram + } else if dsdEnabled && udpEnabled { + return DSDStatusRunningUDP + } else { + return DSDStatusUnknown + } +} diff --git a/pkg/jmxfetch/utils.go b/pkg/jmxfetch/utils.go new file mode 100644 index 0000000000000..f0f2bcdf29b3b --- /dev/null +++ b/pkg/jmxfetch/utils.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build jmx + +package jmxfetch + +import "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" + +// GetJSONSerializableMap returns a JSON serializable map from a raw map +func GetJSONSerializableMap(m interface{}) interface{} { + switch x := m.(type) { + // unbelievably I cannot collapse this into the next (identical) case + case map[interface{}]interface{}: + j := integration.JSONMap{} + for k, v := range x { + j[k.(string)] = GetJSONSerializableMap(v) + } + return j + case integration.RawMap: + j := integration.JSONMap{} + for k, v := range x { + j[k.(string)] = GetJSONSerializableMap(v) + } + return j + case integration.JSONMap: + j := integration.JSONMap{} + for k, v := range x { + j[k] = GetJSONSerializableMap(v) + } + return j + case []interface{}: + j := make([]interface{}, len(x)) + + for i, v := range x { + j[i] = GetJSONSerializableMap(v) + } + return j + } + return m +} diff --git a/pkg/util/common_test.go b/pkg/jmxfetch/utils_test.go similarity index 58% rename from pkg/util/common_test.go rename to pkg/jmxfetch/utils_test.go index 6d59ec89c2056..b49dddee64c0c 100644 --- a/pkg/util/common_test.go +++ b/pkg/jmxfetch/utils_test.go @@ -3,24 +3,23 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package util +//go:build jmx + +package jmxfetch import ( "encoding/json" "fmt" "os" - "path/filepath" "testing" "github.com/stretchr/testify/assert" + "gopkg.in/yaml.v2" "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - - "gopkg.in/yaml.v2" ) func TestJSONConverter(t *testing.T) { - checks := []string{ "cassandra", "kafka", @@ -33,7 +32,7 @@ func TestJSONConverter(t *testing.T) { var cf integration.RawMap // Read file contents - yamlFile, err := os.ReadFile(fmt.Sprintf("../jmxfetch/fixtures/%s.yaml", c)) + yamlFile, err := os.ReadFile(fmt.Sprintf("./fixtures/%s.yaml", c)) assert.NoError(t, err) // Parse configuration @@ -55,32 +54,3 @@ func TestJSONConverter(t *testing.T) { _, err := json.Marshal(GetJSONSerializableMap(j)) assert.NoError(t, err) } - -func TestCopyDir(t *testing.T) { - assert := assert.New(t) - src := t.TempDir() - dst := t.TempDir() - - files := map[string]string{ - "a/b/c/d.txt": "d.txt", - "e/f/g/h.txt": "h.txt", - "i/j/k.txt": "k.txt", - } - - for file, content := range files { - p := filepath.Join(src, file) - err := os.MkdirAll(filepath.Dir(p), os.ModePerm) - assert.NoError(err) - err = os.WriteFile(p, []byte(content), os.ModePerm) - assert.NoError(err) - } - err := CopyDir(src, dst) - assert.NoError(err) - - for file, content := range files { - p := filepath.Join(dst, file) - actual, err := os.ReadFile(p) - assert.NoError(err) - assert.Equal(string(actual), content) - } -} diff --git a/pkg/linters/components/pkgconfigusage/go.mod b/pkg/linters/components/pkgconfigusage/go.mod index 9b24d1e11940e..019a468ac9f26 100644 --- a/pkg/linters/components/pkgconfigusage/go.mod +++ b/pkg/linters/components/pkgconfigusage/go.mod @@ -5,7 +5,7 @@ go 1.23.0 require ( github.com/golangci/plugin-module-register v0.1.1 github.com/stretchr/testify v1.10.0 - golang.org/x/tools v0.28.0 + golang.org/x/tools v0.29.0 ) require ( diff --git a/pkg/linters/components/pkgconfigusage/go.sum b/pkg/linters/components/pkgconfigusage/go.sum index 11aca8345b971..b3a551976d8ab 100644 --- a/pkg/linters/components/pkgconfigusage/go.sum +++ 
b/pkg/linters/components/pkgconfigusage/go.sum @@ -24,8 +24,8 @@ golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/logs/auditor/go.mod b/pkg/logs/auditor/go.mod index f9543ab0dda08..987dba18980b2 100644 --- a/pkg/logs/auditor/go.mod +++ b/pkg/logs/auditor/go.mod @@ -30,7 +30,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../util/statstracker @@ -47,7 +47,7 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/stretchr/testify v1.10.0 ) @@ -55,22 +55,22 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -82,24 +82,24 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/auditor/go.sum b/pkg/logs/auditor/go.sum index 7fdf16db5981c..15a68c06d091d 100644 --- a/pkg/logs/auditor/go.sum +++ b/pkg/logs/auditor/go.sum @@ -71,7 +71,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -109,8 +108,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -137,8 +136,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -155,8 +154,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -169,8 +168,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod 
h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -181,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -235,8 +234,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -273,8 +272,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -302,8 +301,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/logs/client/go.mod b/pkg/logs/client/go.mod index 07b8c9ec2e3be..d0f87df0304b0 100644 --- a/pkg/logs/client/go.mod +++ b/pkg/logs/client/go.mod @@ -36,7 +36,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/http => ../../util/http github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../util/statstracker @@ -51,7 +51,7 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 @@ -60,10 +60,10 @@ require ( github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/DataDog/datadog-agent/pkg/version v0.59.1 github.com/stretchr/testify v1.10.0 - golang.org/x/net v0.33.0 + golang.org/x/net v0.34.0 ) require ( @@ -72,23 +72,23 @@ require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect 
github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/benbjohnson/clock v1.3.5 // indirect @@ -104,21 +104,21 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -130,10 +130,10 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect 
golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/client/go.sum b/pkg/logs/client/go.sum index b14b43571558e..2ec5d1c55647e 100644 --- a/pkg/logs/client/go.sum +++ b/pkg/logs/client/go.sum @@ -73,7 +73,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -115,8 +114,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -143,8 +142,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -161,8 +160,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -176,8 +175,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -188,8 +187,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -244,8 +243,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= 
+golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -263,8 +262,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -284,8 +283,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -313,8 +312,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff 
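The hunk below drops the rule that forced the logs HTTP client down to HTTP/1.1 whenever a proxy was configured; transport selection now follows logs_config.http_protocol alone, so HTTP/2 can be negotiated through a proxy. A rough, standalone sketch of that kind of selection follows, assuming a hypothetical newLogsHTTPClient helper and only the "http1"/"auto" values; it is not the agent's actual httpClientFactory.

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"time"
)

// newLogsHTTPClient is a hypothetical helper: "auto" lets the standard library
// negotiate HTTP/2 via ALPN (even when a proxy is configured), while "http1"
// pins the client to HTTP/1.1 by disabling the transport's HTTP/2 upgrade.
func newLogsHTTPClient(transportConfig string, timeout time.Duration) *http.Client {
	tr := &http.Transport{
		Proxy: http.ProxyFromEnvironment, // a configured proxy no longer forces HTTP/1.1
	}
	switch transportConfig {
	case "http1":
		// A non-nil, empty TLSNextProto map disables automatic HTTP/2 support.
		tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
	default: // "auto"
		tr.ForceAttemptHTTP2 = true
	}
	return &http.Client{Transport: tr, Timeout: timeout}
}

func main() {
	client := newLogsHTTPClient("auto", 5*time.Second)
	fmt.Printf("client timeout: %s\n", client.Timeout)
}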
--git a/pkg/logs/client/http/destination.go b/pkg/logs/client/http/destination.go
index 93bf35ec67167..71af3e9ae5c9c 100644
--- a/pkg/logs/client/http/destination.go
+++ b/pkg/logs/client/http/destination.go
@@ -397,12 +397,6 @@ func httpClientFactory(timeout time.Duration, cfg pkgconfigmodel.Reader) func()
 	transportConfig := cfg.Get("logs_config.http_protocol")
 
-	// If any proxy is set, use http1
-	// This will be removed in a future version
-	if cfg.GetProxies() != nil {
-		transportConfig = "http1"
-	}
-
 	// Configure transport based on user setting
 	switch transportConfig {
 	case "http1":
diff --git a/pkg/logs/client/http/destination_test.go b/pkg/logs/client/http/destination_test.go
index 3f65bba9b9e2c..3592efaf9db63 100644
--- a/pkg/logs/client/http/destination_test.go
+++ b/pkg/logs/client/http/destination_test.go
@@ -495,7 +495,7 @@ func TestTransportProtocol_HTTP1FallBack(t *testing.T) {
 	assert.Equal(t, "HTTP/1.1", resp.Proto)
 }
 
-func TestTransportProtocol_HTTP1WhenUsingProxy(t *testing.T) {
+func TestTransportProtocol_HTTP2WhenUsingProxy(t *testing.T) {
 	c := configmock.New(t)
 
 	// Force client to use ALNP
@@ -503,7 +503,7 @@ func TestTransportProtocol_HTTP1WhenUsingProxy(t *testing.T) {
 	c.SetWithoutSource("skip_ssl_validation", true)
 
 	// The test server uses TLS, so if we set the http proxy (not https), it still makes
-	// a request to the test server, but disable HTTP/2 since a proxy is configured.
+	// a request to the test server
 	c.SetWithoutSource("proxy.http", "http://foo.bar")
 
 	server := NewTestHTTPSServer(false)
@@ -522,6 +522,38 @@ func TestTransportProtocol_HTTP1WhenUsingProxy(t *testing.T) {
 	}
 	defer resp.Body.Close()
 
+	// Assert that the server chose HTTP/2.0 even though a proxy was configured
+	assert.Equal(t, "HTTP/2.0", resp.Proto)
+}
+
+func TestTransportProtocol_HTTP1FallBackWhenUsingProxy(t *testing.T) {
+	c := configmock.New(t)
+
+	// Force client to use ALNP
+	c.SetWithoutSource("logs_config.http_protocol", "auto")
+	c.SetWithoutSource("skip_ssl_validation", true)
+
+	// The test server uses TLS, so if we set the http proxy (not https), it still makes
+	// a request to the test server
+	c.SetWithoutSource("proxy.http", "http://foo.bar")
+
+	// Start the test server that only supports HTTP/1.1
+	server := NewTestHTTPSServer(true)
+	defer server.Close()
+
+	timeout := 5 * time.Second
+	client := httpClientFactory(timeout, c)()
+
+	req, err := http.NewRequest("POST", server.URL, nil)
+	if err != nil {
+		t.Fatalf("Failed to create request: %v", err)
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		t.Fatalf("Failed to send request: %v", err)
+	}
+	defer resp.Body.Close()
+
+	// Assert that the server chose HTTP/1.1 because it only supports HTTP/1.1
 	assert.Equal(t, "HTTP/1.1", resp.Proto)
 }
diff --git a/pkg/logs/client/tcp/destination.go b/pkg/logs/client/tcp/destination.go
index 1934ea2b3c930..3b88e1822d893 100644
--- a/pkg/logs/client/tcp/destination.go
+++ b/pkg/logs/client/tcp/destination.go
@@ -6,7 +6,9 @@
 package tcp
 
 import (
+	"context"
 	"expvar"
+	"fmt"
 	"net"
 	"sync"
 	"time"
@@ -158,3 +160,15 @@ func (d *Destination) updateRetryState(err error, isRetrying chan bool) {
 	}
 	d.lastRetryError = err
 }
+
+// CheckConnectivityDiagnose runs a connectivity diagnosis for TCP connections
+func CheckConnectivityDiagnose(endpoint config.Endpoint, timeoutSeconds int) (url string, err error) {
+	operationTimeout := time.Second * time.Duration(timeoutSeconds)
+	connManager := NewConnectionManager(endpoint, statusinterface.NewNoopStatusProvider())
+	ctx, cancel := context.WithTimeout(context.Background(), operationTimeout)
+	defer cancel()
+
+	_, err = connManager.NewConnection(ctx)
+
+	return fmt.Sprintf("%s:%d", endpoint.Host, endpoint.Port), err
+}
diff --git a/pkg/logs/client/tcp/destination_test.go b/pkg/logs/client/tcp/destination_test.go
index 3a9c299d16efb..591e0e1cb1dff 100644
--- a/pkg/logs/client/tcp/destination_test.go
+++ b/pkg/logs/client/tcp/destination_test.go
@@ -6,11 +6,16 @@
 package tcp
 
 import (
+	"context"
+	"net"
+	"strconv"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 
 	"github.com/DataDog/datadog-agent/pkg/logs/client"
+	"github.com/DataDog/datadog-agent/pkg/logs/client/mock"
 	"github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface"
 
 	"github.com/DataDog/datadog-agent/comp/logs/agent/config"
@@ -30,3 +35,85 @@ func TestDestinationHA(t *testing.T) {
 		assert.Equal(t, isEndpointMRF, isDestMRF)
 	}
 }
+
+// TestConnectivityDiagnoseNoBlock ensures the connectivity diagnosis doesn't
+// block
+func TestConnectivityDiagnoseNoBlock(t *testing.T) {
+	endpoint := config.NewEndpoint("00000000", "host", 0, true)
+	done := make(chan struct{})
+
+	go func() {
+		CheckConnectivityDiagnose(endpoint, 1)
+		close(done)
+	}()
+
+	select {
+	case <-done:
+	case <-time.After(10 * time.Second):
+		t.Error("TCP diagnosis check blocked for too long.")
+	}
+}
+
+// TestConnectivityDiagnoseOperationSuccess ensures the connectivity diagnosis connects
+// successfully
+func TestConnectivityDiagnoseOperationSuccess(t *testing.T) {
+	// Start the test TCP server
+	intake := mock.NewMockLogsIntake(t)
+	serverAddr := intake.Addr().String()
+
+	// Simulate a client connecting to the server
+	conn, err := net.Dial("tcp", serverAddr)
+	if err != nil {
+		t.Fatalf("Failed to connect to test TCP server: %v", err)
+	}
+	defer conn.Close()
+
+	host, port, err := net.SplitHostPort(serverAddr)
+	assert.Nil(t, err)
+	portInt, err := strconv.Atoi(port)
+	assert.Nil(t, err)
+
+	testSuccessEndpoint := config.NewEndpoint("api-key", host, portInt, false)
+	connManager := NewConnectionManager(testSuccessEndpoint, statusinterface.NewNoopStatusProvider())
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	_, err = connManager.NewConnection(ctx)
+	assert.Nil(t, err)
+}
+
+// TestConnectivityDiagnoseOperationFail ensures the connectivity diagnosis fails
+// when provided with incorrect information
+func TestConnectivityDiagnoseOperationFail(t *testing.T) {
+	// Start the test TCP server
+	intake := mock.NewMockLogsIntake(t)
+	serverAddr := intake.Addr().String()
+
+	// Simulate a client connecting to the server
+	conn, err := net.Dial("tcp", serverAddr)
+	if err != nil {
+		t.Fatalf("Failed to connect to test TCP server: %v", err)
+	}
+	defer conn.Close()
+
+	host, port, err := net.SplitHostPort(serverAddr)
+	assert.Nil(t, err)
+	portInt, err := strconv.Atoi(port)
+	assert.Nil(t, err)
+
+	testFailEndpointWrongAddress := config.NewEndpoint("api-key", "failhost", portInt, false)
+	connManager := NewConnectionManager(testFailEndpointWrongAddress, statusinterface.NewNoopStatusProvider())
+	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	_, err = connManager.NewConnection(ctx)
+	assert.NotNil(t, err)
+
+	testFailEndpointWrongPort := config.NewEndpoint("api-key", host, portInt+1, false)
+	connManager = NewConnectionManager(testFailEndpointWrongPort, statusinterface.NewNoopStatusProvider())
+	ctx, cancel = context.WithTimeout(context.Background(), 1*time.Second)
+	defer cancel()
+
+	_, err = connManager.NewConnection(ctx)
+	assert.NotNil(t,
err) +} diff --git a/pkg/logs/diagnostic/go.mod b/pkg/logs/diagnostic/go.mod index 690eb4c56105e..a43206d0903fb 100644 --- a/pkg/logs/diagnostic/go.mod +++ b/pkg/logs/diagnostic/go.mod @@ -32,7 +32,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../util/statstracker @@ -57,24 +57,24 @@ require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -87,16 +87,16 @@ require ( github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + 
github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -108,8 +108,8 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/diagnostic/go.sum b/pkg/logs/diagnostic/go.sum index dd16364891695..77eac717c35df 100644 --- a/pkg/logs/diagnostic/go.sum +++ b/pkg/logs/diagnostic/go.sum @@ -72,7 +72,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -110,8 +109,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -138,8 +137,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -156,8 +155,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -171,8 +170,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -183,8 +182,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 
h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -239,8 +238,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -277,8 +276,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -306,8 +305,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go index 36892db9980f2..d47061f342bd1 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator.go @@ -18,12 +18,14 @@ import ( type bucket struct { tagTruncatedLogs bool tagMultiLineLogs bool + maxContentSize int message *message.Message originalDataLen int buffer *bytes.Buffer lineCount int - truncated bool + shouldTruncate bool + needsTruncation bool } func (b *bucket) add(msg *message.Message) { @@ -42,25 +44,39 @@ func (b *bucket) isEmpty() bool { return b.originalDataLen == 0 } -func (b *bucket) truncate() { - b.buffer.Write(message.TruncatedFlag) - b.truncated = true +func (b *bucket) reset() { + b.buffer.Reset() + b.message = nil + b.lineCount = 0 + b.originalDataLen = 0 + b.needsTruncation = false } func (b *bucket) flush() *message.Message { - defer func() { - b.buffer.Reset() - b.message = nil - b.lineCount = 0 - b.originalDataLen = 0 - b.truncated = false - }() + defer b.reset() + + lastWasTruncated := b.shouldTruncate + b.shouldTruncate = b.buffer.Len() >= b.maxContentSize || b.needsTruncation data := bytes.TrimSpace(b.buffer.Bytes()) content := make([]byte, len(data)) copy(content, data) - msg := message.NewRawMessage(content, b.message.Status, b.originalDataLen, b.message.ParsingExtra.Timestamp) + if lastWasTruncated { + // The previous line has been truncated because it was too long, + // the new line is just the remainder. Add the truncated flag at + // the beginning of the content. + content = append(message.TruncatedFlag, content...) + } + + if b.shouldTruncate { + // The current line is too long. Mark it truncated at the end. + content = append(content, message.TruncatedFlag...) + } + + msg := b.message + msg.SetContent(content) + msg.RawDataLen = b.originalDataLen tlmTags := []string{"false", "single_line"} if b.lineCount > 1 { @@ -71,11 +87,15 @@ func (b *bucket) flush() *message.Message { } } - if b.truncated { + if lastWasTruncated || b.shouldTruncate { msg.ParsingExtra.IsTruncated = true tlmTags[0] = "true" if b.tagTruncatedLogs { - msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedReasonTag("auto_multiline")) + if b.lineCount > 1 { + msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedReasonTag("auto_multiline")) + } else { + msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedReasonTag("single_line")) + } } } @@ -103,7 +123,7 @@ func NewAggregator(outputFn func(m *message.Message), maxContentSize int, flushT return &Aggregator{ outputFn: outputFn, - bucket: &bucket{buffer: bytes.NewBuffer(nil), tagTruncatedLogs: tagTruncatedLogs, tagMultiLineLogs: tagMultiLineLogs}, + bucket: &bucket{buffer: bytes.NewBuffer(nil), tagTruncatedLogs: tagTruncatedLogs, tagMultiLineLogs: tagMultiLineLogs, maxContentSize: maxContentSize, lineCount: 0, shouldTruncate: false, needsTruncation: false}, maxContentSize: maxContentSize, flushTimeout: flushTimeout, multiLineMatchInfo: multiLineMatchInfo, @@ -120,34 +140,49 @@ func (a *Aggregator) Aggregate(msg *message.Message, label Label) { // If `noAggregate` - flush the bucket immediately and then flush the next message. 
if label == noAggregate { a.Flush() - a.outputFn(msg) + a.bucket.shouldTruncate = false // noAggregate messages should never be truncated at the beginning (Could break JSON formatted messages) + a.bucket.add(msg) + a.Flush() return } // If `aggregate` and the bucket is empty - flush the next message. if label == aggregate && a.bucket.isEmpty() { - a.outputFn(msg) + a.bucket.add(msg) + a.Flush() return } - // If `startGroup` - flush the bucket. + // If `startGroup` - flush the old bucket to form a new group. if label == startGroup { - a.multiLineMatchInfo.Add(1) a.Flush() + a.multiLineMatchInfo.Add(1) + a.bucket.add(msg) + if msg.RawDataLen >= a.maxContentSize { + // Start group is too big to append anything to, flush it and reset. + a.Flush() + } + return + } - // At this point we either have `startGroup` with an empty bucket or `aggregate` with a non-empty bucket - // so we add the message to the bucket or flush if the bucket will overflow the max content size. - if msg.RawDataLen+a.bucket.buffer.Len() > a.maxContentSize && !a.bucket.isEmpty() { - a.bucket.truncate() // Truncate the end of the current bucket + // Check for a total buffer size larger than the limit. This should only be reachable by an aggregate label + // following a smaller than max-size start group label, and will result in the reset (flush) of the entire bucket. + // This reset will intentionally break multi-line detection and aggregation for logs larger than the limit, because + // doing so is safer than assuming we will correctly get a new startGroup for subsequent single line logs. + if msg.RawDataLen+a.bucket.buffer.Len() >= a.maxContentSize { + a.bucket.needsTruncation = true + a.bucket.lineCount++ // Account for the current (not yet processed) message being part of the same log a.Flush() - a.bucket.truncate() // Truncate the start of the next bucket - } - if !a.bucket.isEmpty() { - a.linesCombinedInfo.Add(1) + a.bucket.lineCount++ // Account for the previous (now flushed) message being part of the same log + a.bucket.add(msg) + a.Flush() + return } + // We're an aggregate label within a startGroup and within the maxContentSize. Append new multiline + a.linesCombinedInfo.Add(1) a.bucket.add(msg) } @@ -184,6 +219,7 @@ func (a *Aggregator) FlushChan() <-chan time.Time { // Flush flushes the aggregator. 
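The aggregator changes above bracket oversized content with message.TruncatedFlag: when the previous flush overflowed, the next payload is prefixed with the flag, and when the current buffer reaches maxContentSize (or needsTruncation was set), the payload is suffixed and the following flush starts truncated. A minimal standalone sketch of that bracketing, using an illustrative truncatedFlag variable and bracketContent helper rather than the agent's message package:

package main

import (
	"bytes"
	"fmt"
)

var truncatedFlag = []byte("...TRUNCATED...")

// bracketContent marks a payload that continues a previously truncated line
// (prefix) and/or hits the maximum content size itself (suffix), mirroring the
// bucket flush logic above in a self-contained form.
func bracketContent(buf *bytes.Buffer, maxContentSize int, lastWasTruncated bool) ([]byte, bool) {
	shouldTruncate := buf.Len() >= maxContentSize

	data := bytes.TrimSpace(buf.Bytes())
	content := make([]byte, len(data))
	copy(content, data)

	if lastWasTruncated {
		// The previous flush overflowed, so this payload is the remainder of that line.
		content = append(truncatedFlag, content...)
	}
	if shouldTruncate {
		// This payload itself hit the limit; the next flush will start truncated.
		content = append(content, truncatedFlag...)
	}
	return content, shouldTruncate
}

func main() {
	buf := bytes.NewBufferString("1234567890")
	out, truncated := bracketContent(buf, 10, false)
	fmt.Printf("%s (next payload starts truncated: %v)\n", out, truncated)
}

Run on a 10-byte payload with a limit of 10, this prints "1234567890...TRUNCATED...", which matches the first expectation in the updated truncation tests further below.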
func (a *Aggregator) Flush() { if a.bucket.isEmpty() { + a.bucket.reset() return } a.outputFn(a.bucket.flush()) diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go index 93906f22ed495..d9b15032f28d5 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/aggregator_test.go @@ -34,6 +34,11 @@ func assertMessageContent(t *testing.T, m *message.Message, content string) { assert.Equal(t, m.IsMultiLine, isMultiLine) } +func assertTrailingMultiline(t *testing.T, m *message.Message, content string) { + assert.Equal(t, content, string(m.GetContent())) + assert.Equal(t, m.IsMultiLine, true) +} + func TestNoAggregate(t *testing.T) { outputChan, outputFn := makeHandler() ag := NewAggregator(outputFn, 100, time.Duration(1*time.Second), false, false, status.NewInfoRegistry()) @@ -127,36 +132,84 @@ func TestTagTruncatedLogs(t *testing.T) { outputChan, outputFn := makeHandler() ag := NewAggregator(outputFn, 10, time.Duration(1*time.Second), true, false, status.NewInfoRegistry()) + // First 3 should be tagged as single line logs since they are too big to aggregate no matter what the label is. ag.Aggregate(newMessage("1234567890"), startGroup) - ag.Aggregate(newMessage("12345678901"), aggregate) // Causes overflow, truncate and flush + ag.Aggregate(newMessage("12345678901"), aggregate) ag.Aggregate(newMessage("12345"), aggregate) - ag.Aggregate(newMessage("6789"), aggregate) - ag.Aggregate(newMessage("3"), noAggregate) + + // Next 3 lines should be tagged as multiline since they were truncated after a group was started + ag.Aggregate(newMessage("1234"), startGroup) + ag.Aggregate(newMessage("5678"), aggregate) + ag.Aggregate(newMessage("90"), aggregate) + + // No aggregate should not be truncated + ag.Aggregate(newMessage("00"), noAggregate) msg := <-outputChan assert.True(t, msg.ParsingExtra.IsTruncated) - assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("auto_multiline")}) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("single_line")}) assertMessageContent(t, msg, "1234567890...TRUNCATED...") msg = <-outputChan assert.True(t, msg.ParsingExtra.IsTruncated) - assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("auto_multiline")}) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("single_line")}) assertMessageContent(t, msg, "...TRUNCATED...12345678901...TRUNCATED...") + msg = <-outputChan + assert.True(t, msg.ParsingExtra.IsTruncated) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("single_line")}) + assertMessageContent(t, msg, "...TRUNCATED...12345") + msg = <-outputChan assert.True(t, msg.ParsingExtra.IsTruncated) assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("auto_multiline")}) - assertMessageContent(t, msg, "...TRUNCATED...12345...TRUNCATED...") + assertMessageContent(t, msg, "1234\\n5678...TRUNCATED...") msg = <-outputChan assert.True(t, msg.ParsingExtra.IsTruncated) assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("auto_multiline")}) - assertMessageContent(t, msg, "...TRUNCATED...6789") + assertTrailingMultiline(t, msg, "...TRUNCATED...90") msg = <-outputChan assert.False(t, msg.ParsingExtra.IsTruncated) assert.Empty(t, msg.ParsingExtra.Tags) - assertMessageContent(t, msg, "3") + assertMessageContent(t, msg, "00") +} + +func 
TestSingleGroupIsTruncatedAsMultilineLog(t *testing.T) { + outputChan, outputFn := makeHandler() + ag := NewAggregator(outputFn, 5, time.Duration(1*time.Second), true, false, status.NewInfoRegistry()) + + ag.Aggregate(newMessage("123"), startGroup) + ag.Aggregate(newMessage("456"), aggregate) + + msg := <-outputChan + assert.True(t, msg.ParsingExtra.IsTruncated) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("auto_multiline")}) + assertTrailingMultiline(t, msg, "123...TRUNCATED...") + + msg = <-outputChan + assert.True(t, msg.ParsingExtra.IsTruncated) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("auto_multiline")}) + assertTrailingMultiline(t, msg, "...TRUNCATED...456") +} + +func TestSingleLineTruncatedLogIsTaggedSingleLine(t *testing.T) { + outputChan, outputFn := makeHandler() + ag := NewAggregator(outputFn, 5, time.Duration(1*time.Second), true, false, status.NewInfoRegistry()) + + ag.Aggregate(newMessage("12345"), startGroup) // Exactly the size of the max message size - simulates truncation in the framer + ag.Aggregate(newMessage("456"), aggregate) + + msg := <-outputChan + assert.True(t, msg.ParsingExtra.IsTruncated) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("single_line")}) + assertMessageContent(t, msg, "12345...TRUNCATED...") + + msg = <-outputChan + assert.True(t, msg.ParsingExtra.IsTruncated) + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.TruncatedReasonTag("single_line")}) + assertMessageContent(t, msg, "...TRUNCATED...456") } func TestTagMultiLineLogs(t *testing.T) { @@ -164,7 +217,7 @@ func TestTagMultiLineLogs(t *testing.T) { ag := NewAggregator(outputFn, 10, time.Duration(1*time.Second), false, true, status.NewInfoRegistry()) ag.Aggregate(newMessage("12345"), startGroup) - ag.Aggregate(newMessage("67890"), aggregate) + ag.Aggregate(newMessage("6789"), aggregate) ag.Aggregate(newMessage("1"), aggregate) // Causes overflow, truncate and flush ag.Aggregate(newMessage("2"), noAggregate) @@ -172,13 +225,13 @@ func TestTagMultiLineLogs(t *testing.T) { assert.True(t, msg.ParsingExtra.IsMultiLine) assert.True(t, msg.ParsingExtra.IsTruncated) assert.Equal(t, msg.ParsingExtra.Tags, []string{message.MultiLineSourceTag("auto_multiline")}) - assertMessageContent(t, msg, "12345\\n67890...TRUNCATED...") + assertMessageContent(t, msg, "12345\\n6789...TRUNCATED...") msg = <-outputChan - assert.False(t, msg.ParsingExtra.IsMultiLine) + assert.True(t, msg.ParsingExtra.IsMultiLine) assert.True(t, msg.ParsingExtra.IsTruncated) - assert.Empty(t, msg.ParsingExtra.Tags) - assertMessageContent(t, msg, "...TRUNCATED...1") + assert.Equal(t, msg.ParsingExtra.Tags, []string{message.MultiLineSourceTag("auto_multiline")}) + assertTrailingMultiline(t, msg, "...TRUNCATED...1") msg = <-outputChan assert.False(t, msg.ParsingExtra.IsMultiLine) @@ -187,14 +240,58 @@ func TestTagMultiLineLogs(t *testing.T) { assertMessageContent(t, msg, "2") } -func TestStartGruopIsNotTruncatedWithoutAggreagation(t *testing.T) { +func TestSingleLineTooLongTruncation(t *testing.T) { outputChan, outputFn := makeHandler() ag := NewAggregator(outputFn, 5, time.Duration(1*time.Second), false, true, status.NewInfoRegistry()) - ag.Aggregate(newMessage("123456"), startGroup) + // Multi line log where each message is too large except the last one + ag.Aggregate(newMessage("123"), startGroup) + ag.Aggregate(newMessage("456"), aggregate) + ag.Aggregate(newMessage("123456"), aggregate) + ag.Aggregate(newMessage("123"), aggregate) // 
Force a flush ag.Aggregate(newMessage(""), startGroup) msg := <-outputChan - assertMessageContent(t, msg, "123456") + assertTrailingMultiline(t, msg, "123...TRUNCATED...") + msg = <-outputChan + assertTrailingMultiline(t, msg, "...TRUNCATED...456") + msg = <-outputChan + assertMessageContent(t, msg, "123456...TRUNCATED...") + msg = <-outputChan + assertMessageContent(t, msg, "...TRUNCATED...123") + + // Single line logs where each message is too large except the last + ag.Aggregate(newMessage("123456"), startGroup) + ag.Aggregate(newMessage("123456"), startGroup) + ag.Aggregate(newMessage("123456"), startGroup) + ag.Aggregate(newMessage("123"), startGroup) + // Force a flush + ag.Aggregate(newMessage(""), startGroup) + + msg = <-outputChan + assertMessageContent(t, msg, "123456...TRUNCATED...") + msg = <-outputChan + assertMessageContent(t, msg, "...TRUNCATED...123456...TRUNCATED...") + msg = <-outputChan + assertMessageContent(t, msg, "...TRUNCATED...123456...TRUNCATED...") + msg = <-outputChan + assertMessageContent(t, msg, "...TRUNCATED...123") + + // No aggregate logs should never be truncated from the previous message (Could break a JSON payload) + ag.Aggregate(newMessage("123456"), startGroup) + ag.Aggregate(newMessage("123456"), noAggregate) + ag.Aggregate(newMessage("123456"), startGroup) + ag.Aggregate(newMessage("123"), startGroup) + // Force a flush + ag.Aggregate(newMessage(""), startGroup) + + msg = <-outputChan + assertMessageContent(t, msg, "123456...TRUNCATED...") + msg = <-outputChan + assertMessageContent(t, msg, "123456...TRUNCATED...") + msg = <-outputChan + assertMessageContent(t, msg, "...TRUNCATED...123456...TRUNCATED...") + msg = <-outputChan + assertMessageContent(t, msg, "...TRUNCATED...123") } diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/json_detector.go b/pkg/logs/internal/decoder/auto_multiline_detection/json_detector.go index 663db8d2d33f4..651c687a25af8 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/json_detector.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/json_detector.go @@ -19,9 +19,9 @@ func NewJSONDetector() *JSONDetector { } // ProcessAndContinue checks if a message is a JSON message. -// This implements the Herustic interface - so we should stop processing if we detect a JSON message by returning false. +// This implements the Heuristic interface - so we should stop processing if we detect a JSON message by returning false. 
 func (j *JSONDetector) ProcessAndContinue(context *messageContext) bool {
-	if jsonRegexp.Match(context.rawMessage) {
+	if context.labelAssignedBy == defaultLabelSource && jsonRegexp.Match(context.rawMessage) {
 		context.label = noAggregate
 		context.labelAssignedBy = "JSON_detector"
 		return false
diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/json_detector_test.go b/pkg/logs/internal/decoder/auto_multiline_detection/json_detector_test.go
index a325c0386cccc..ed8fe8246473b 100644
--- a/pkg/logs/internal/decoder/auto_multiline_detection/json_detector_test.go
+++ b/pkg/logs/internal/decoder/auto_multiline_detection/json_detector_test.go
@@ -40,11 +40,23 @@ func TestJsonDetector(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(string(tc.rawMessage), func(t *testing.T) {
 			messageContext := &messageContext{
-				rawMessage: []byte(tc.rawMessage),
-				label:      aggregate,
+				rawMessage:      []byte(tc.rawMessage),
+				label:           aggregate,
+				labelAssignedBy: defaultLabelSource,
 			}
 			assert.Equal(t, tc.expectedResult, jsonDetector.ProcessAndContinue(messageContext))
 			assert.Equal(t, tc.expectedLabel, messageContext.label)
 		})
 	}
 }
+
+func TestJsonDetectorDoesntOverrideAssignedLabel(t *testing.T) {
+	jsonDetector := NewJSONDetector()
+	messageContext := &messageContext{
+		rawMessage:      []byte(`{"key": "value"}`),
+		label:           aggregate,
+		labelAssignedBy: "Not default!",
+	}
+	assert.Equal(t, true, jsonDetector.ProcessAndContinue(messageContext))
+	assert.Equal(t, aggregate, messageContext.label)
+}
diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/labeler.go b/pkg/logs/internal/decoder/auto_multiline_detection/labeler.go
index 27867c2703232..516adc588ad8b 100644
--- a/pkg/logs/internal/decoder/auto_multiline_detection/labeler.go
+++ b/pkg/logs/internal/decoder/auto_multiline_detection/labeler.go
@@ -15,6 +15,8 @@ const (
 	startGroup Label = iota
 	noAggregate
 	aggregate
+
+	defaultLabelSource = "default"
 )
 
 type messageContext struct {
@@ -36,7 +38,7 @@ type Heuristic interface {
 
 // Labeler labels log messages based on a set of heuristics.
 // Each Heuristic operates on the output of the previous heuristic - mutating the message context.
-// A label is chosen when a herusitc signals the labeler to stop or when all herustics have been processed.
+// A label is chosen when a heuristic signals the labeler to stop or when all heuristics have been processed.
 type Labeler struct {
 	lablerHeuristics    []Heuristic
 	analyticsHeuristics []Heuristic
@@ -59,7 +61,7 @@ func (l *Labeler) Label(rawMessage []byte) Label {
 		rawMessage:      rawMessage,
 		tokens:          nil,
 		label:           aggregate,
-		labelAssignedBy: "default",
+		labelAssignedBy: defaultLabelSource,
 	}
 	for _, h := range l.lablerHeuristics {
 		if !h.ProcessAndContinue(context) {
diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table.go b/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table.go
index 483821034a4f4..b5056214e2865 100644
--- a/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table.go
+++ b/pkg/logs/internal/decoder/auto_multiline_detection/pattern_table.go
@@ -142,7 +142,7 @@ func (p *PatternTable) DumpTable() []DiagnosticRow {
 }
 
 // ProcessAndContinue adds a pattern to the table and updates its label based on it's frequency.
-// This implements the Herustic interface - so we should stop processing if the label was changed
+// This implements the Heuristic interface - so we should stop processing if the label was changed
 // due to pattern detection.
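The labeler and JSON detector changes above introduce defaultLabelSource so that later heuristics only act when nothing earlier in the chain (for example the user-sample heuristic, which now runs before JSON detection) has already assigned a label. A condensed sketch of that guarded chain, with illustrative names and string labels instead of the agent's actual types:

package main

import (
	"fmt"
	"regexp"
)

const defaultLabelSource = "default"

type messageContext struct {
	rawMessage      []byte
	label           string
	labelAssignedBy string
}

// A heuristic returns false to stop the chain once it has decided on a label.
type heuristic func(*messageContext) bool

var jsonRe = regexp.MustCompile(`^\s*\{`)

// jsonDetector only fires when no earlier heuristic has already assigned a label.
func jsonDetector(ctx *messageContext) bool {
	if ctx.labelAssignedBy == defaultLabelSource && jsonRe.Match(ctx.rawMessage) {
		ctx.label = "noAggregate"
		ctx.labelAssignedBy = "JSON_detector"
		return false
	}
	return true
}

func label(raw []byte, heuristics []heuristic) *messageContext {
	ctx := &messageContext{rawMessage: raw, label: "aggregate", labelAssignedBy: defaultLabelSource}
	for _, h := range heuristics {
		if !h(ctx) {
			break
		}
	}
	return ctx
}

func main() {
	ctx := label([]byte(`{"key": "value"}`), []heuristic{jsonDetector})
	fmt.Println(ctx.label, "assigned by", ctx.labelAssignedBy)
}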
func (p *PatternTable) ProcessAndContinue(context *messageContext) bool { diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/tokenizer.go b/pkg/logs/internal/decoder/auto_multiline_detection/tokenizer.go index a798ce44f58d9..8274e00f8507e 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/tokenizer.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/tokenizer.go @@ -38,7 +38,7 @@ func NewTokenizer(maxEvalBytes int) *Tokenizer { } // ProcessAndContinue enriches the message context with tokens. -// This implements the Herustic interface - this heuristic does not stop processing. +// This implements the Heuristic interface - this heuristic does not stop processing. func (t *Tokenizer) ProcessAndContinue(context *messageContext) bool { maxBytes := len(context.rawMessage) if maxBytes > t.maxEvalBytes { diff --git a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go index ee750936c85a8..0d5de85a2b712 100644 --- a/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go +++ b/pkg/logs/internal/decoder/auto_multiline_detection/user_samples.go @@ -117,7 +117,7 @@ func NewUserSamples(config model.Reader) *UserSamples { } // ProcessAndContinue applies a user sample to a log message. If it matches, a label is assigned. -// This implements the Herustic interface - so we should stop processing if we detect a user pattern by returning false. +// This implements the Heuristic interface - so we should stop processing if we detect a user pattern by returning false. func (j *UserSamples) ProcessAndContinue(context *messageContext) bool { if context.tokens == nil { log.Error("Tokens are required to process user samples") diff --git a/pkg/logs/internal/decoder/auto_multiline_handler.go b/pkg/logs/internal/decoder/auto_multiline_handler.go index e8253cd764d69..91af54893b86a 100644 --- a/pkg/logs/internal/decoder/auto_multiline_handler.go +++ b/pkg/logs/internal/decoder/auto_multiline_handler.go @@ -27,13 +27,12 @@ func NewAutoMultilineHandler(outputFn func(m *message.Message), maxContentSize i heuristics := []automultilinedetection.Heuristic{} heuristics = append(heuristics, automultilinedetection.NewTokenizer(pkgconfigsetup.Datadog().GetInt("logs_config.auto_multi_line.tokenizer_max_input_bytes"))) + heuristics = append(heuristics, automultilinedetection.NewUserSamples(pkgconfigsetup.Datadog())) if pkgconfigsetup.Datadog().GetBool("logs_config.auto_multi_line.enable_json_detection") { heuristics = append(heuristics, automultilinedetection.NewJSONDetector()) } - heuristics = append(heuristics, automultilinedetection.NewUserSamples(pkgconfigsetup.Datadog())) - if pkgconfigsetup.Datadog().GetBool("logs_config.auto_multi_line.enable_datetime_detection") { heuristics = append(heuristics, automultilinedetection.NewTimestampDetector( pkgconfigsetup.Datadog().GetFloat64("logs_config.auto_multi_line.timestamp_detector_match_threshold"))) diff --git a/pkg/logs/internal/decoder/multiline_handler.go b/pkg/logs/internal/decoder/multiline_handler.go index 67fba384cbb42..9f461dc7716ff 100644 --- a/pkg/logs/internal/decoder/multiline_handler.go +++ b/pkg/logs/internal/decoder/multiline_handler.go @@ -168,7 +168,7 @@ func (h *MultiLineHandler) sendBuffer() { if h.isBufferTruncated && pkgconfigsetup.Datadog().GetBool("logs_config.tag_truncated_logs") { msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.TruncatedReasonTag("multiline_regex")) } - if h.isBufferTruncated && 
pkgconfigsetup.Datadog().GetBool("logs_config.tag_multi_line_logs") { + if h.linesCombined > 1 && pkgconfigsetup.Datadog().GetBool("logs_config.tag_multi_line_logs") { msg.ParsingExtra.Tags = append(msg.ParsingExtra.Tags, message.MultiLineSourceTag(h.multiLineTagValue)) } h.outputFn(msg) diff --git a/pkg/logs/internal/decoder/single_line_handler.go b/pkg/logs/internal/decoder/single_line_handler.go index 8e93cb63e0814..b69c2a78c86f6 100644 --- a/pkg/logs/internal/decoder/single_line_handler.go +++ b/pkg/logs/internal/decoder/single_line_handler.go @@ -59,7 +59,6 @@ func (h *SingleLineHandler) process(msg *message.Message) { // the new line is just a remainder, // adding the truncated flag at the beginning of the content content = append(message.TruncatedFlag, content...) - addTruncatedTag(msg) } // how should we detect logs which are too long before rendering them? @@ -67,6 +66,9 @@ func (h *SingleLineHandler) process(msg *message.Message) { // the line is too long, it needs to be cut off and send, // adding the truncated flag the end of the content content = append(content, message.TruncatedFlag...) + } + + if lastWasTruncated || h.shouldTruncate { addTruncatedTag(msg) } diff --git a/pkg/logs/internal/decoder/single_line_handler_test.go b/pkg/logs/internal/decoder/single_line_handler_test.go index 8fe3d00b574e5..579c39ed559b6 100644 --- a/pkg/logs/internal/decoder/single_line_handler_test.go +++ b/pkg/logs/internal/decoder/single_line_handler_test.go @@ -64,7 +64,7 @@ func TestSingleLineHandlerProcess(t *testing.T) { string(message.TruncatedFlag) + "aaaaaaaaaaaaaaaaaaaa" + string(message.TruncatedFlag), string(message.TruncatedFlag) + "wait, how many a's?", }, - expTags: [][]string{{truncateTag}, {truncateTag, truncateTag}, {truncateTag}}, + expTags: [][]string{{truncateTag}, {truncateTag}, {truncateTag}}, tagTruncatedLogs: true, }, { diff --git a/pkg/logs/launchers/container/launcher.go b/pkg/logs/launchers/container/launcher.go index 0fcd4ede92c1f..737d11074e61b 100644 --- a/pkg/logs/launchers/container/launcher.go +++ b/pkg/logs/launchers/container/launcher.go @@ -23,7 +23,7 @@ import ( sourcesPkg "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/tailers" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/startstop" ) @@ -60,13 +60,13 @@ type Launcher struct { // tailers contains the tailer for each source tailers map[*sourcesPkg.LogSource]tailerfactory.Tailer - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] tagger tagger.Component } // NewLauncher returns a new launcher -func NewLauncher(sources *sourcesPkg.LogSources, wmeta optional.Option[workloadmeta.Component], tagger tagger.Component) *Launcher { +func NewLauncher(sources *sourcesPkg.LogSources, wmeta option.Option[workloadmeta.Component], tagger tagger.Component) *Launcher { launcher := &Launcher{ sources: sources, tailers: make(map[*sourcesPkg.LogSource]tailerfactory.Tailer), diff --git a/pkg/logs/launchers/container/launcher_nodocker.go b/pkg/logs/launchers/container/launcher_nodocker.go index 12831d79342c3..63214c3868397 100644 --- a/pkg/logs/launchers/container/launcher_nodocker.go +++ b/pkg/logs/launchers/container/launcher_nodocker.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/pipeline" sourcesPkg "github.com/DataDog/datadog-agent/pkg/logs/sources" 
"github.com/DataDog/datadog-agent/pkg/logs/tailers" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // A Launcher starts and stops new tailers for every new containers discovered by autodiscovery. @@ -25,7 +25,7 @@ import ( type Launcher struct{} // NewLauncher returns a new launcher -func NewLauncher(_ *sourcesPkg.LogSources, _ optional.Option[workloadmeta.Component], _ tagger.Component) *Launcher { +func NewLauncher(_ *sourcesPkg.LogSources, _ option.Option[workloadmeta.Component], _ tagger.Component) *Launcher { return &Launcher{} } diff --git a/pkg/logs/launchers/container/launcher_test.go b/pkg/logs/launchers/container/launcher_test.go index db167deece86b..e0b5d22a34b30 100644 --- a/pkg/logs/launchers/container/launcher_test.go +++ b/pkg/logs/launchers/container/launcher_test.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/pipeline" "github.com/DataDog/datadog-agent/pkg/logs/sources" "github.com/DataDog/datadog-agent/pkg/logs/tailers" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // testFactory is a test implementation of tailerfactory.Factory. @@ -39,7 +39,7 @@ func (tf *testFactory) MakeTailer(source *sources.LogSource) (tailerfactory.Tail func TestStartStop(t *testing.T) { fakeTagger := mock.SetupFakeTagger(t) - l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger) + l := NewLauncher(nil, option.None[workloadmeta.Component](), fakeTagger) sp := launchers.NewMockSourceProvider() pl := pipeline.NewMockProvider() @@ -59,7 +59,7 @@ func TestStartStop(t *testing.T) { func TestAddsRemovesSource(t *testing.T) { fakeTagger := mock.SetupFakeTagger(t) - l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger) + l := NewLauncher(nil, option.None[workloadmeta.Component](), fakeTagger) l.tailerFactory = &testFactory{ makeTailer: func(source *sources.LogSource) (tailerfactory.Tailer, error) { return &tailerfactory.TestTailer{Name: source.Name}, nil @@ -90,7 +90,7 @@ func TestAddsRemovesSource(t *testing.T) { func TestCannotMakeTailer(t *testing.T) { fakeTagger := mock.SetupFakeTagger(t) - l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger) + l := NewLauncher(nil, option.None[workloadmeta.Component](), fakeTagger) l.tailerFactory = &testFactory{ makeTailer: func(_ *sources.LogSource) (tailerfactory.Tailer, error) { return nil, errors.New("uhoh") @@ -113,7 +113,7 @@ func TestCannotMakeTailer(t *testing.T) { func TestCannotStartTailer(t *testing.T) { fakeTagger := mock.SetupFakeTagger(t) - l := NewLauncher(nil, optional.NewNoneOption[workloadmeta.Component](), fakeTagger) + l := NewLauncher(nil, option.None[workloadmeta.Component](), fakeTagger) l.tailerFactory = &testFactory{ makeTailer: func(source *sources.LogSource) (tailerfactory.Tailer, error) { return &tailerfactory.TestTailer{Name: source.Name, StartError: true}, nil diff --git a/pkg/logs/launchers/container/tailerfactory/factory.go b/pkg/logs/launchers/container/tailerfactory/factory.go index ff2e50191113c..baab568400d90 100644 --- a/pkg/logs/launchers/container/tailerfactory/factory.go +++ b/pkg/logs/launchers/container/tailerfactory/factory.go @@ -18,7 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/sources" dockerutilPkg "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + 
"github.com/DataDog/datadog-agent/pkg/util/option" ) // Factory supports making new tailers. @@ -43,7 +43,7 @@ type factory struct { // workloadmetaStore is the global WLM store containing information about // containers and pods. - workloadmetaStore optional.Option[workloadmeta.Component] + workloadmetaStore option.Option[workloadmeta.Component] // cop allows the factory to determine whether the agent is logging // containers or pods. @@ -58,7 +58,7 @@ type factory struct { var _ Factory = (*factory)(nil) // New creates a new Factory. -func New(sources *sources.LogSources, pipelineProvider pipeline.Provider, registry auditor.Registry, workloadmetaStore optional.Option[workloadmeta.Component], tagger tagger.Component) Factory { +func New(sources *sources.LogSources, pipelineProvider pipeline.Provider, registry auditor.Registry, workloadmetaStore option.Option[workloadmeta.Component], tagger tagger.Component) Factory { return &factory{ sources: sources, pipelineProvider: pipelineProvider, diff --git a/pkg/logs/launchers/container/tailerfactory/file_test.go b/pkg/logs/launchers/container/tailerfactory/file_test.go index 314b804bfe8f0..2bb3292a83ad5 100644 --- a/pkg/logs/launchers/container/tailerfactory/file_test.go +++ b/pkg/logs/launchers/container/tailerfactory/file_test.go @@ -30,7 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/sources" dockerutilPkg "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -295,7 +295,7 @@ func TestMakeK8sSource(t *testing.T) { tf := &factory{ pipelineProvider: pipeline.NewMockProvider(), cop: containersorpods.NewDecidedChooser(containersorpods.LogPods), - workloadmetaStore: optional.NewOption[workloadmeta.Component](store), + workloadmetaStore: option.New[workloadmeta.Component](store), } for _, sourceConfigType := range []string{"docker", "containerd"} { t.Run("source.Config.Type="+sourceConfigType, func(t *testing.T) { @@ -338,7 +338,7 @@ func TestMakeK8sSource_pod_not_found(t *testing.T) { require.NoError(t, os.MkdirAll(filepath.Dir(p), 0o777)) require.NoError(t, os.WriteFile(p, []byte("{}"), 0o666)) - workloadmetaStore := fxutil.Test[optional.Option[workloadmeta.Component]](t, fx.Options( + workloadmetaStore := fxutil.Test[option.Option[workloadmeta.Component]](t, fx.Options( fx.Provide(func() log.Component { return logmock.New(t) }), compConfig.MockModule(), fx.Supply(context.Background()), diff --git a/pkg/logs/launchers/file/launcher.go b/pkg/logs/launchers/file/launcher.go index d2713813e7782..56ff904457150 100644 --- a/pkg/logs/launchers/file/launcher.go +++ b/pkg/logs/launchers/file/launcher.go @@ -10,9 +10,6 @@ import ( "regexp" "time" - "github.com/DataDog/datadog-agent/pkg/util" - "github.com/DataDog/datadog-agent/pkg/util/log" - tagger "github.com/DataDog/datadog-agent/comp/core/tagger/def" "github.com/DataDog/datadog-agent/comp/logs/agent/config" flareController "github.com/DataDog/datadog-agent/comp/logs/agent/flare" @@ -27,6 +24,8 @@ import ( status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" "github.com/DataDog/datadog-agent/pkg/logs/tailers" tailer "github.com/DataDog/datadog-agent/pkg/logs/tailers/file" + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/procfilestats" "github.com/DataDog/datadog-agent/pkg/util/startstop" ) @@ -231,7 +230,7 @@ func (s 
*Launcher) scan() { log.Debugf("After starting new tailers, there are %d tailers running. Limit is %d.\n", tailersLen, s.tailingLimit) // Check how many file handles the Agent process has open and log a warning if the process is coming close to the OS file limit - fileStats, err := util.GetProcessFileStats() + fileStats, err := procfilestats.GetProcessFileStats() if err == nil { CheckProcessTelemetry(fileStats) } @@ -407,7 +406,7 @@ func (s *Launcher) createRotatedTailer(t *tailer.Tailer, file *tailer.File, patt } //nolint:revive // TODO(AML) Fix revive linter -func CheckProcessTelemetry(stats *util.ProcessFileStats) { +func CheckProcessTelemetry(stats *procfilestats.ProcessFileStats) { ratio := float64(stats.AgentOpenFiles) / float64(stats.OsFileLimit) if ratio > 0.9 { log.Errorf("Agent process has %v files open which is %0.f%% of the OS open file limit (%v). This is over 90%% utilization. This may be preventing log files from being tailed by the Agent and could interfere with the basic functionality of the Agent. OS file limit must be increased.", diff --git a/pkg/logs/launchers/integration/launcher.go b/pkg/logs/launchers/integration/launcher.go index 8b0c15f4476e7..30e510c6d634b 100644 --- a/pkg/logs/launchers/integration/launcher.go +++ b/pkg/logs/launchers/integration/launcher.go @@ -129,35 +129,7 @@ func (s *Launcher) run() { continue } - sources, err := ad.CreateSources(cfg.Config) - if err != nil { - ddLog.Error("Failed to create source ", err) - continue - } - - for _, source := range sources { - // TODO: integrations should only be allowed to have one IntegrationType config. - if source.Config.Type == config.IntegrationType { - // This check avoids duplicating files that have already been created - // by scanInitialFiles - logFile, exists := s.integrationToFile[cfg.IntegrationID] - - if !exists { - logFile, err = s.createFile(cfg.IntegrationID) - if err != nil { - ddLog.Error("Failed to create integration log file:", err) - continue - } - - // file to write the incoming logs to - s.integrationToFile[cfg.IntegrationID] = logFile - } - - filetypeSource := s.makeFileSource(source, logFile.fileWithPath) - s.sources.AddSource(filetypeSource) - } - } - + s.receiveSources(cfg) case log := <-s.integrationsLogsChan: if s.combinedUsageMax == 0 { continue @@ -170,6 +142,38 @@ func (s *Launcher) run() { } } +// receiveSources handles receiving incoming sources +func (s *Launcher) receiveSources(cfg integrations.IntegrationConfig) { + sources, err := ad.CreateSources(cfg.Config) + if err != nil { + ddLog.Errorf("Failed to create source for %q: %v", cfg.Config.Name, err) + return + } + + for _, source := range sources { + // TODO: integrations should only be allowed to have one IntegrationType config. 
+ if source.Config.Type == config.IntegrationType { + // This check avoids duplicating files that have already been created + // by scanInitialFiles + logFile, exists := s.integrationToFile[cfg.IntegrationID] + + if !exists { + logFile, err = s.createFile(cfg.IntegrationID) + if err != nil { + ddLog.Errorf("Failed to create integration log file for %q: %v", source.Config.IntegrationName, err) + continue + } + + // file to write the incoming logs to + s.integrationToFile[cfg.IntegrationID] = logFile + } + + filetypeSource := s.makeFileSource(source, logFile.fileWithPath) + s.sources.AddSource(filetypeSource) + } + } +} + // receiveLogs handles writing incoming logs to their respective file as well as // enforcing size limitations func (s *Launcher) receiveLogs(log integrations.IntegrationLog) { diff --git a/pkg/logs/launchers/integration/launcher_test.go b/pkg/logs/launchers/integration/launcher_test.go index d1a69e8896748..17af17a3e6931 100644 --- a/pkg/logs/launchers/integration/launcher_test.go +++ b/pkg/logs/launchers/integration/launcher_test.go @@ -103,6 +103,17 @@ func (suite *LauncherTestSuite) TestSendLog() { assert.Equal(suite.T(), expectedPath, <-filepathChan) } +func (suite *LauncherTestSuite) TestEmptyConfig() { + mockConf := &integration.Config{} + mockConf.Provider = "container" + mockConf.LogsConfig = integration.Data(``) + + suite.s.Start(nil, nil, nil, nil) + suite.integrationsComp.RegisterIntegration("12345", *mockConf) + + assert.Equal(suite.T(), len(suite.s.sources.GetSources()), 0) +} + // TestNegativeCombinedUsageMax ensures errors in combinedUsageMax don't result // in panics from `deleteFile` func (suite *LauncherTestSuite) TestNegativeCombinedUsageMax() { diff --git a/pkg/logs/message/go.mod b/pkg/logs/message/go.mod index 93c9793bafaf5..226acea0d5b76 100644 --- a/pkg/logs/message/go.mod +++ b/pkg/logs/message/go.mod @@ -28,7 +28,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../util/statstracker @@ -42,7 +42,7 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/stretchr/testify v1.10.0 ) @@ -50,23 +50,23 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect 
github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -78,24 +78,24 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/message/go.sum b/pkg/logs/message/go.sum index 7fdf16db5981c..15a68c06d091d 100644 --- a/pkg/logs/message/go.sum +++ b/pkg/logs/message/go.sum @@ -71,7 +71,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ 
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -109,8 +108,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -137,8 +136,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -155,8 +154,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common 
v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -169,8 +168,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -181,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -235,8 +234,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ 
-273,8 +272,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -302,8 +301,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/logs/metrics/go.mod b/pkg/logs/metrics/go.mod index 4bdfea3031f8c..5721696585532 100644 --- a/pkg/logs/metrics/go.mod +++ b/pkg/logs/metrics/go.mod @@ -7,7 +7,7 @@ replace ( github.com/DataDog/datadog-agent/comp/def => ../../../comp/def github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/utilizationtracker => ../../util/utilizationtracker ) @@ -21,7 +21,7 @@ require ( github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.55.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.55.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -31,7 +31,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // 
indirect github.com/spf13/pflag v1.0.5 // indirect @@ -40,7 +40,7 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/sys v0.28.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + golang.org/x/sys v0.29.0 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/metrics/go.sum b/pkg/logs/metrics/go.sum index 2565635bc3e93..148eadd0a10ff 100644 --- a/pkg/logs/metrics/go.sum +++ b/pkg/logs/metrics/go.sum @@ -27,8 +27,8 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+ github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -52,10 +52,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/logs/pipeline/go.mod b/pkg/logs/pipeline/go.mod index dbb50c6aec2e7..f3b2e673aeb70 100644 --- a/pkg/logs/pipeline/go.mod +++ b/pkg/logs/pipeline/go.mod @@ -13,6 +13,8 @@ replace ( github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../comp/def github.com/DataDog/datadog-agent/comp/logs/agent/config => ../../../comp/logs/agent/config + github.com/DataDog/datadog-agent/comp/serializer/logscompression => ../../../comp/serializer/logscompression + github.com/DataDog/datadog-agent/comp/serializer/metricscompression => ../../../comp/serializer/metricscompression 
github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock @@ -37,13 +39,15 @@ replace ( github.com/DataDog/datadog-agent/pkg/status/health => ../../status/health github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry github.com/DataDog/datadog-agent/pkg/util/backoff => ../../util/backoff + github.com/DataDog/datadog-agent/pkg/util/compression => ../../util/compression + github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../util/defaultpaths github.com/DataDog/datadog-agent/pkg/util/executable => ../../util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/http => ../../util/http github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/startstop => ../../util/startstop @@ -59,7 +63,8 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/comp/serializer/logscompression v0.61.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 github.com/DataDog/datadog-agent/pkg/logs/auditor v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 @@ -71,7 +76,8 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/sender v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/status/health v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/DataDog/datadog-agent/pkg/util/startstop v0.56.0-rc.3 github.com/hashicorp/go-multierror v1.1.1 github.com/stretchr/testify v1.10.0 @@ -79,13 +85,17 @@ require ( ) require ( - github.com/DataDog/agent-payload/v5 v5.0.138 // indirect + github.com/DataDog/agent-payload/v5 v5.0.141 // indirect + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect 
+ github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect @@ -94,20 +104,21 @@ require ( github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.14.0 // indirect + github.com/DataDog/zstd v1.5.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -124,21 +135,21 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + 
github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -149,11 +160,11 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/pipeline/go.sum b/pkg/logs/pipeline/go.sum index 19075c11eaba4..d51a9571f5a6f 100644 --- a/pkg/logs/pipeline/go.sum +++ b/pkg/logs/pipeline/go.sum @@ -1,11 +1,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= github.com/DataDog/viper v1.14.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -79,7 +81,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -127,8 +128,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 
h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -155,8 +156,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -173,8 +174,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -188,8 +189,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
-github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -200,8 +201,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -260,8 +261,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -282,8 +283,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -305,8 +306,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -341,8 +342,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/logs/pipeline/pipeline.go b/pkg/logs/pipeline/pipeline.go index b0136dac860d9..f2e701598f041 100644 --- a/pkg/logs/pipeline/pipeline.go +++ b/pkg/logs/pipeline/pipeline.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/client" @@ -24,6 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/processor" "github.com/DataDog/datadog-agent/pkg/logs/sender" "github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface" + compressioncommon "github.com/DataDog/datadog-agent/pkg/util/compression" ) // Pipeline processes and sends messages to the backend @@ -48,7 +50,9 @@ func NewPipeline(outputChan chan *message.Payload, pipelineID int, status statusinterface.Status, hostname hostnameinterface.Component, - cfg pkgconfigmodel.Reader) *Pipeline { + cfg pkgconfigmodel.Reader, + compression logscompression.Component, +) *Pipeline { var senderDoneChan chan *sync.WaitGroup var flushWg 
*sync.WaitGroup @@ -77,7 +81,7 @@ func NewPipeline(outputChan chan *message.Payload, encoder = processor.RawEncoder } - strategy := getStrategy(strategyInput, senderInput, flushChan, endpoints, serverless, flushWg, pipelineMonitor) + strategy := getStrategy(strategyInput, senderInput, flushChan, endpoints, serverless, flushWg, pipelineMonitor, compression) logsSender = sender.NewSender(cfg, senderInput, outputChan, mainDestinations, pkgconfigsetup.Datadog().GetInt("logs_config.payload_channel_size"), senderDoneChan, flushWg, pipelineMonitor) inputChan := make(chan *message.Message, pkgconfigsetup.Datadog().GetInt("logs_config.message_channel_size")) @@ -156,13 +160,24 @@ func getDestinations(endpoints *config.Endpoints, destinationsContext *client.De } //nolint:revive // TODO(AML) Fix revive linter -func getStrategy(inputChan chan *message.Message, outputChan chan *message.Payload, flushChan chan struct{}, endpoints *config.Endpoints, serverless bool, flushWg *sync.WaitGroup, pipelineMonitor metrics.PipelineMonitor) sender.Strategy { +func getStrategy( + inputChan chan *message.Message, + outputChan chan *message.Payload, + flushChan chan struct{}, + endpoints *config.Endpoints, + serverless bool, + flushWg *sync.WaitGroup, + pipelineMonitor metrics.PipelineMonitor, + compressor logscompression.Component, +) sender.Strategy { if endpoints.UseHTTP || serverless { - encoder := sender.IdentityContentType + var encoder compressioncommon.Compressor + encoder = compressor.NewCompressor(compressioncommon.NoneKind, 0) if endpoints.Main.UseCompression { - encoder = sender.NewGzipContentEncoding(endpoints.Main.CompressionLevel) + encoder = compressor.NewCompressor(endpoints.Main.CompressionKind, endpoints.Main.CompressionLevel) } + return sender.NewBatchStrategy(inputChan, outputChan, flushChan, serverless, flushWg, sender.ArraySerializer, endpoints.BatchWait, endpoints.BatchMaxSize, endpoints.BatchMaxContentSize, "logs", encoder, pipelineMonitor) } - return sender.NewStreamStrategy(inputChan, outputChan, sender.IdentityContentType) + return sender.NewStreamStrategy(inputChan, outputChan, compressor.NewCompressor(compressioncommon.NoneKind, 0)) } diff --git a/pkg/logs/pipeline/provider.go b/pkg/logs/pipeline/provider.go index 15561004e8379..c7979c06623ad 100644 --- a/pkg/logs/pipeline/provider.go +++ b/pkg/logs/pipeline/provider.go @@ -13,6 +13,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" @@ -54,19 +55,41 @@ type provider struct { serverless bool - status statusinterface.Status - hostname hostnameinterface.Component - cfg pkgconfigmodel.Reader + status statusinterface.Status + hostname hostnameinterface.Component + cfg pkgconfigmodel.Reader + compression logscompression.Component } // NewProvider returns a new Provider -func NewProvider(numberOfPipelines int, auditor auditor.Auditor, diagnosticMessageReceiver diagnostic.MessageReceiver, processingRules []*config.ProcessingRule, endpoints *config.Endpoints, destinationsContext *client.DestinationsContext, status statusinterface.Status, hostname hostnameinterface.Component, cfg pkgconfigmodel.Reader) Provider { - return newProvider(numberOfPipelines, auditor, diagnosticMessageReceiver, 
processingRules, endpoints, destinationsContext, false, status, hostname, cfg) +func NewProvider(numberOfPipelines int, + auditor auditor.Auditor, + diagnosticMessageReceiver diagnostic.MessageReceiver, + processingRules []*config.ProcessingRule, + endpoints *config.Endpoints, + destinationsContext *client.DestinationsContext, + status statusinterface.Status, + hostname hostnameinterface.Component, + cfg pkgconfigmodel.Reader, + compression logscompression.Component, +) Provider { + return newProvider(numberOfPipelines, auditor, diagnosticMessageReceiver, processingRules, endpoints, destinationsContext, false, status, hostname, cfg, compression) } // NewServerlessProvider returns a new Provider in serverless mode -func NewServerlessProvider(numberOfPipelines int, auditor auditor.Auditor, diagnosticMessageReceiver diagnostic.MessageReceiver, processingRules []*config.ProcessingRule, endpoints *config.Endpoints, destinationsContext *client.DestinationsContext, status statusinterface.Status, hostname hostnameinterface.Component, cfg pkgconfigmodel.Reader) Provider { - return newProvider(numberOfPipelines, auditor, diagnosticMessageReceiver, processingRules, endpoints, destinationsContext, true, status, hostname, cfg) +func NewServerlessProvider(numberOfPipelines int, + auditor auditor.Auditor, + diagnosticMessageReceiver diagnostic.MessageReceiver, + processingRules []*config.ProcessingRule, + endpoints *config.Endpoints, + destinationsContext *client.DestinationsContext, + status statusinterface.Status, + hostname hostnameinterface.Component, + cfg pkgconfigmodel.Reader, + compression logscompression.Component, +) Provider { + + return newProvider(numberOfPipelines, auditor, diagnosticMessageReceiver, processingRules, endpoints, destinationsContext, true, status, hostname, cfg, compression) } // NewMockProvider creates a new provider that will not provide any pipelines. 
@@ -74,7 +97,18 @@ func NewMockProvider() Provider { return &provider{} } -func newProvider(numberOfPipelines int, auditor auditor.Auditor, diagnosticMessageReceiver diagnostic.MessageReceiver, processingRules []*config.ProcessingRule, endpoints *config.Endpoints, destinationsContext *client.DestinationsContext, serverless bool, status statusinterface.Status, hostname hostnameinterface.Component, cfg pkgconfigmodel.Reader) Provider { +func newProvider(numberOfPipelines int, + auditor auditor.Auditor, + diagnosticMessageReceiver diagnostic.MessageReceiver, + processingRules []*config.ProcessingRule, + endpoints *config.Endpoints, + destinationsContext *client.DestinationsContext, + serverless bool, + status statusinterface.Status, + hostname hostnameinterface.Component, + cfg pkgconfigmodel.Reader, + compression logscompression.Component, +) Provider { return &provider{ numberOfPipelines: numberOfPipelines, auditor: auditor, @@ -88,6 +122,7 @@ func newProvider(numberOfPipelines int, auditor auditor.Auditor, diagnosticMessa status: status, hostname: hostname, cfg: cfg, + compression: compression, } } @@ -97,7 +132,7 @@ func (p *provider) Start() { p.outputChan = p.auditor.Channel() for i := 0; i < p.numberOfPipelines; i++ { - pipeline := NewPipeline(p.outputChan, p.processingRules, p.endpoints, p.destinationsContext, p.diagnosticMessageReceiver, p.serverless, i, p.status, p.hostname, p.cfg) + pipeline := NewPipeline(p.outputChan, p.processingRules, p.endpoints, p.destinationsContext, p.diagnosticMessageReceiver, p.serverless, i, p.status, p.hostname, p.cfg, p.compression) pipeline.Start() p.pipelines = append(p.pipelines, pipeline) } diff --git a/pkg/logs/pipeline/provider_test.go b/pkg/logs/pipeline/provider_test.go index 8f950c43dd6f3..c9aadc09139f5 100644 --- a/pkg/logs/pipeline/provider_test.go +++ b/pkg/logs/pipeline/provider_test.go @@ -13,6 +13,7 @@ import ( "go.uber.org/atomic" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + compressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/status/health" ) @@ -31,6 +32,7 @@ func (suite *ProviderTestSuite) SetupTest() { pipelines: []*Pipeline{}, endpoints: config.NewEndpoints(config.Endpoint{}, nil, true, false), currentPipelineIndex: atomic.NewUint32(0), + compression: compressionfx.NewMockCompressor(), } } diff --git a/pkg/logs/processor/go.mod b/pkg/logs/processor/go.mod index b98eacd627e97..8a8be8be5a7c8 100644 --- a/pkg/logs/processor/go.mod +++ b/pkg/logs/processor/go.mod @@ -35,7 +35,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../util/statstracker @@ -48,16 +48,16 @@ replace ( ) require ( - github.com/DataDog/agent-payload/v5 v5.0.138 + github.com/DataDog/agent-payload/v5 v5.0.141 github.com/DataDog/datadog-agent/comp/core/hostname/hostnameinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 - 
github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/logs/diagnostic v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sds v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/stretchr/testify v1.10.0 ) @@ -67,7 +67,7 @@ require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect @@ -75,16 +75,16 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 // indirect github.com/DataDog/viper v1.14.0 // indirect @@ -103,21 +103,21 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -129,10 +129,10 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/processor/go.sum b/pkg/logs/processor/go.sum index 1fb7bbe76cabd..85e936106ae3a 100644 --- a/pkg/logs/processor/go.sum +++ b/pkg/logs/processor/go.sum @@ -1,7 +1,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 h1:RoH7VLzTnxHEugRPIgnGlxwDFszFGI7b3WZZUtWuPRM= github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42/go.mod h1:TX7CTOQ3LbQjfAi4SwqUoR5gY1zfUk7VRBDTuArjaDc= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= @@ -79,7 +79,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -122,8 +121,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug 
v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -150,8 +149,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -168,8 +167,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -183,8 +182,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= 
-github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -195,8 +194,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -255,8 +254,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -298,8 +297,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -334,8 +333,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/logs/sds/go.mod b/pkg/logs/sds/go.mod index f5818700af57b..be1d19b5da9c3 100644 --- a/pkg/logs/sds/go.mod +++ b/pkg/logs/sds/go.mod @@ -37,7 +37,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/http => ../../util/http github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../util/statstracker @@ -49,10 +49,10 @@ replace ( ) require ( - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/DataDog/dd-sensitive-data-scanner/sds-go/go v0.0.0-20240816154533-f7f9beb53a42 github.com/stretchr/testify v1.10.0 ) @@ -64,7 +64,7 @@ require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect @@ -72,16 +72,16 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -97,21 +97,21 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -123,10 +123,10 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/sds/go.sum b/pkg/logs/sds/go.sum index c9341f0f48498..8c130c7951a55 100644 --- a/pkg/logs/sds/go.sum +++ b/pkg/logs/sds/go.sum @@ -73,7 +73,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod 
h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -115,8 +114,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -143,8 +142,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -161,8 +160,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod 
h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -176,8 +175,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -188,8 +187,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -244,8 +243,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -282,8 +281,8 @@ 
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -311,8 +310,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/logs/sender/batch_strategy.go b/pkg/logs/sender/batch_strategy.go index 47ccbaf86009b..9d1c28eafff77 100644 --- a/pkg/logs/sender/batch_strategy.go +++ b/pkg/logs/sender/batch_strategy.go @@ -15,6 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/metrics" "github.com/DataDog/datadog-agent/pkg/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/compression" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -31,12 +32,12 @@ type batchStrategy struct { flushWg *sync.WaitGroup buffer *MessageBuffer // pipelineName provides a name for the strategy to differentiate it from other instances in other internal pipelines - pipelineName string - serializer Serializer - batchWait time.Duration - contentEncoding ContentEncoding - stopChan chan struct{} // closed when the goroutine has finished - clock clock.Clock + pipelineName string + serializer Serializer + batchWait time.Duration + compression compression.Compressor + stopChan chan struct{} // closed when the goroutine has finished + clock clock.Clock // Telemtry pipelineMonitor metrics.PipelineMonitor @@ -54,9 +55,9 @@ func NewBatchStrategy(inputChan chan *message.Message, maxBatchSize int, maxContentSize int, pipelineName string, - contentEncoding ContentEncoding, + compression compression.Compressor, pipelineMonitor metrics.PipelineMonitor) Strategy { - return newBatchStrategyWithClock(inputChan, outputChan, flushChan, serverless, flushWg, serializer, batchWait, maxBatchSize, maxContentSize, pipelineName, clock.New(), contentEncoding, pipelineMonitor) + return 
newBatchStrategyWithClock(inputChan, outputChan, flushChan, serverless, flushWg, serializer, batchWait, maxBatchSize, maxContentSize, pipelineName, clock.New(), compression, pipelineMonitor) } func newBatchStrategyWithClock(inputChan chan *message.Message, @@ -70,7 +71,7 @@ func newBatchStrategyWithClock(inputChan chan *message.Message, maxContentSize int, pipelineName string, clock clock.Clock, - contentEncoding ContentEncoding, + compression compression.Compressor, pipelineMonitor metrics.PipelineMonitor) Strategy { return &batchStrategy{ @@ -82,7 +83,7 @@ func newBatchStrategyWithClock(inputChan chan *message.Message, buffer: NewMessageBuffer(maxBatchSize, maxContentSize), serializer: serializer, batchWait: batchWait, - contentEncoding: contentEncoding, + compression: compression, stopChan: make(chan struct{}), pipelineName: pipelineName, clock: clock, @@ -168,7 +169,7 @@ func (s *batchStrategy) sendMessages(messages []*message.Message, outputChan cha serializedMessage := s.serializer.Serialize(messages) log.Debugf("Send messages for pipeline %s (msg_count:%d, content_size=%d, avg_msg_size=%.2f)", s.pipelineName, len(messages), len(serializedMessage), float64(len(serializedMessage))/float64(len(messages))) - encodedPayload, err := s.contentEncoding.encode(serializedMessage) + encodedPayload, err := s.compression.Compress(serializedMessage) if err != nil { log.Warn("Encoding failed - dropping payload", err) s.utilization.Stop() @@ -183,7 +184,7 @@ func (s *batchStrategy) sendMessages(messages []*message.Message, outputChan cha p := &message.Payload{ Messages: messages, Encoded: encodedPayload, - Encoding: s.contentEncoding.name(), + Encoding: s.compression.ContentEncoding(), UnencodedSize: len(serializedMessage), } s.utilization.Stop() diff --git a/pkg/logs/sender/batch_strategy_test.go b/pkg/logs/sender/batch_strategy_test.go index 34cb6be7aa4e9..f5d3c1b27c464 100644 --- a/pkg/logs/sender/batch_strategy_test.go +++ b/pkg/logs/sender/batch_strategy_test.go @@ -12,8 +12,10 @@ import ( "github.com/benbjohnson/clock" "github.com/stretchr/testify/assert" + compressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/logs/message" "github.com/DataDog/datadog-agent/pkg/logs/metrics" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) func TestBatchStrategySendsPayloadWhenBufferIsFull(t *testing.T) { @@ -21,7 +23,7 @@ func TestBatchStrategySendsPayloadWhenBufferIsFull(t *testing.T) { output := make(chan *message.Payload) flushChan := make(chan struct{}) - s := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", &identityContentType{}, metrics.NewNoopPipelineMonitor("")) + s := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", compressionfx.NewMockCompressor().NewCompressor(compression.NoneKind, 1), metrics.NewNoopPipelineMonitor("")) s.Start() message1 := message.NewMessage([]byte("a"), nil, "", 0) @@ -53,7 +55,7 @@ func TestBatchStrategySendsPayloadWhenBufferIsOutdated(t *testing.T) { timerInterval := 100 * time.Millisecond clk := clock.NewMock() - s := newBatchStrategyWithClock(input, output, flushChan, false, nil, LineSerializer, timerInterval, 100, 100, "test", clk, &identityContentType{}, metrics.NewNoopPipelineMonitor("")) + s := newBatchStrategyWithClock(input, output, flushChan, false, nil, LineSerializer, timerInterval, 100, 100, "test", clk, 
compressionfx.NewMockCompressor().NewCompressor(compression.NoneKind, 1), metrics.NewNoopPipelineMonitor("")) s.Start() for round := 0; round < 3; round++ { @@ -78,7 +80,7 @@ func TestBatchStrategySendsPayloadWhenClosingInput(t *testing.T) { flushChan := make(chan struct{}) clk := clock.NewMock() - s := newBatchStrategyWithClock(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", clk, &identityContentType{}, metrics.NewNoopPipelineMonitor("")) + s := newBatchStrategyWithClock(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", clk, compressionfx.NewMockCompressor().NewCompressor(compression.NoneKind, 1), metrics.NewNoopPipelineMonitor("")) s.Start() message := message.NewMessage([]byte("a"), nil, "", 0) @@ -103,7 +105,7 @@ func TestBatchStrategyShouldNotBlockWhenStoppingGracefully(t *testing.T) { output := make(chan *message.Payload) flushChan := make(chan struct{}) - s := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", &identityContentType{}, metrics.NewNoopPipelineMonitor("")) + s := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, 100*time.Millisecond, 2, 2, "test", compressionfx.NewMockCompressor().NewCompressor(compression.NoneKind, 1), metrics.NewNoopPipelineMonitor("")) s.Start() message := message.NewMessage([]byte{}, nil, "", 0) @@ -127,7 +129,7 @@ func TestBatchStrategySynchronousFlush(t *testing.T) { // batch size is large so it will not flush until we trigger it manually // flush time is large so it won't automatically trigger during this test - strategy := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, time.Hour, 100, 100, "test", &identityContentType{}, metrics.NewNoopPipelineMonitor("")) + strategy := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, time.Hour, 100, 100, "test", compressionfx.NewMockCompressor().NewCompressor(compression.NoneKind, 1), metrics.NewNoopPipelineMonitor("")) strategy.Start() // all of these messages will get buffered @@ -172,7 +174,7 @@ func TestBatchStrategyFlushChannel(t *testing.T) { // batch size is large so it will not flush until we trigger it manually // flush time is large so it won't automatically trigger during this test - strategy := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, time.Hour, 100, 100, "test", &identityContentType{}, metrics.NewNoopPipelineMonitor("")) + strategy := NewBatchStrategy(input, output, flushChan, false, nil, LineSerializer, time.Hour, 100, 100, "test", compressionfx.NewMockCompressor().NewCompressor(compression.NoneKind, 1), metrics.NewNoopPipelineMonitor("")) strategy.Start() // all of these messages will get buffered diff --git a/pkg/logs/sender/content_encoding.go b/pkg/logs/sender/content_encoding.go deleted file mode 100644 index 75f59587ff512..0000000000000 --- a/pkg/logs/sender/content_encoding.go +++ /dev/null @@ -1,73 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -package sender - -import ( - "bytes" - "compress/gzip" -) - -// ContentEncoding encodes the payload -type ContentEncoding interface { - name() string - encode(payload []byte) ([]byte, error) -} - -// IdentityContentType encodes the payload using the identity function -var IdentityContentType ContentEncoding = &identityContentType{} - -type identityContentType struct{} - -func (c *identityContentType) name() string { - return "identity" -} - -func (c *identityContentType) encode(payload []byte) ([]byte, error) { - return payload, nil -} - -// GzipContentEncoding encodes the payload using gzip algorithm -type GzipContentEncoding struct { - level int -} - -// NewGzipContentEncoding creates a new Gzip content type -func NewGzipContentEncoding(level int) *GzipContentEncoding { - if level < gzip.NoCompression { - level = gzip.NoCompression - } else if level > gzip.BestCompression { - level = gzip.BestCompression - } - - return &GzipContentEncoding{ - level, - } -} - -func (c *GzipContentEncoding) name() string { - return "gzip" -} - -func (c *GzipContentEncoding) encode(payload []byte) ([]byte, error) { - var compressedPayload bytes.Buffer - gzipWriter, err := gzip.NewWriterLevel(&compressedPayload, c.level) - if err != nil { - return nil, err - } - _, err = gzipWriter.Write(payload) - if err != nil { - return nil, err - } - err = gzipWriter.Flush() - if err != nil { - return nil, err - } - err = gzipWriter.Close() - if err != nil { - return nil, err - } - return compressedPayload.Bytes(), nil -} diff --git a/pkg/logs/sender/content_encoding_test.go b/pkg/logs/sender/content_encoding_test.go deleted file mode 100644 index ef91096bfd7f2..0000000000000 --- a/pkg/logs/sender/content_encoding_test.go +++ /dev/null @@ -1,58 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -package sender - -import ( - "bytes" - "compress/gzip" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestIdentityContentType(t *testing.T) { - payload := []byte("my payload") - - encodedPayload, err := IdentityContentType.encode(payload) - assert.Nil(t, err) - - assert.Equal(t, payload, encodedPayload) -} - -func TestIdentityContentTypeName(t *testing.T) { - assert.Equal(t, IdentityContentType.name(), "identity") -} - -func TestGzipContentEncoding(t *testing.T) { - payload := []byte("my payload") - - encodedPayload, err := NewGzipContentEncoding(gzip.BestCompression).encode(payload) - assert.Nil(t, err) - - decompressedPayload, err := decompress(encodedPayload) - assert.Nil(t, err) - - assert.Equal(t, payload, decompressedPayload) -} - -func TestGzipContentEncodingName(t *testing.T) { - assert.Equal(t, NewGzipContentEncoding(gzip.BestCompression).name(), "gzip") -} - -func decompress(payload []byte) ([]byte, error) { - reader, err := gzip.NewReader(bytes.NewReader(payload)) - if err != nil { - return nil, err - } - - var buffer bytes.Buffer - _, err = buffer.ReadFrom(reader) - if err != nil { - return nil, err - } - - return buffer.Bytes(), nil -} diff --git a/pkg/logs/sender/go.mod b/pkg/logs/sender/go.mod index 28b1f5738002a..c299a426c43c5 100644 --- a/pkg/logs/sender/go.mod +++ b/pkg/logs/sender/go.mod @@ -3,6 +3,7 @@ module github.com/DataDog/datadog-agent/pkg/logs/sender go 1.22.0 replace ( + github.com/DataDog/datadog-agent/cmd/agent/common/path => ../../../cmd/agent/common/path github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def github.com/DataDog/datadog-agent/comp/core/config => ../../../comp/core/config github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../comp/core/flare/builder @@ -13,6 +14,7 @@ replace ( github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../../comp/def github.com/DataDog/datadog-agent/comp/logs/agent/config => ../../../comp/logs/agent/config + github.com/DataDog/datadog-agent/comp/serializer/logscompression => ../../../comp/serializer/logscompression github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../collector/check/defaults github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock @@ -31,13 +33,15 @@ replace ( github.com/DataDog/datadog-agent/pkg/logs/util/testutils => ../util/testutils github.com/DataDog/datadog-agent/pkg/telemetry => ../../telemetry github.com/DataDog/datadog-agent/pkg/util/backoff => ../../util/backoff + github.com/DataDog/datadog-agent/pkg/util/compression => ../../util/compression + github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../util/defaultpaths github.com/DataDog/datadog-agent/pkg/util/executable => ../../util/executable github.com/DataDog/datadog-agent/pkg/util/filesystem => ../../util/filesystem github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/http => ../../util/http github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber 
github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../util/statstracker @@ -51,26 +55,31 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/serializer/logscompression v0.61.0 github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/logs/client v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/message v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/metrics v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/sources v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/statusinterface v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/benbjohnson/clock v1.3.5 github.com/stretchr/testify v1.10.0 ) require ( + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect @@ -78,19 +87,20 @@ require ( github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/backoff v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket 
v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect + github.com/DataDog/zstd v1.5.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect @@ -104,21 +114,21 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -130,11 +140,11 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/logs/sender/go.sum b/pkg/logs/sender/go.sum index b14b43571558e..fe2354cb4b6b1 100644 --- a/pkg/logs/sender/go.sum +++ b/pkg/logs/sender/go.sum @@ -2,6 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= github.com/DataDog/viper v1.14.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= 
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= @@ -73,7 +75,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -115,8 +116,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -143,8 +144,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -161,8 +162,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= 
-github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -176,8 +177,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -188,8 +189,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -244,8 +245,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= 
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -263,8 +264,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -284,8 +285,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -313,8 +314,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/logs/sender/stream_strategy.go b/pkg/logs/sender/stream_strategy.go index e31d455e5e12a..32cee91a95039 100644 --- a/pkg/logs/sender/stream_strategy.go +++ b/pkg/logs/sender/stream_strategy.go @@ -7,6 +7,7 @@ package sender import ( "github.com/DataDog/datadog-agent/pkg/logs/message" + 
"github.com/DataDog/datadog-agent/pkg/util/compression" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -14,19 +15,19 @@ import ( // that Message's Content. This is used for TCP destinations, which stream the output // without batching multiple messages together. type streamStrategy struct { - inputChan chan *message.Message - outputChan chan *message.Payload - contentEncoding ContentEncoding - done chan struct{} + inputChan chan *message.Message + outputChan chan *message.Payload + compression compression.Compressor + done chan struct{} } // NewStreamStrategy creates a new stream strategy -func NewStreamStrategy(inputChan chan *message.Message, outputChan chan *message.Payload, contentEncoding ContentEncoding) Strategy { +func NewStreamStrategy(inputChan chan *message.Message, outputChan chan *message.Payload, compression compression.Compressor) Strategy { return &streamStrategy{ - inputChan: inputChan, - outputChan: outputChan, - contentEncoding: contentEncoding, - done: make(chan struct{}), + inputChan: inputChan, + outputChan: outputChan, + compression: compression, + done: make(chan struct{}), } } @@ -38,7 +39,7 @@ func (s *streamStrategy) Start() { msg.Origin.LogSource.LatencyStats.Add(msg.GetLatency()) } - encodedPayload, err := s.contentEncoding.encode(msg.GetContent()) + encodedPayload, err := s.compression.Compress(msg.GetContent()) if err != nil { log.Warn("Encoding failed - dropping payload", err) return @@ -47,7 +48,7 @@ func (s *streamStrategy) Start() { s.outputChan <- &message.Payload{ Messages: []*message.Message{msg}, Encoded: encodedPayload, - Encoding: s.contentEncoding.name(), + Encoding: s.compression.ContentEncoding(), UnencodedSize: len(msg.GetContent()), } } diff --git a/pkg/logs/sender/stream_strategy_test.go b/pkg/logs/sender/stream_strategy_test.go index d535dafb169d1..f46f078f2f452 100644 --- a/pkg/logs/sender/stream_strategy_test.go +++ b/pkg/logs/sender/stream_strategy_test.go @@ -10,14 +10,16 @@ import ( "github.com/stretchr/testify/assert" + compressionfx "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/logs/message" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) func TestStreamStrategy(t *testing.T) { input := make(chan *message.Message) output := make(chan *message.Payload) - s := NewStreamStrategy(input, output, IdentityContentType) + s := NewStreamStrategy(input, output, compressionfx.NewMockCompressor().NewCompressor(compression.NoneKind, 1)) s.Start() content := []byte("a") @@ -45,7 +47,7 @@ func TestStreamStrategyShouldNotBlockWhenForceStopping(_ *testing.T) { input := make(chan *message.Message) output := make(chan *message.Payload) - s := NewStreamStrategy(input, output, IdentityContentType) + s := NewStreamStrategy(input, output, compressionfx.NewMockCompressor().NewCompressor(compression.NoneKind, 1)) message := message.NewMessage([]byte{}, nil, "", 0) go func() { @@ -60,7 +62,7 @@ func TestStreamStrategyShouldNotBlockWhenStoppingGracefully(t *testing.T) { input := make(chan *message.Message) output := make(chan *message.Payload) - s := NewStreamStrategy(input, output, IdentityContentType) + s := NewStreamStrategy(input, output, compressionfx.NewMockCompressor().NewCompressor(compression.NoneKind, 1)) message := message.NewMessage([]byte{}, nil, "", 0) go func() { diff --git a/pkg/logs/sources/go.mod b/pkg/logs/sources/go.mod index 47c016d543c91..f8d5fde65796c 100644 --- a/pkg/logs/sources/go.mod +++ b/pkg/logs/sources/go.mod @@ -27,7 +27,7 @@ replace ( 
github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../util/statstracker @@ -41,7 +41,7 @@ replace ( require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 github.com/stretchr/testify v1.10.0 ) @@ -50,21 +50,21 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -76,24 +76,24 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect 
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/sources/go.sum b/pkg/logs/sources/go.sum index 7fdf16db5981c..15a68c06d091d 100644 --- a/pkg/logs/sources/go.sum +++ b/pkg/logs/sources/go.sum @@ -71,7 +71,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -109,8 +108,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -137,8 +136,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -155,8 +154,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -169,8 +168,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -181,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 
h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -235,8 +234,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -273,8 +272,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -302,8 +301,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git 
a/pkg/logs/status/builder.go b/pkg/logs/status/builder.go index 9dfd646e21891..8ef1fcd3637e7 100644 --- a/pkg/logs/status/builder.go +++ b/pkg/logs/status/builder.go @@ -18,7 +18,7 @@ import ( sourcesPkg "github.com/DataDog/datadog-agent/pkg/logs/sources" status "github.com/DataDog/datadog-agent/pkg/logs/status/utils" "github.com/DataDog/datadog-agent/pkg/logs/tailers" - "github.com/DataDog/datadog-agent/pkg/util" + "github.com/DataDog/datadog-agent/pkg/util/procfilestats" ) // Builder is used to build the status. @@ -214,7 +214,7 @@ func (b *Builder) getMetricsStatus() map[string]string { func (b *Builder) getProcessFileStats() map[string]uint64 { stats := make(map[string]uint64) - fs, err := util.GetProcessFileStats() + fs, err := procfilestats.GetProcessFileStats() if err != nil { return stats } diff --git a/pkg/logs/status/statusinterface/status_noop.go b/pkg/logs/status/statusinterface/status_noop.go new file mode 100644 index 0000000000000..bd8234e8d9b21 --- /dev/null +++ b/pkg/logs/status/statusinterface/status_noop.go @@ -0,0 +1,22 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package statusinterface + +type noopStatusProvider struct{} + +// AddGlobalWarning keeps track of a warning message to display on the status. +func (mp *noopStatusProvider) AddGlobalWarning(string, string) { +} + +// RemoveGlobalWarning loses track of a warning message +// that does not need to be displayed on the status anymore. +func (mp *noopStatusProvider) RemoveGlobalWarning(string) { +} + +// NewNoopStatusProvider returns a mock instance of statusinterface to be used in tests +func NewNoopStatusProvider() Status { + return &noopStatusProvider{} +} diff --git a/pkg/logs/tailers/journald/tailer.go b/pkg/logs/tailers/journald/tailer.go index 6bc07e0d2ef2c..a8280944305c5 100644 --- a/pkg/logs/tailers/journald/tailer.go +++ b/pkg/logs/tailers/journald/tailer.go @@ -193,6 +193,11 @@ func (t *Tailer) setup() error { } func (t *Tailer) forwardMessages() { + defer func() { + // the decoder has successfully been flushed + close(t.done) + }() + for decodedMessage := range t.decoder.OutputChan { if len(decodedMessage.GetContent()) > 0 { t.outputChan <- decodedMessage @@ -250,7 +255,6 @@ func (t *Tailer) tail() { defer func() { t.journal.Close() t.decoder.Stop() - t.done <- struct{}{} }() for { select { diff --git a/pkg/logs/tailers/socket/tailer.go b/pkg/logs/tailers/socket/tailer.go index 459f92a2fc15d..f0500f6fd4f1e 100644 --- a/pkg/logs/tailers/socket/tailer.go +++ b/pkg/logs/tailers/socket/tailer.go @@ -98,6 +98,7 @@ func (t *Tailer) readForever() { log.Warnf("Couldn't read message from connection: %v", err) return } + t.source.RecordBytes(int64(len(data))) msg := decoder.NewInput(data) if ipAddress != "" && pkgconfigsetup.Datadog().GetBool("logs_config.use_sourcehost_tag") { lastColonIndex := strings.LastIndex(ipAddress, ":") diff --git a/pkg/logs/tailers/windowsevent/tailer.go b/pkg/logs/tailers/windowsevent/tailer.go index d7dc96393fa7f..5db7a658a7f73 100644 --- a/pkg/logs/tailers/windowsevent/tailer.go +++ b/pkg/logs/tailers/windowsevent/tailer.go @@ -27,10 +27,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/logs/util/windowsevent" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/api" - 
"github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/api/windows" - "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/bookmark" - "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/subscription" + evtapi "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/api" + winevtapi "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/api/windows" + evtbookmark "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/bookmark" + evtsubscribe "github.com/DataDog/datadog-agent/pkg/util/winutil/eventlog/subscription" ) // Config is a event log tailer configuration @@ -51,6 +51,7 @@ type Tailer struct { cancelTail context.CancelFunc doneTail chan struct{} + done chan struct{} sub evtsubscribe.PullSubscription bookmark evtbookmark.Bookmark @@ -94,6 +95,7 @@ func (t *Tailer) toMessage(m *windowsevent.Map) (*message.Message, error) { func (t *Tailer) Start(bookmark string) { log.Infof("Starting windows event log tailing for channel %s query %s", t.config.ChannelPath, t.config.Query) t.doneTail = make(chan struct{}) + t.done = make(chan struct{}) ctx, ctxCancel := context.WithCancel(context.Background()) t.cancelTail = ctxCancel go t.forwardMessages() @@ -110,9 +112,16 @@ func (t *Tailer) Stop() { t.decoder.Stop() t.sub.Stop() + + <-t.done } func (t *Tailer) forwardMessages() { + defer func() { + // the decoder has successfully been flushed + close(t.done) + }() + for decodedMessage := range t.decoder.OutputChan { if len(decodedMessage.GetContent()) > 0 { t.outputChan <- decodedMessage diff --git a/pkg/logs/util/testutils/go.mod b/pkg/logs/util/testutils/go.mod index 1c02f6f2f8775..fa5f83de87c64 100644 --- a/pkg/logs/util/testutils/go.mod +++ b/pkg/logs/util/testutils/go.mod @@ -30,7 +30,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../util/scrubber github.com/DataDog/datadog-agent/pkg/util/statstracker => ../../../util/statstracker @@ -48,24 +48,24 @@ require ( github.com/DataDog/datadog-agent/comp/logs/agent/config v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/logs/status/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect 
github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/statstracker v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -76,23 +76,23 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/logs/util/testutils/go.sum b/pkg/logs/util/testutils/go.sum index 7fdf16db5981c..15a68c06d091d 100644 --- a/pkg/logs/util/testutils/go.sum +++ b/pkg/logs/util/testutils/go.sum @@ -71,7 +71,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp 
v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -109,8 +108,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -137,8 +136,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -155,8 +154,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod 
h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -169,8 +168,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -181,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -235,8 +234,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -273,8 +272,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -302,8 +301,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/metrics/go.mod b/pkg/metrics/go.mod index 66e73954c2885..163f9caa836a2 100644 --- a/pkg/metrics/go.mod +++ b/pkg/metrics/go.mod @@ -7,6 +7,7 @@ replace ( github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../comp/core/flare/builder github.com/DataDog/datadog-agent/comp/core/flare/types => ../../comp/core/flare/types github.com/DataDog/datadog-agent/comp/core/secrets => ../../comp/core/secrets + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection => ../../comp/core/tagger/origindetection github.com/DataDog/datadog-agent/comp/core/telemetry => ../../comp/core/telemetry/ github.com/DataDog/datadog-agent/comp/def => ../../comp/def/ github.com/DataDog/datadog-agent/pkg/aggregator/ckey => ../aggregator/ckey/ @@ -27,7 +28,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../util/fxutil/ github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../util/log/ - github.com/DataDog/datadog-agent/pkg/util/optional => ../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/sort => ../util/sort/ @@ -40,38 +41,39 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/mock v0.57.1 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 github.com/DataDog/datadog-agent/pkg/tagger/types v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/buf 
v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 github.com/stretchr/testify v1.10.0 go.uber.org/atomic v1.11.0 ) require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.1 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.57.1 // indirect github.com/DataDog/datadog-agent/comp/def v0.57.1 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.57.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.14.0 // indirect @@ -89,21 +91,21 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - 
github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -115,10 +117,10 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/metrics/go.sum b/pkg/metrics/go.sum index 31112092709e6..90a9dead5ba26 100644 --- a/pkg/metrics/go.sum +++ b/pkg/metrics/go.sum @@ -1,9 +1,9 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0 h1:cXcKVEU1D0HlguR7GunnvuI70TghkarCa9DApqzMY94= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0/go.mod h1:ES00EXfyEKgUkjd93tAXCxJA6i0seeOhZoS5Cj2qzzg= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 h1:63SzQz9Ab8XJj8fQKQz6UZNBhOm8rucwzbDfwTVF6dQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0/go.mod h1:E/PY/aQ6S/N5hBPHXZRGmovs5b1BSi4RHGNcB4yP/Z0= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0 h1:ttW3C3IN8p1goqyvaVpT4Blzg3lQ+sh4MTtB33BbpdE= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0/go.mod h1:FpUbxBqKdi16CDJnRifUzmkETaEYR75xvh2Vo8vvJN0= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 h1:Uha4TTkbCcYTvUbkbfvUjUmxtPaPKCOtwwl91erkRRg= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0/go.mod h1:RWoMSFb2Q+L0FSRYctEt8Wp0em+InUg+Oe+BU30e7gA= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= @@ -79,7 +79,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -123,8 +122,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -151,8 +150,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -169,8 +168,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -184,8 +183,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -196,8 +195,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -254,8 +253,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -292,8 +291,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text 
v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -321,8 +320,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/metrics/metricsource.go b/pkg/metrics/metricsource.go index 5fcc0ff956952..8573aabb517d3 100644 --- a/pkg/metrics/metricsource.go +++ b/pkg/metrics/metricsource.go @@ -309,6 +309,51 @@ const ( MetricSourceMilvus MetricSourceNvidiaNim MetricSourceQuarkus + + // OpenTelemetry Collector receivers + MetricSourceOpenTelemetryCollectorUnknown + MetricSourceOpenTelemetryCollectorDockerstatsReceiver + MetricSourceOpenTelemetryCollectorElasticsearchReceiver + MetricSourceOpenTelemetryCollectorExpvarReceiver + MetricSourceOpenTelemetryCollectorFilestatsReceiver + MetricSourceOpenTelemetryCollectorFlinkmetricsReceiver + MetricSourceOpenTelemetryCollectorGitproviderReceiver + MetricSourceOpenTelemetryCollectorHaproxyReceiver + MetricSourceOpenTelemetryCollectorHostmetricsReceiver + MetricSourceOpenTelemetryCollectorHttpcheckReceiver + MetricSourceOpenTelemetryCollectorIisReceiver + MetricSourceOpenTelemetryCollectorK8sclusterReceiver + MetricSourceOpenTelemetryCollectorKafkametricsReceiver + MetricSourceOpenTelemetryCollectorKubeletstatsReceiver + MetricSourceOpenTelemetryCollectorMemcachedReceiver + MetricSourceOpenTelemetryCollectorMongodbatlasReceiver + MetricSourceOpenTelemetryCollectorMongodbReceiver + MetricSourceOpenTelemetryCollectorMysqlReceiver + MetricSourceOpenTelemetryCollectorNginxReceiver + MetricSourceOpenTelemetryCollectorNsxtReceiver + MetricSourceOpenTelemetryCollectorOracledbReceiver + MetricSourceOpenTelemetryCollectorPostgresqlReceiver + MetricSourceOpenTelemetryCollectorPrometheusReceiver + MetricSourceOpenTelemetryCollectorRabbitmqReceiver + MetricSourceOpenTelemetryCollectorRedisReceiver + MetricSourceOpenTelemetryCollectorRiakReceiver + MetricSourceOpenTelemetryCollectorSaphanaReceiver + MetricSourceOpenTelemetryCollectorSnmpReceiver + MetricSourceOpenTelemetryCollectorSnowflakeReceiver + MetricSourceOpenTelemetryCollectorSplunkenterpriseReceiver + MetricSourceOpenTelemetryCollectorSqlserverReceiver + MetricSourceOpenTelemetryCollectorSshcheckReceiver + MetricSourceOpenTelemetryCollectorStatsdReceiver + MetricSourceOpenTelemetryCollectorVcenterReceiver + MetricSourceOpenTelemetryCollectorZookeeperReceiver + MetricSourceOpenTelemetryCollectorActiveDirectorydsReceiver + MetricSourceOpenTelemetryCollectorAerospikeReceiver + 
MetricSourceOpenTelemetryCollectorApacheReceiver + MetricSourceOpenTelemetryCollectorApachesparkReceiver + MetricSourceOpenTelemetryCollectorAzuremonitorReceiver + MetricSourceOpenTelemetryCollectorBigipReceiver + MetricSourceOpenTelemetryCollectorChronyReceiver + MetricSourceOpenTelemetryCollectorCouchdbReceiver ) // String returns a string representation of MetricSource @@ -882,6 +927,92 @@ func (ms MetricSource) String() string { return "milvus" case MetricSourceQuarkus: return "quarkus" + case MetricSourceOpenTelemetryCollectorUnknown: + return "opentelemetry_collector_unknown" + case MetricSourceOpenTelemetryCollectorDockerstatsReceiver: + return "opentelemetry_collector_dockerstatsreceiver" + case MetricSourceOpenTelemetryCollectorElasticsearchReceiver: + return "opentelemetry_collector_elasticsearchreceiver" + case MetricSourceOpenTelemetryCollectorExpvarReceiver: + return "opentelemetry_collector_expvarreceiver" + case MetricSourceOpenTelemetryCollectorFilestatsReceiver: + return "opentelemetry_collector_filestatsreceiver" + case MetricSourceOpenTelemetryCollectorFlinkmetricsReceiver: + return "opentelemetry_collector_flinkmetricsreceiver" + case MetricSourceOpenTelemetryCollectorGitproviderReceiver: + return "opentelemetry_collector_gitproviderreceiver" + case MetricSourceOpenTelemetryCollectorHaproxyReceiver: + return "opentelemetry_collector_haproxyreceiver" + case MetricSourceOpenTelemetryCollectorHostmetricsReceiver: + return "opentelemetry_collector_hostmetricsreceiver" + case MetricSourceOpenTelemetryCollectorHttpcheckReceiver: + return "opentelemetry_collector_httpcheckreceiver" + case MetricSourceOpenTelemetryCollectorIisReceiver: + return "opentelemetry_collector_iisreceiver" + case MetricSourceOpenTelemetryCollectorK8sclusterReceiver: + return "opentelemetry_collector_k8sclusterreceiver" + case MetricSourceOpenTelemetryCollectorKafkametricsReceiver: + return "opentelemetry_collector_kafkametricsreceiver" + case MetricSourceOpenTelemetryCollectorKubeletstatsReceiver: + return "opentelemetry_collector_kubeletstatsreceiver" + case MetricSourceOpenTelemetryCollectorMemcachedReceiver: + return "opentelemetry_collector_memcachedreceiver" + case MetricSourceOpenTelemetryCollectorMongodbatlasReceiver: + return "opentelemetry_collector_mongodbatlasreceiver" + case MetricSourceOpenTelemetryCollectorMongodbReceiver: + return "opentelemetry_collector_mongodbreceiver" + case MetricSourceOpenTelemetryCollectorMysqlReceiver: + return "opentelemetry_collector_mysqlreceiver" + case MetricSourceOpenTelemetryCollectorNginxReceiver: + return "opentelemetry_collector_nginxreceiver" + case MetricSourceOpenTelemetryCollectorNsxtReceiver: + return "opentelemetry_collector_nsxtreceiver" + case MetricSourceOpenTelemetryCollectorOracledbReceiver: + return "opentelemetry_collector_oracledbreceiver" + case MetricSourceOpenTelemetryCollectorPostgresqlReceiver: + return "opentelemetry_collector_postgresqlreceiver" + case MetricSourceOpenTelemetryCollectorPrometheusReceiver: + return "opentelemetry_collector_prometheusreceiver" + case MetricSourceOpenTelemetryCollectorRabbitmqReceiver: + return "opentelemetry_collector_rabbitmqreceiver" + case MetricSourceOpenTelemetryCollectorRedisReceiver: + return "opentelemetry_collector_redisreceiver" + case MetricSourceOpenTelemetryCollectorRiakReceiver: + return "opentelemetry_collector_riakreceiver" + case MetricSourceOpenTelemetryCollectorSaphanaReceiver: + return "opentelemetry_collector_saphanareceiver" + case MetricSourceOpenTelemetryCollectorSnmpReceiver: 
+ return "opentelemetry_collector_snmpreceiver" + case MetricSourceOpenTelemetryCollectorSnowflakeReceiver: + return "opentelemetry_collector_snowflakereceiver" + case MetricSourceOpenTelemetryCollectorSplunkenterpriseReceiver: + return "opentelemetry_collector_splunkenterprisereceiver" + case MetricSourceOpenTelemetryCollectorSqlserverReceiver: + return "opentelemetry_collector_sqlserverreceiver" + case MetricSourceOpenTelemetryCollectorSshcheckReceiver: + return "opentelemetry_collector_sshcheckreceiver" + case MetricSourceOpenTelemetryCollectorStatsdReceiver: + return "opentelemetry_collector_statsdreceiver" + case MetricSourceOpenTelemetryCollectorVcenterReceiver: + return "opentelemetry_collector_vcenterreceiver" + case MetricSourceOpenTelemetryCollectorZookeeperReceiver: + return "opentelemetry_collector_zookeeperreceiver" + case MetricSourceOpenTelemetryCollectorActiveDirectorydsReceiver: + return "opentelemetry_collector_activedirectorydsreceiver" + case MetricSourceOpenTelemetryCollectorAerospikeReceiver: + return "opentelemetry_collector_aerospikereceiver" + case MetricSourceOpenTelemetryCollectorApacheReceiver: + return "opentelemetry_collector_apachereceiver" + case MetricSourceOpenTelemetryCollectorApachesparkReceiver: + return "opentelemetry_collector_apachesparkreceiver" + case MetricSourceOpenTelemetryCollectorAzuremonitorReceiver: + return "opentelemetry_collector_azuremonitorreceiver" + case MetricSourceOpenTelemetryCollectorBigipReceiver: + return "opentelemetry_collector_bigipreceiver" + case MetricSourceOpenTelemetryCollectorChronyReceiver: + return "opentelemetry_collector_chronyreceiver" + case MetricSourceOpenTelemetryCollectorCouchdbReceiver: + return "opentelemetry_collector_couchdbreceiver" default: return "" } @@ -1426,6 +1557,92 @@ func CheckNameToMetricSource(name string) MetricSource { return MetricSourceMilvus case "quarkus": return MetricSourceQuarkus + case "opentelemetry_collector_unknown": + return MetricSourceOpenTelemetryCollectorUnknown + case "opentelemetry_collector_dockerstatsreceiver": + return MetricSourceOpenTelemetryCollectorDockerstatsReceiver + case "opentelemetry_collector_elasticsearchreceiver": + return MetricSourceOpenTelemetryCollectorElasticsearchReceiver + case "opentelemetry_collector_expvarreceiver": + return MetricSourceOpenTelemetryCollectorExpvarReceiver + case "opentelemetry_collector_filestatsreceiver": + return MetricSourceOpenTelemetryCollectorFilestatsReceiver + case "opentelemetry_collector_flinkmetricsreceiver": + return MetricSourceOpenTelemetryCollectorFlinkmetricsReceiver + case "opentelemetry_collector_gitproviderreceiver": + return MetricSourceOpenTelemetryCollectorGitproviderReceiver + case "opentelemetry_collector_haproxyreceiver": + return MetricSourceOpenTelemetryCollectorHaproxyReceiver + case "opentelemetry_collector_hostmetricsreceiver": + return MetricSourceOpenTelemetryCollectorHostmetricsReceiver + case "opentelemetry_collector_httpcheckreceiver": + return MetricSourceOpenTelemetryCollectorHttpcheckReceiver + case "opentelemetry_collector_iisreceiver": + return MetricSourceOpenTelemetryCollectorIisReceiver + case "opentelemetry_collector_k8sclusterreceiver": + return MetricSourceOpenTelemetryCollectorK8sclusterReceiver + case "opentelemetry_collector_kafkametricsreceiver": + return MetricSourceOpenTelemetryCollectorKafkametricsReceiver + case "opentelemetry_collector_kubeletstatsreceiver": + return MetricSourceOpenTelemetryCollectorKubeletstatsReceiver + case "opentelemetry_collector_memcachedreceiver": + 
return MetricSourceOpenTelemetryCollectorMemcachedReceiver + case "opentelemetry_collector_mongodbatlasreceiver": + return MetricSourceOpenTelemetryCollectorMongodbatlasReceiver + case "opentelemetry_collector_mongodbreceiver": + return MetricSourceOpenTelemetryCollectorMongodbReceiver + case "opentelemetry_collector_mysqlreceiver": + return MetricSourceOpenTelemetryCollectorMysqlReceiver + case "opentelemetry_collector_nginxreceiver": + return MetricSourceOpenTelemetryCollectorNginxReceiver + case "opentelemetry_collector_nsxtreceiver": + return MetricSourceOpenTelemetryCollectorNsxtReceiver + case "opentelemetry_collector_oracledbreceiver": + return MetricSourceOpenTelemetryCollectorOracledbReceiver + case "opentelemetry_collector_postgresqlreceiver": + return MetricSourceOpenTelemetryCollectorPostgresqlReceiver + case "opentelemetry_collector_prometheusreceiver": + return MetricSourceOpenTelemetryCollectorPrometheusReceiver + case "opentelemetry_collector_rabbitmqreceiver": + return MetricSourceOpenTelemetryCollectorRabbitmqReceiver + case "opentelemetry_collector_redisreceiver": + return MetricSourceOpenTelemetryCollectorRedisReceiver + case "opentelemetry_collector_riakreceiver": + return MetricSourceOpenTelemetryCollectorRiakReceiver + case "opentelemetry_collector_saphanareceiver": + return MetricSourceOpenTelemetryCollectorSaphanaReceiver + case "opentelemetry_collector_snmpreceiver": + return MetricSourceOpenTelemetryCollectorSnmpReceiver + case "opentelemetry_collector_snowflakereceiver": + return MetricSourceOpenTelemetryCollectorSnowflakeReceiver + case "opentelemetry_collector_splunkenterprisereceiver": + return MetricSourceOpenTelemetryCollectorSplunkenterpriseReceiver + case "opentelemetry_collector_sqlserverreceiver": + return MetricSourceOpenTelemetryCollectorSqlserverReceiver + case "opentelemetry_collector_sshcheckreceiver": + return MetricSourceOpenTelemetryCollectorSshcheckReceiver + case "opentelemetry_collector_statsdreceiver": + return MetricSourceOpenTelemetryCollectorStatsdReceiver + case "opentelemetry_collector_vcenterreceiver": + return MetricSourceOpenTelemetryCollectorVcenterReceiver + case "opentelemetry_collector_zookeeperreceiver": + return MetricSourceOpenTelemetryCollectorZookeeperReceiver + case "opentelemetry_collector_activedirectorydsreceiver": + return MetricSourceOpenTelemetryCollectorActiveDirectorydsReceiver + case "opentelemetry_collector_aerospikereceiver": + return MetricSourceOpenTelemetryCollectorAerospikeReceiver + case "opentelemetry_collector_apachereceiver": + return MetricSourceOpenTelemetryCollectorApacheReceiver + case "opentelemetry_collector_apachesparkreceiver": + return MetricSourceOpenTelemetryCollectorApachesparkReceiver + case "opentelemetry_collector_azuremonitorreceiver": + return MetricSourceOpenTelemetryCollectorAzuremonitorReceiver + case "opentelemetry_collector_bigipreceiver": + return MetricSourceOpenTelemetryCollectorBigipReceiver + case "opentelemetry_collector_chronyreceiver": + return MetricSourceOpenTelemetryCollectorChronyReceiver + case "opentelemetry_collector_couchdbreceiver": + return MetricSourceOpenTelemetryCollectorCouchdbReceiver default: return MetricSourceUnknown } diff --git a/pkg/network/config/config.go b/pkg/network/config/config.go index bf0e4ad498786..dc1de944ab076 100644 --- a/pkg/network/config/config.go +++ b/pkg/network/config/config.go @@ -218,6 +218,9 @@ type Config struct { // ClosedChannelSize specifies the size for closed channel for the tracer ClosedChannelSize int + // 
ClosedBufferWakeupCount specifies the number of events that will buffer in a perf buffer before userspace is woken up. + ClosedBufferWakeupCount int + // ExcludedSourceConnections is a map of source connections to blacklist ExcludedSourceConnections map[string][]string @@ -288,6 +291,15 @@ type Config struct { // EnableUSMEventStream enables USM to use the event stream instead // of netlink for receiving process events. EnableUSMEventStream bool + + // CustomBatchingEnabled enables the use of custom batching for eBPF perf events with perf buffers + CustomBatchingEnabled bool + + // USMKernelBufferPages defines the number of pages to allocate for the USM kernel buffer, used for either ring buffers or perf maps. + USMKernelBufferPages int + + // USMDataChannelSize specifies the size of the data channel for USM, used to temporarily store data from the kernel in user mode before processing. + USMDataChannelSize int } // New creates a config for the network tracer @@ -318,8 +330,9 @@ func New() *Config { MaxTrackedConnections: uint32(cfg.GetInt64(sysconfig.FullKeyPath(spNS, "max_tracked_connections"))), MaxClosedConnectionsBuffered: uint32(cfg.GetInt64(sysconfig.FullKeyPath(spNS, "max_closed_connections_buffered"))), MaxFailedConnectionsBuffered: uint32(cfg.GetInt64(sysconfig.FullKeyPath(netNS, "max_failed_connections_buffered"))), - ClosedConnectionFlushThreshold: cfg.GetInt(sysconfig.FullKeyPath(spNS, "closed_connection_flush_threshold")), - ClosedChannelSize: cfg.GetInt(sysconfig.FullKeyPath(spNS, "closed_channel_size")), + ClosedConnectionFlushThreshold: cfg.GetInt(sysconfig.FullKeyPath(netNS, "closed_connection_flush_threshold")), + ClosedChannelSize: cfg.GetInt(sysconfig.FullKeyPath(netNS, "closed_channel_size")), + ClosedBufferWakeupCount: cfg.GetInt(sysconfig.FullKeyPath(netNS, "closed_buffer_wakeup_count")), MaxConnectionsStateBuffered: cfg.GetInt(sysconfig.FullKeyPath(spNS, "max_connection_state_buffered")), ClientStateExpiry: 2 * time.Minute, @@ -334,6 +347,7 @@ func New() *Config { ProtocolClassificationEnabled: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_protocol_classification")), NPMRingbuffersEnabled: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_ringbuffers")), + CustomBatchingEnabled: cfg.GetBool(sysconfig.FullKeyPath(netNS, "enable_custom_batching")), EnableHTTPMonitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_http_monitoring")), EnableHTTP2Monitoring: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_http2_monitoring")), @@ -392,6 +406,8 @@ func New() *Config { EnableUSMConnectionRollup: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_connection_rollup")), EnableUSMRingBuffers: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_ring_buffers")), EnableUSMEventStream: cfg.GetBool(sysconfig.FullKeyPath(smNS, "enable_event_stream")), + USMKernelBufferPages: cfg.GetInt(sysconfig.FullKeyPath(smNS, "kernel_buffer_pages")), + USMDataChannelSize: cfg.GetInt(sysconfig.FullKeyPath(smNS, "data_channel_size")), } httpRRKey := sysconfig.FullKeyPath(smNS, "http_replace_rules") diff --git a/pkg/network/config/config_test.go b/pkg/network/config/config_test.go index b2f8929608cb5..4d59ddc984375 100644 --- a/pkg/network/config/config_test.go +++ b/pkg/network/config/config_test.go @@ -1365,6 +1365,56 @@ func TestUSMEventStream(t *testing.T) { }) } +func TestUSMKernelBufferPages(t *testing.T) { + t.Run("default value", func(t *testing.T) { + mock.NewSystemProbe(t) + cfg := New() + + assert.Equal(t, cfg.USMKernelBufferPages, 16) + }) + + t.Run("via yaml", func(t *testing.T) { + 
mockSystemProbe := mock.NewSystemProbe(t) + mockSystemProbe.SetWithoutSource("service_monitoring_config.kernel_buffer_pages", 109) + cfg := New() + + assert.Equal(t, cfg.USMKernelBufferPages, 109) + }) + + t.Run("via ENV variable", func(t *testing.T) { + mock.NewSystemProbe(t) + t.Setenv("DD_SERVICE_MONITORING_CONFIG_KERNEL_BUFFER_PAGES", "109") + cfg := New() + + assert.Equal(t, cfg.USMKernelBufferPages, 109) + }) +} + +func TestUSMDataChannelSize(t *testing.T) { + t.Run("default value", func(t *testing.T) { + mock.NewSystemProbe(t) + cfg := New() + + assert.Equal(t, cfg.USMDataChannelSize, 100) + }) + + t.Run("via yaml", func(t *testing.T) { + mockSystemProbe := mock.NewSystemProbe(t) + mockSystemProbe.SetWithoutSource("service_monitoring_config.data_channel_size", 109) + cfg := New() + + assert.Equal(t, cfg.USMDataChannelSize, 109) + }) + + t.Run("via ENV variable", func(t *testing.T) { + mock.NewSystemProbe(t) + t.Setenv("DD_SERVICE_MONITORING_CONFIG_DATA_CHANNEL_SIZE", "109") + cfg := New() + + assert.Equal(t, cfg.USMDataChannelSize, 109) + }) +} + func TestMaxUSMConcurrentRequests(t *testing.T) { t.Run("default value", func(t *testing.T) { mock.NewSystemProbe(t) diff --git a/pkg/network/dns/packet_source_windows.go b/pkg/network/dns/packet_source_windows.go index 52ae79ffa4cef..3267a9e15ae8c 100644 --- a/pkg/network/dns/packet_source_windows.go +++ b/pkg/network/dns/packet_source_windows.go @@ -8,6 +8,7 @@ package dns import ( + "sync" "time" "github.com/google/gopacket" @@ -20,7 +21,9 @@ import ( var _ filter.PacketSource = &windowsPacketSource{} type windowsPacketSource struct { - di *dnsDriver + di *dnsDriver + exit chan struct{} + mu sync.Mutex } // newWindowsPacketSource constructs a new packet source @@ -29,11 +32,23 @@ func newWindowsPacketSource(telemetrycomp telemetry.Component) (filter.PacketSou if err != nil { return nil, err } - return &windowsPacketSource{di: di}, nil + return &windowsPacketSource{ + di: di, + exit: make(chan struct{}), + }, nil } -func (p *windowsPacketSource) VisitPackets(exit <-chan struct{}, visit func([]byte, filter.PacketInfo, time.Time) error) error { +func (p *windowsPacketSource) VisitPackets(visit func([]byte, filter.PacketInfo, time.Time) error) error { + p.mu.Lock() + defer p.mu.Unlock() for { + // break out of loop if exit is closed + select { + case <-p.exit: + return nil + default: + } + didReadPacket, err := p.di.ReadDNSPacket(visit) if err != nil { return err @@ -41,13 +56,6 @@ func (p *windowsPacketSource) VisitPackets(exit <-chan struct{}, visit func([]by if !didReadPacket { return nil } - - // break out of loop if exit is closed - select { - case <-exit: - return nil - default: - } } } @@ -56,5 +64,10 @@ func (p *windowsPacketSource) LayerType() gopacket.LayerType { } func (p *windowsPacketSource) Close() { + close(p.exit) + + // wait for the VisitPackets loop to finish, then close + p.mu.Lock() + defer p.mu.Unlock() _ = p.di.Close() } diff --git a/pkg/network/dns/snooper.go b/pkg/network/dns/snooper.go index c1bc700a33b3c..a13432af33a68 100644 --- a/pkg/network/dns/snooper.go +++ b/pkg/network/dns/snooper.go @@ -122,8 +122,9 @@ func (s *socketFilterSnooper) Start() error { func (s *socketFilterSnooper) Close() { s.once.Do(func() { close(s.exit) - s.wg.Wait() + // close the packet capture loop and wait for it to finish s.source.Close() + s.wg.Wait() s.cache.Close() if s.statKeeper != nil { s.statKeeper.Close() @@ -170,7 +171,7 @@ func (s *socketFilterSnooper) processPacket(data []byte, _ filter.PacketInfo, ts func (s 
*socketFilterSnooper) pollPackets() { for { - err := s.source.VisitPackets(s.exit, s.processPacket) + err := s.source.VisitPackets(s.processPacket) if err != nil { log.Warnf("error reading packet: %s", err) diff --git a/pkg/network/ebpf/bpf_module.go b/pkg/network/ebpf/bpf_module.go index dc9e6279c6d57..8e4d71bff3248 100644 --- a/pkg/network/ebpf/bpf_module.go +++ b/pkg/network/ebpf/bpf_module.go @@ -19,9 +19,10 @@ import ( // prebuiltModulesInUse is a global object which is responsible for keeping a list of all the prebuilt ebpf assets in use. // This is used to report ebpf asset telemetry var prebuiltModulesInUse = map[string]struct{}{} -var telemetrymu sync.Mutex +var telemetryMu sync.Mutex -func ModuleFileName(moduleName string, debug bool) string { //nolint:revive // TODO +// ModuleFileName constructs the module file name based on the module name +func ModuleFileName(moduleName string, debug bool) string { if debug { return fmt.Sprintf("%s-debug.o", moduleName) } @@ -35,8 +36,8 @@ func readModule(bpfDir, moduleName string, debug bool) (bytecode.AssetReader, er return nil, fmt.Errorf("couldn't find asset: %s", err) } - telemetrymu.Lock() - defer telemetrymu.Unlock() + telemetryMu.Lock() + defer telemetryMu.Unlock() prebuiltModulesInUse[moduleName] = struct{}{} return ebpfReader, nil } @@ -66,7 +67,8 @@ func ReadOffsetBPFModule(bpfDir string, debug bool) (bytecode.AssetReader, error return readModule(bpfDir, "offset-guess", debug) } -func ReadFentryTracerModule(bpfDir string, debug bool) (bytecode.AssetReader, error) { //nolint:revive // TODO +// ReadFentryTracerModule from the asset file +func ReadFentryTracerModule(bpfDir string, debug bool) (bytecode.AssetReader, error) { return readModule(bpfDir, "tracer-fentry", debug) } @@ -75,9 +77,10 @@ func ReadConntrackBPFModule(bpfDir string, debug bool) (bytecode.AssetReader, er return readModule(bpfDir, "conntrack", debug) } -func GetModulesInUse() []string { //nolint:revive // TODO - telemetrymu.Lock() - defer telemetrymu.Unlock() +// GetModulesInUse returns a list of modules in use +func GetModulesInUse() []string { + telemetryMu.Lock() + defer telemetryMu.Unlock() return maps.Keys(prebuiltModulesInUse) } diff --git a/pkg/network/ebpf/c/prebuilt/usm.c b/pkg/network/ebpf/c/prebuilt/usm.c index 8b5e62ede108e..99217fa2bcfc9 100644 --- a/pkg/network/ebpf/c/prebuilt/usm.c +++ b/pkg/network/ebpf/c/prebuilt/usm.c @@ -7,6 +7,7 @@ #include "offsets.h" #include "protocols/classification/dispatcher-helpers.h" +#include "protocols/flush.h" #include "protocols/http/buffer.h" #include "protocols/http/http.h" #include "protocols/http2/decoding.h" @@ -48,19 +49,4 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_sendmsg, struct sock *sk) { return 0; } -SEC("tracepoint/net/netif_receive_skb") -int tracepoint__net__netif_receive_skb(void *ctx) { - CHECK_BPF_PROGRAM_BYPASSED() - log_debug("tracepoint/net/netif_receive_skb"); - // flush batch to userspace - // because perf events can't be sent from socket filter programs - http_batch_flush(ctx); - http2_batch_flush(ctx); - terminated_http2_batch_flush(ctx); - kafka_batch_flush(ctx); - postgres_batch_flush(ctx); - redis_batch_flush(ctx); - return 0; -} - char _license[] SEC("license") = "GPL"; diff --git a/pkg/network/ebpf/c/protocols/classification/usm-context.h b/pkg/network/ebpf/c/protocols/classification/classification-context.h similarity index 67% rename from pkg/network/ebpf/c/protocols/classification/usm-context.h rename to pkg/network/ebpf/c/protocols/classification/classification-context.h index 
47f637c1291cb..526f5f8e0f1e5 100644 --- a/pkg/network/ebpf/c/protocols/classification/usm-context.h +++ b/pkg/network/ebpf/c/protocols/classification/classification-context.h @@ -1,5 +1,5 @@ -#ifndef __USM_CONTEXT_H -#define __USM_CONTEXT_H +#ifndef __CLASSIFICATION_CONTEXT_H +#define __CLASSIFICATION_CONTEXT_H #include "tracer/tracer.h" #include "protocols/classification/common.h" @@ -20,10 +20,10 @@ typedef struct { conn_tuple_t tuple; skb_info_t skb_info; classification_buffer_t buffer; - // bit mask with layers that should be skiped + // bit mask with layers that should be skipped u16 routing_skip_layers; classification_prog_t routing_current_program; -} usm_context_t; +} classification_context_t; // Kernels before 4.7 do not know about per-cpu array maps. #if defined(COMPILE_PREBUILT) || defined(COMPILE_CORE) || (defined(COMPILE_RUNTIME) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0)) @@ -42,12 +42,12 @@ typedef struct { // words, there is a chance that ingress and egress packets can be processed // concurrently on the same CPU, which is why have a dedicated per CPU map entry // for each direction in order to avoid data corruption. -BPF_PERCPU_ARRAY_MAP(classification_buf, usm_context_t, 2) +BPF_PERCPU_ARRAY_MAP(classification_buf, classification_context_t, 2) #else BPF_ARRAY_MAP(classification_buf, __u8, 1) #endif -static __always_inline usm_context_t* __get_usm_context(struct __sk_buff *skb) { +static __always_inline classification_context_t* __get_classification_context(struct __sk_buff *skb) { // we use the packet direction as the key to the CPU map const u32 key = skb->pkt_type == PACKET_OUTGOING; return bpf_map_lookup_elem(&classification_buf, &key); @@ -60,36 +60,36 @@ static __always_inline void __init_buffer(struct __sk_buff *skb, skb_info_t *skb buffer->size = payload_length < CLASSIFICATION_MAX_BUFFER ? 
payload_length : CLASSIFICATION_MAX_BUFFER; } -static __always_inline usm_context_t* usm_context_init(struct __sk_buff *skb, conn_tuple_t *tuple, skb_info_t *skb_info) { +static __always_inline classification_context_t* classification_context_init(struct __sk_buff *skb, conn_tuple_t *tuple, skb_info_t *skb_info) { if (!skb || !skb_info) { return NULL; } - usm_context_t *usm_context = __get_usm_context(skb); - if (!usm_context) { + classification_context_t *classification_context = __get_classification_context(skb); + if (!classification_context) { return NULL; } - usm_context->owner = skb; - usm_context->tuple = *tuple; - usm_context->skb_info = *skb_info; - __init_buffer(skb, skb_info, &usm_context->buffer); - return usm_context; + classification_context->owner = skb; + classification_context->tuple = *tuple; + classification_context->skb_info = *skb_info; + __init_buffer(skb, skb_info, &classification_context->buffer); + return classification_context; } -static __always_inline usm_context_t* usm_context(struct __sk_buff *skb) { - usm_context_t *usm_context = __get_usm_context(skb); - if (!usm_context) { +static __always_inline classification_context_t* classification_context(struct __sk_buff *skb) { + classification_context_t *classification_context = __get_classification_context(skb); + if (!classification_context) { return NULL; } // sanity check - if (usm_context->owner != skb) { - log_debug("invalid usm context"); + if (classification_context->owner != skb) { + log_debug("invalid classification context"); return NULL; } - return usm_context; + return classification_context; } #endif diff --git a/pkg/network/ebpf/c/protocols/classification/defs.h b/pkg/network/ebpf/c/protocols/classification/defs.h index 46dbfd8e7df51..235d638d2f5b5 100644 --- a/pkg/network/ebpf/c/protocols/classification/defs.h +++ b/pkg/network/ebpf/c/protocols/classification/defs.h @@ -76,7 +76,7 @@ typedef enum { // Each `protocol_t` entry is implicitly associated to a single // `protocol_layer_t` value (see notes above). // -//In order to determine which `protocol_layer_t` a `protocol_t` belongs to, +// In order to determine which `protocol_layer_t` a `protocol_t` belongs to, // users can call `get_protocol_layer` typedef enum { LAYER_UNKNOWN, @@ -103,7 +103,7 @@ typedef struct { // `protocol_stack_t` is embedded in the `conn_stats_t` type, which is used // across the whole NPM kernel code. If we added the 64-bit timestamp field // directly to `protocol_stack_t`, we would go from 4 bytes to 12 bytes, which -// bloats the eBPF stack size of some NPM probes. Using the wrapper type +// bloats the eBPF stack size of some NPM probes. Using the wrapper type // prevents that, because we pretty much only store the wrapper type in the // connection_protocol map, but elsewhere in the code we're still using // protocol_stack_t, so this is change is "transparent" to most of the code. 
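Before the next hunk, which registers the two new TLS handshake programs, it may help to recall how `classification_prog_t` is laid out: the sentinel values `__PROG_APPLICATION`, `__PROG_API` and `__PROG_ENCRYPTION` delimit the program groups, and a program's layer is inferred from which pair of sentinels brackets it. The sketch below is a simplified user-space rendering of that convention; only the names visible in this diff are real, the `APPLICATION_PROG_*` entries and the layer bit values are placeholders. It also shows why the `-Wtautological-overlap-compare` pragma can be dropped further down in routing-helpers.h: once the encryption group is non-empty, the encryption-range comparison is no longer trivially false.

```c
/* Simplified user-space sketch of the sentinel-delimited program layout
 * used by the classification tail-call router. Illustrative only, not the
 * agent's actual header. */
#include <stdint.h>
#include <stdio.h>

typedef enum {
    CLASSIFICATION_PROG_UNKNOWN = 0,
    __PROG_APPLICATION,             /* application-layer programs follow */
    APPLICATION_PROG_A,             /* placeholder */
    APPLICATION_PROG_B,             /* placeholder */
    __PROG_API,                     /* API-layer programs follow */
    CLASSIFICATION_GRPC_PROG,
    __PROG_ENCRYPTION,              /* encryption-layer programs follow */
    CLASSIFICATION_TLS_CLIENT_PROG, /* new in this change */
    CLASSIFICATION_TLS_SERVER_PROG, /* new in this change */
    CLASSIFICATION_PROG_MAX,
} classification_prog_t;

#define LAYER_APPLICATION_BIT (1 << 0) /* bit values illustrative */
#define LAYER_API_BIT         (1 << 1)
#define LAYER_ENCRYPTION_BIT  (1 << 2)

/* A program's layer is implied by the pair of sentinels that brackets it. */
static uint16_t layer_of(classification_prog_t p) {
    if (p > __PROG_APPLICATION && p < __PROG_API)
        return LAYER_APPLICATION_BIT;
    if (p > __PROG_API && p < __PROG_ENCRYPTION)
        return LAYER_API_BIT;
    if (p > __PROG_ENCRYPTION && p < CLASSIFICATION_PROG_MAX)
        return LAYER_ENCRYPTION_BIT;
    return 0;
}

int main(void) {
    /* Prints 4: the new TLS programs land in the encryption group. */
    printf("%u\n", (unsigned)layer_of(CLASSIFICATION_TLS_CLIENT_PROG));
    return 0;
}
```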
@@ -123,6 +123,8 @@ typedef enum { CLASSIFICATION_GRPC_PROG, __PROG_ENCRYPTION, // Encryption classification programs go here + CLASSIFICATION_TLS_CLIENT_PROG, + CLASSIFICATION_TLS_SERVER_PROG, CLASSIFICATION_PROG_MAX, } classification_prog_t; diff --git a/pkg/network/ebpf/c/protocols/classification/protocol-classification.h b/pkg/network/ebpf/c/protocols/classification/protocol-classification.h index e3f44bfa16bdc..8c2659f96634f 100644 --- a/pkg/network/ebpf/c/protocols/classification/protocol-classification.h +++ b/pkg/network/ebpf/c/protocols/classification/protocol-classification.h @@ -7,12 +7,12 @@ #include "port_range.h" #include "protocols/amqp/helpers.h" +#include "protocols/classification/classification-context.h" #include "protocols/classification/common.h" #include "protocols/classification/defs.h" #include "protocols/classification/maps.h" #include "protocols/classification/structs.h" #include "protocols/classification/stack-helpers.h" -#include "protocols/classification/usm-context.h" #include "protocols/classification/routing.h" #include "protocols/grpc/defs.h" #include "protocols/grpc/helpers.h" @@ -61,20 +61,20 @@ // times will result in traversing only the api and encryption-layer programs // updates the the protocol stack and adds the current layer to the routing skip list -static __always_inline void update_protocol_information(usm_context_t *usm_ctx, protocol_stack_t *stack, protocol_t proto) { +static __always_inline void update_protocol_information(classification_context_t *classification_ctx, protocol_stack_t *stack, protocol_t proto) { set_protocol(stack, proto); - usm_ctx->routing_skip_layers |= proto; + classification_ctx->routing_skip_layers |= proto; } // Check if the connections is used for gRPC traffic. -static __always_inline void classify_grpc(usm_context_t *usm_ctx, protocol_stack_t *protocol_stack, struct __sk_buff *skb, skb_info_t *skb_info) { +static __always_inline void classify_grpc(classification_context_t *classification_ctx, protocol_stack_t *protocol_stack, struct __sk_buff *skb, skb_info_t *skb_info) { grpc_status_t status = is_grpc(skb, skb_info); if (status == PAYLOAD_UNDETERMINED) { return; } if (status == PAYLOAD_GRPC) { - update_protocol_information(usm_ctx, protocol_stack, PROTOCOL_GRPC); + update_protocol_information(classification_ctx, protocol_stack, PROTOCOL_GRPC); } // Whether the traffic is gRPC or not, we can mark the stack as fully @@ -142,33 +142,44 @@ __maybe_unused static __always_inline void protocol_classifier_entrypoint(struct return; } - usm_context_t *usm_ctx = usm_context_init(skb, &skb_tup, &skb_info); - if (!usm_ctx) { + classification_context_t *classification_ctx = classification_context_init(skb, &skb_tup, &skb_info); + if (!classification_ctx) { return; } - protocol_stack_t *protocol_stack = get_protocol_stack_if_exists(&usm_ctx->tuple); + protocol_stack_t *protocol_stack = get_protocol_stack_if_exists(&classification_ctx->tuple); if (is_fully_classified(protocol_stack) || is_protocol_layer_known(protocol_stack, LAYER_ENCRYPTION)) { return; } // Load information that will be later on used to route tail-calls - init_routing_cache(usm_ctx, protocol_stack); + init_routing_cache(classification_ctx, protocol_stack); - const char *buffer = &(usm_ctx->buffer.data[0]); + const char *buffer = &(classification_ctx->buffer.data[0]); protocol_t app_layer_proto = get_protocol_from_stack(protocol_stack, LAYER_APPLICATION); - if ((app_layer_proto == PROTOCOL_UNKNOWN || app_layer_proto == PROTOCOL_POSTGRES) && is_tls(buffer, 
usm_ctx->buffer.size, skb_info.data_end)) { - protocol_stack = get_or_create_protocol_stack(&usm_ctx->tuple); + tls_record_header_t tls_hdr = {0}; + + if ((app_layer_proto == PROTOCOL_UNKNOWN || app_layer_proto == PROTOCOL_POSTGRES) && is_tls(skb, skb_info.data_off, skb_info.data_end, &tls_hdr)) { + protocol_stack = get_or_create_protocol_stack(&classification_ctx->tuple); if (!protocol_stack) { return; } // TLS classification - update_protocol_information(usm_ctx, protocol_stack, PROTOCOL_TLS); - // The connection is TLS encrypted, thus we cannot classify the protocol - // using the socket filter and therefore we can bail out; + if (tls_hdr.content_type != TLS_HANDSHAKE) { + // We can't classify TLS encrypted traffic further, so return early + update_protocol_information(classification_ctx, protocol_stack, PROTOCOL_TLS); + return; + } + + // Parse TLS handshake payload + tls_info_t *tags = get_or_create_tls_enhanced_tags(&classification_ctx->tuple); + if (tags) { + // The packet is a TLS handshake, so trigger tail calls to extract metadata from the payload + goto next_program; + } return; } @@ -177,15 +188,15 @@ __maybe_unused static __always_inline void protocol_classifier_entrypoint(struct } if (app_layer_proto == PROTOCOL_UNKNOWN) { - app_layer_proto = classify_applayer_protocols(buffer, usm_ctx->buffer.size); + app_layer_proto = classify_applayer_protocols(buffer, classification_ctx->buffer.size); } if (app_layer_proto != PROTOCOL_UNKNOWN) { - protocol_stack = get_or_create_protocol_stack(&usm_ctx->tuple); + protocol_stack = get_or_create_protocol_stack(&classification_ctx->tuple); if (!protocol_stack) { return; } - update_protocol_information(usm_ctx, protocol_stack, app_layer_proto); + update_protocol_information(classification_ctx, protocol_stack, app_layer_proto); if (app_layer_proto == PROTOCOL_HTTP2) { // If we found HTTP2, then we try to classify its content. 
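The branch added in the hunk above only tags a connection as `PROTOCOL_TLS` and stops when the record is not a handshake; handshake records instead get an enhanced-tags entry via `get_or_create_tls_enhanced_tags` and fall through to `next_program`, so the client and server hello entrypoints added in the following hunk can run. In the real flow both tail-call programs run in turn and each checks the hello type itself; the sketch below collapses that into a single user-space decision for illustration. The enum and function names here are not agent code, and the record header is assumed to have already been validated by `is_tls`.

```c
/* Hedged sketch of the handshake dispatch. Content-type and handshake-type
 * constants follow RFC 5246; everything else is illustrative. */
#include <stdint.h>

#define TLS_HANDSHAKE              0x16
#define TLS_HANDSHAKE_CLIENT_HELLO 0x01
#define TLS_HANDSHAKE_SERVER_HELLO 0x02

typedef enum {
    TAG_TLS_AND_STOP,      /* encrypted/control record: mark PROTOCOL_TLS, bail out */
    RUN_CLIENT_HELLO_PROG, /* parse offered TLS versions from the ClientHello */
    RUN_SERVER_HELLO_PROG, /* parse the negotiated version, then mark PROTOCOL_TLS */
    NOT_A_HELLO,
} tls_dispatch_t;

static tls_dispatch_t dispatch(uint8_t content_type, uint8_t handshake_type) {
    if (content_type != TLS_HANDSHAKE)
        return TAG_TLS_AND_STOP;
    if (handshake_type == TLS_HANDSHAKE_CLIENT_HELLO)
        return RUN_CLIENT_HELLO_PROG;
    if (handshake_type == TLS_HANDSHAKE_SERVER_HELLO)
        return RUN_SERVER_HELLO_PROG;
    return NOT_A_HELLO;
}
```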
@@ -197,73 +208,125 @@ __maybe_unused static __always_inline void protocol_classifier_entrypoint(struct } next_program: - classification_next_program(skb, usm_ctx); + classification_next_program(skb, classification_ctx); +} + +__maybe_unused static __always_inline void protocol_classifier_entrypoint_tls_handshake_client(struct __sk_buff *skb) { + classification_context_t *classification_ctx = classification_context(skb); + if (!classification_ctx) { + return; + } + tls_info_t* tls_info = get_tls_enhanced_tags(&classification_ctx->tuple); + if (!tls_info) { + goto next_program; + } + __u32 offset = classification_ctx->skb_info.data_off + sizeof(tls_record_header_t); + __u32 data_end = classification_ctx->skb_info.data_end; + if (!is_tls_handshake_client_hello(skb, offset, classification_ctx->skb_info.data_end)) { + goto next_program; + } + if (!parse_client_hello(skb, offset, data_end, tls_info)) { + return; + } + +next_program: + classification_next_program(skb, classification_ctx); +} + +__maybe_unused static __always_inline void protocol_classifier_entrypoint_tls_handshake_server(struct __sk_buff *skb) { + classification_context_t *classification_ctx = classification_context(skb); + if (!classification_ctx) { + return; + } + tls_info_t* tls_info = get_tls_enhanced_tags(&classification_ctx->tuple); + if (!tls_info) { + goto next_program; + } + __u32 offset = classification_ctx->skb_info.data_off + sizeof(tls_record_header_t); + __u32 data_end = classification_ctx->skb_info.data_end; + if (!is_tls_handshake_server_hello(skb, offset, data_end)) { + goto next_program; + } + if (!parse_server_hello(skb, offset, data_end, tls_info)) { + return; + } + + protocol_stack_t *protocol_stack = get_protocol_stack_if_exists(&classification_ctx->tuple); + if (!protocol_stack) { + return; + } + update_protocol_information(classification_ctx, protocol_stack, PROTOCOL_TLS); + // We can't classify TLS encrypted traffic further, so return early + return; + +next_program: + classification_next_program(skb, classification_ctx); } __maybe_unused static __always_inline void protocol_classifier_entrypoint_queues(struct __sk_buff *skb) { - usm_context_t *usm_ctx = usm_context(skb); - if (!usm_ctx) { + classification_context_t *classification_ctx = classification_context(skb); + if (!classification_ctx) { return; } - const char *buffer = &(usm_ctx->buffer.data[0]); - protocol_t cur_fragment_protocol = classify_queue_protocols(skb, &usm_ctx->skb_info, buffer, usm_ctx->buffer.size); + const char *buffer = &(classification_ctx->buffer.data[0]); + protocol_t cur_fragment_protocol = classify_queue_protocols(skb, &classification_ctx->skb_info, buffer, classification_ctx->buffer.size); if (!cur_fragment_protocol) { goto next_program; } - protocol_stack_t *protocol_stack = get_or_create_protocol_stack(&usm_ctx->tuple); + protocol_stack_t *protocol_stack = get_or_create_protocol_stack(&classification_ctx->tuple); if (!protocol_stack) { return; } - update_protocol_information(usm_ctx, protocol_stack, cur_fragment_protocol); + update_protocol_information(classification_ctx, protocol_stack, cur_fragment_protocol); mark_as_fully_classified(protocol_stack); next_program: - classification_next_program(skb, usm_ctx); + classification_next_program(skb, classification_ctx); } __maybe_unused static __always_inline void protocol_classifier_entrypoint_dbs(struct __sk_buff *skb) { - usm_context_t *usm_ctx = usm_context(skb); - if (!usm_ctx) { + classification_context_t *classification_ctx = classification_context(skb); + if 
(!classification_ctx) { return; } - const char *buffer = &usm_ctx->buffer.data[0]; - protocol_t cur_fragment_protocol = classify_db_protocols(&usm_ctx->tuple, buffer, usm_ctx->buffer.size); + const char *buffer = &classification_ctx->buffer.data[0]; + protocol_t cur_fragment_protocol = classify_db_protocols(&classification_ctx->tuple, buffer, classification_ctx->buffer.size); if (!cur_fragment_protocol) { goto next_program; } - protocol_stack_t *protocol_stack = get_or_create_protocol_stack(&usm_ctx->tuple); + protocol_stack_t *protocol_stack = get_or_create_protocol_stack(&classification_ctx->tuple); if (!protocol_stack) { return; } - update_protocol_information(usm_ctx, protocol_stack, cur_fragment_protocol); + update_protocol_information(classification_ctx, protocol_stack, cur_fragment_protocol); mark_as_fully_classified(protocol_stack); next_program: - classification_next_program(skb, usm_ctx); + classification_next_program(skb, classification_ctx); } __maybe_unused static __always_inline void protocol_classifier_entrypoint_grpc(struct __sk_buff *skb) { - usm_context_t *usm_ctx = usm_context(skb); - if (!usm_ctx) { + classification_context_t *classification_ctx = classification_context(skb); + if (!classification_ctx) { return; } // gRPC classification can happen only if the application layer is known // So if we don't have a protocol stack, we can continue to the next program. - protocol_stack_t *protocol_stack = get_protocol_stack_if_exists(&usm_ctx->tuple); + protocol_stack_t *protocol_stack = get_protocol_stack_if_exists(&classification_ctx->tuple); if (protocol_stack) { // The GRPC classification program can be called without a prior // classification of HTTP2, which is a precondition. protocol_t app_layer_proto = get_protocol_from_stack(protocol_stack, LAYER_APPLICATION); if (app_layer_proto == PROTOCOL_HTTP2) { - classify_grpc(usm_ctx, protocol_stack, skb, &usm_ctx->skb_info); + classify_grpc(classification_ctx, protocol_stack, skb, &classification_ctx->skb_info); } } - classification_next_program(skb, usm_ctx); + classification_next_program(skb, classification_ctx); } #endif diff --git a/pkg/network/ebpf/c/protocols/classification/routing-helpers.h b/pkg/network/ebpf/c/protocols/classification/routing-helpers.h index 9e2ba628851c6..f046625e6af7c 100644 --- a/pkg/network/ebpf/c/protocols/classification/routing-helpers.h +++ b/pkg/network/ebpf/c/protocols/classification/routing-helpers.h @@ -18,29 +18,7 @@ static __always_inline bool has_available_program(classification_prog_t current_ return true; } -#pragma clang diagnostic push -// The following check is ignored because *currently* there are no API or -// Encryption classification programs registerd. -// Therefore the enum containing all BPF programs looks like the following: -// -// typedef enum { -// CLASSIFICATION_PROG_UNKNOWN = 0, -// __PROG_APPLICATION, -// APPLICATION_PROG_A -// APPLICATION_PROG_B -// APPLICATION_PROG_C -// ... 
-// __PROG_API, -// // No programs here -// __PROG_ENCRYPTION, -// // No programs here -// CLASSIFICATION_PROG_MAX, -// } classification_prog_t; -// -// Which means that the following conditionals will always evaluate to false: -// a) current_program > __PROG_API && current_program < __PROG_ENCRYPTION -// b) current_program > __PROG_ENCRYPTION && current_program < CLASSIFICATION_PROG_MAX -#pragma clang diagnostic ignored "-Wtautological-overlap-compare" +// get_current_program_layer returns the layer bit of the current program static __always_inline u16 get_current_program_layer(classification_prog_t current_program) { if (current_program > __PROG_APPLICATION && current_program < __PROG_API) { return LAYER_APPLICATION_BIT; @@ -56,20 +34,19 @@ static __always_inline u16 get_current_program_layer(classification_prog_t curre return 0; } -#pragma clang diagnostic pop -static __always_inline classification_prog_t next_layer_entrypoint(usm_context_t *usm_ctx) { - u16 to_skip = usm_ctx->routing_skip_layers; +static __always_inline classification_prog_t next_layer_entrypoint(classification_context_t *classification_ctx) { + u16 to_skip = classification_ctx->routing_skip_layers; + if (!(to_skip&LAYER_ENCRYPTION_BIT)) { + return __PROG_ENCRYPTION+1; + } if (!(to_skip&LAYER_APPLICATION_BIT)) { return __PROG_APPLICATION+1; } if (!(to_skip&LAYER_API_BIT)) { return __PROG_API+1; } - if (!(to_skip&LAYER_ENCRYPTION_BIT)) { - return __PROG_ENCRYPTION+1; - } return CLASSIFICATION_PROG_UNKNOWN; } diff --git a/pkg/network/ebpf/c/protocols/classification/routing.h b/pkg/network/ebpf/c/protocols/classification/routing.h index 9a4b534553b35..f224f6c764e71 100644 --- a/pkg/network/ebpf/c/protocols/classification/routing.h +++ b/pkg/network/ebpf/c/protocols/classification/routing.h @@ -20,10 +20,10 @@ BPF_PROG_ARRAY(classification_progs, CLASSIFICATION_PROG_MAX) // not the application layer protocol is known at the time of the call. When a // certain protocol layer is known, the function "skips" to the entry-point of // the next layer and so forth. 
-static __always_inline classification_prog_t __get_next_program(usm_context_t *usm_ctx) { - classification_prog_t current_program = usm_ctx->routing_current_program; +static __always_inline classification_prog_t __get_next_program(classification_context_t *classification_ctx) { + classification_prog_t current_program = classification_ctx->routing_current_program; u16 current_layer_bit = get_current_program_layer(current_program); - bool current_layer_known = usm_ctx->routing_skip_layers & current_layer_bit; + bool current_layer_known = classification_ctx->routing_skip_layers & current_layer_bit; if (has_available_program(current_program) && !current_layer_known) { // advance to the next program belonging to this protocol layer @@ -32,28 +32,28 @@ static __always_inline classification_prog_t __get_next_program(usm_context_t *u // there are not other programs belonging to the same layer to be executed, // so we skip to the first program of the next layer that is not known - usm_ctx->routing_skip_layers |= current_layer_bit; - return next_layer_entrypoint(usm_ctx); + classification_ctx->routing_skip_layers |= current_layer_bit; + return next_layer_entrypoint(classification_ctx); } -static __always_inline void classification_next_program(struct __sk_buff *skb, usm_context_t *usm_ctx) { - classification_prog_t next_program = __get_next_program(usm_ctx); +static __always_inline void classification_next_program(struct __sk_buff *skb, classification_context_t *classification_ctx) { + classification_prog_t next_program = __get_next_program(classification_ctx); if (next_program == CLASSIFICATION_PROG_UNKNOWN || next_program == CLASSIFICATION_PROG_MAX) { log_debug("classification tail-call: skb=%p tail-end", skb); return; } - log_debug("classification tail-call: skb=%p from=%d to=%d", skb, usm_ctx->routing_current_program, next_program); - usm_ctx->routing_current_program = next_program; + log_debug("classification tail-call: skb=%p from=%d to=%d", skb, classification_ctx->routing_current_program, next_program); + classification_ctx->routing_current_program = next_program; bpf_tail_call_compat(skb, &classification_progs, next_program); } // init_routing_cache is executed once per packet, at the socket filter entrypoint. // the information loaded here is persisted throughout multiple bpf tail-calls and // it's used for the purposes of figuring out which BPF program to execute next. 
-static __always_inline void init_routing_cache(usm_context_t *usm_ctx, protocol_stack_t *stack) { - usm_ctx->routing_skip_layers = 0; - usm_ctx->routing_current_program = CLASSIFICATION_PROG_UNKNOWN; +static __always_inline void init_routing_cache(classification_context_t *classification_ctx, protocol_stack_t *stack) { + classification_ctx->routing_skip_layers = 0; + classification_ctx->routing_current_program = CLASSIFICATION_PROG_UNKNOWN; // No protocol stack, nothing to mark for skipping if (!stack) { @@ -64,13 +64,13 @@ static __always_inline void init_routing_cache(usm_context_t *usm_ctx, protocol_ // 1) If the protocol for that layer is known // 2) If there are no programs registered for that layer if (stack->layer_application || !has_available_program(__PROG_APPLICATION)) { - usm_ctx->routing_skip_layers |= LAYER_APPLICATION_BIT; + classification_ctx->routing_skip_layers |= LAYER_APPLICATION_BIT; } if (stack->layer_api || !has_available_program(__PROG_API)) { - usm_ctx->routing_skip_layers |= LAYER_API_BIT; + classification_ctx->routing_skip_layers |= LAYER_API_BIT; } if (stack->layer_encryption || !has_available_program(__PROG_ENCRYPTION)) { - usm_ctx->routing_skip_layers |= LAYER_ENCRYPTION_BIT; + classification_ctx->routing_skip_layers |= LAYER_ENCRYPTION_BIT; } } diff --git a/pkg/network/ebpf/c/protocols/events.h b/pkg/network/ebpf/c/protocols/events.h index 8aba74be87708..c9a254b09f618 100644 --- a/pkg/network/ebpf/c/protocols/events.h +++ b/pkg/network/ebpf/c/protocols/events.h @@ -2,6 +2,7 @@ #define __USM_EVENTS_H #include "protocols/events-types.h" +#include "bpf_telemetry.h" #define _STR(x) #x /* USM_EVENTS_INIT defines two functions used for the purposes of buffering and sending @@ -28,7 +29,7 @@ return val > 0; \ } \ \ - static __always_inline void name##_batch_flush(struct pt_regs *ctx) { \ + static __always_inline void name##_batch_flush_common(struct pt_regs *ctx, bool with_telemetry) { \ if (!is_##name##_monitoring_enabled()) { \ return; \ } \ @@ -54,13 +55,25 @@ } \ \ if (use_ring_buffer) { \ - perf_ret = bpf_ringbuf_output(&name##_batch_events, batch, sizeof(batch_data_t), 0);\ + if (with_telemetry) { \ + perf_ret = bpf_ringbuf_output_with_telemetry(&name##_batch_events, batch, sizeof(batch_data_t), 0);\ + } else { \ + perf_ret = bpf_ringbuf_output(&name##_batch_events, batch, sizeof(batch_data_t), 0);\ + } \ } else { \ - perf_ret = bpf_perf_event_output(ctx, \ + if (with_telemetry) { \ + perf_ret = bpf_perf_event_output_with_telemetry(ctx, \ + &name##_batch_events, \ + key.cpu, \ + batch, \ + sizeof(batch_data_t)); \ + } else { \ + perf_ret = bpf_perf_event_output(ctx, \ &name##_batch_events, \ key.cpu, \ batch, \ sizeof(batch_data_t)); \ + } \ } \ if (perf_ret < 0) { \ _LOG(name, "batch flush error: cpu: %d idx: %llu err: %ld", \ @@ -77,6 +90,14 @@ } \ } \ \ + static __always_inline void name##_batch_flush(struct pt_regs *ctx) { \ + name##_batch_flush_common(ctx, false); \ + } \ + \ + static __always_inline void name##_batch_flush_with_telemetry(struct pt_regs *ctx) { \ + name##_batch_flush_common(ctx, true); \ + } \ + \ static __always_inline void name##_batch_enqueue(value *event) { \ u32 zero = 0; \ batch_state_t *batch_state = bpf_map_lookup_elem(&name##_batch_state, &zero); \ diff --git a/pkg/network/ebpf/c/protocols/flush.h b/pkg/network/ebpf/c/protocols/flush.h new file mode 100644 index 0000000000000..cf040a66dc83f --- /dev/null +++ b/pkg/network/ebpf/c/protocols/flush.h @@ -0,0 +1,31 @@ +#ifndef __USM_FLUSH_H +#define __USM_FLUSH_H + +#include 
"bpf_bypass.h" + +#include "protocols/http/http.h" +#include "protocols/http2/decoding.h" +#include "protocols/kafka/kafka-parsing.h" +#include "protocols/postgres/decoding.h" +#include "protocols/redis/decoding.h" + +// flush all batched events to userspace for all protocols. +// because perf events can't be sent from socket filter programs. +static __always_inline void flush(void *ctx) { + http_batch_flush_with_telemetry(ctx); + http2_batch_flush(ctx); + terminated_http2_batch_flush(ctx); + kafka_batch_flush(ctx); + postgres_batch_flush(ctx); + redis_batch_flush(ctx); +} + +SEC("tracepoint/net/netif_receive_skb") +int tracepoint__net__netif_receive_skb(void *ctx) { + CHECK_BPF_PROGRAM_BYPASSED() + log_debug("tracepoint/net/netif_receive_skb"); + flush(ctx); + return 0; +} + +#endif diff --git a/pkg/network/ebpf/c/protocols/http2/decoding.h b/pkg/network/ebpf/c/protocols/http2/decoding.h index 7abd0a642f897..2582951c20ea5 100644 --- a/pkg/network/ebpf/c/protocols/http2/decoding.h +++ b/pkg/network/ebpf/c/protocols/http2/decoding.h @@ -625,11 +625,6 @@ static __always_inline void handle_first_frame(pktbuf_t pkt, __u32 *external_dat return; } - http2_telemetry_t *http2_tel = get_telemetry(pkt); - if (http2_tel == NULL) { - return; - } - incomplete_frame_t *incomplete_frame = bpf_map_lookup_elem(&http2_incomplete_frames, tup); bool has_valid_first_frame = pktbuf_get_first_frame(pkt, incomplete_frame, ¤t_frame); // If we have a state and we consumed it, then delete it. diff --git a/pkg/network/ebpf/c/protocols/tls/https.h b/pkg/network/ebpf/c/protocols/tls/https.h index ac3c509daaeb3..fb10d6496e6af 100644 --- a/pkg/network/ebpf/c/protocols/tls/https.h +++ b/pkg/network/ebpf/c/protocols/tls/https.h @@ -77,6 +77,7 @@ static __always_inline void tls_process(struct pt_regs *ctx, conn_tuple_t *t, vo // we're in the context of TLS hookpoints, thus the protocol is TLS. set_protocol(stack, PROTOCOL_TLS); + set_protocol_flag(stack, FLAG_USM_ENABLED); const __u32 zero = 0; protocol_t protocol = get_protocol_from_stack(stack, LAYER_APPLICATION); diff --git a/pkg/network/ebpf/c/protocols/tls/tls.h b/pkg/network/ebpf/c/protocols/tls/tls.h index 0f7f2cdf9ee56..813c548360a9a 100644 --- a/pkg/network/ebpf/c/protocols/tls/tls.h +++ b/pkg/network/ebpf/c/protocols/tls/tls.h @@ -1,9 +1,9 @@ #ifndef __TLS_H #define __TLS_H -#include "ktypes.h" -#include "bpf_builtins.h" +#include "tracer/tracer.h" +// TLS version constants (SSL versions are deprecated, included for completeness) #define SSL_VERSION20 0x0200 #define SSL_VERSION30 0x0300 #define TLS_VERSION10 0x0301 @@ -11,122 +11,612 @@ #define TLS_VERSION12 0x0303 #define TLS_VERSION13 0x0304 -#define TLS_HANDSHAKE 0x16 -#define TLS_APPLICATION_DATA 0x17 +// TLS Content Types (https://www.rfc-editor.org/rfc/rfc5246#page-19 6.2. Record Layer) +#define TLS_HANDSHAKE 0x16 +#define TLS_APPLICATION_DATA 0x17 +#define TLS_CHANGE_CIPHER_SPEC 0x14 +#define TLS_ALERT 0x15 -/* https://www.rfc-editor.org/rfc/rfc5246#page-19 6.2. 
Record Layer */ +// TLS Handshake Types +#define TLS_HANDSHAKE_CLIENT_HELLO 0x01 +#define TLS_HANDSHAKE_SERVER_HELLO 0x02 + +// Bitmask constants for offered versions +#define TLS_VERSION10_BIT 1 << 0 +#define TLS_VERSION11_BIT 1 << 1 +#define TLS_VERSION12_BIT 1 << 2 +#define TLS_VERSION13_BIT 1 << 3 +// Maximum number of extensions to parse when looking for SUPPORTED_VERSIONS_EXTENSION +#define MAX_EXTENSIONS 16 +// The supported_versions extension for TLS 1.3 is described in RFC 8446 Section 4.2.1 +#define SUPPORTED_VERSIONS_EXTENSION 0x002B + +// Maximum TLS record payload size (16 KB) #define TLS_MAX_PAYLOAD_LENGTH (1 << 14) -// TLS record layer header structure +// The following field lengths and message formats are defined by the TLS specifications +// For TLS 1.2 (and earlier) see: +// RFC 5246 - The Transport Layer Security (TLS) Protocol Version 1.2 +// https://tools.ietf.org/html/rfc5246 +// Particularly Section 7.4 details handshake messages and their fields, and Section 6.2.1 +// covers the TLS record layer. +// For TLS 1.3, see: +// RFC 8446 - The Transport Layer Security (TLS) Protocol Version 1.3 +// https://tools.ietf.org/html/rfc8446 +// Many handshake structures are similar, but some extensions (like supported_versions) are defined here +#define TLS_HANDSHAKE_LENGTH 3 // Handshake length is 3 bytes (RFC 5246 Section 7.4) +#define TLS_HELLO_MESSAGE_HEADER_SIZE 4 // handshake_type(1) + length(3) +#define RANDOM_LENGTH 32 // Random field length in Client/Server Hello (RFC 5246 Section 7.4.1.2) +#define PROTOCOL_VERSION_LENGTH 2 // Protocol version field is 2 bytes (RFC 5246 Section 6.2.1) +#define SESSION_ID_LENGTH 1 // Session ID length field is 1 byte (RFC 5246 Section 7.4.1.2) +#define CIPHER_SUITES_LENGTH 2 // Cipher Suites length field is 2 bytes (RFC 5246 Section 7.4.1.2) +#define COMPRESSION_METHODS_LENGTH 1 // Compression Methods length field is 1 byte (RFC 5246 Section 7.4.1.2) +#define EXTENSION_TYPE_LENGTH 2 // Extension Type field is 2 bytes (RFC 5246 Section 7.4.1.4) +#define EXTENSION_LENGTH_FIELD 2 // Extension Length field is 2 bytes (RFC 5246 Section 7.4.1.4) + +// For single-byte fields (list lengths, etc.) 
+#define SINGLE_BYTE_LENGTH 1 + +// Minimum extension header length = Extension Type (2 bytes) + Extension Length (2 bytes) = 4 bytes +#define MIN_EXTENSION_HEADER_LENGTH (EXTENSION_TYPE_LENGTH + EXTENSION_LENGTH_FIELD) + +// Maximum number of supported versions we unroll for (all TLS versions) +#define MAX_SUPPORTED_VERSIONS 4 + +// TLS record layer header structure (RFC 5246) typedef struct { __u8 content_type; __u16 version; __u16 length; } __attribute__((packed)) tls_record_header_t; -typedef struct { +// Checks if the TLS version is valid +static __always_inline bool is_valid_tls_version(__u16 version) { + switch (version) { + case SSL_VERSION20: + case SSL_VERSION30: + case TLS_VERSION10: + case TLS_VERSION11: + case TLS_VERSION12: + case TLS_VERSION13: + return true; + default: + return false; + } +} + +// set_tls_offered_version sets the bit corresponding to the offered version in the offered_versions field of tls_info +static __always_inline void set_tls_offered_version(tls_info_t *tls_info, __u16 version) { + switch (version) { + case TLS_VERSION10: + tls_info->offered_versions |= TLS_VERSION10_BIT; + break; + case TLS_VERSION11: + tls_info->offered_versions |= TLS_VERSION11_BIT; + break; + case TLS_VERSION12: + tls_info->offered_versions |= TLS_VERSION12_BIT; + break; + case TLS_VERSION13: + tls_info->offered_versions |= TLS_VERSION13_BIT; + break; + default: + break; + } +} + +// TLS Record Header (RFC 5246 Section 6.2.1) +// +// +---------+---------+---------+-----------+ +// | type(1) | version(2) | length(2) | +// +---------+---------+---------+-----------+ +// type: 1 byte (TLS_CONTENT_TYPE) +// version: 2 bytes (e.g., 0x03 0x03 for TLS 1.2) +// length: 2 bytes (total number of payload bytes following this header) + +// read_tls_record_header reads the TLS record header from the packet +// Reference: RFC 5246 Section 6.2.1 (Record Layer), https://tools.ietf.org/html/rfc5246#section-6.2.1 +// Validates the record header fields (content_type, version, length) and checks for correctness within packet bounds. +static __always_inline bool read_tls_record_header(struct __sk_buff *skb, __u32 header_offset, __u32 data_end, tls_record_header_t *tls_hdr) { + // Ensure there's enough space for TLS record header + if (header_offset + sizeof(tls_record_header_t) > data_end) { + return false; + } + + // Read TLS record header + if (bpf_skb_load_bytes(skb, header_offset, tls_hdr, sizeof(tls_record_header_t)) < 0) { + return false; + } + + // Convert fields to host byte order + tls_hdr->version = bpf_ntohs(tls_hdr->version); + tls_hdr->length = bpf_ntohs(tls_hdr->length); + + // Validate version and length + if (!is_valid_tls_version(tls_hdr->version)) { + return false; + } + if (tls_hdr->length > TLS_MAX_PAYLOAD_LENGTH) { + return false; + } + + // Ensure we don't read beyond the packet + return header_offset + sizeof(tls_record_header_t) + tls_hdr->length <= data_end; +} + +// TLS Handshake Message Header (RFC 5246 Section 7.4) +// +---------+---------+---------+---------+ +// | handshake_type(1) | length(3 bytes) | +// +---------+---------+---------+---------+ +// +// The handshake_type identifies the handshake message (e.g., ClientHello, ServerHello). +// length indicates the size of the handshake message that follows (not including these 4 bytes). + +// is_valid_tls_handshake checks if the TLS handshake message is valid +// The function expects the record to have already been validated. It further checks that the +// handshake_type and handshake_length are consistent. 
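The record-header validation performed by read_tls_record_header above can be mirrored in userspace. The following Go sketch is illustrative only and is not part of this patch; it applies the same checks as the eBPF helper: a known SSL/TLS version, a length no larger than TLS_MAX_PAYLOAD_LENGTH, and a record that fits inside the captured buffer.

package main

import (
    "encoding/binary"
    "errors"
    "fmt"
)

const tlsMaxPayloadLength = 1 << 14 // mirrors TLS_MAX_PAYLOAD_LENGTH

// recordHeader mirrors tls_record_header_t: content type (1 byte),
// version (2 bytes) and length (2 bytes), all in network byte order.
type recordHeader struct {
    ContentType byte
    Version     uint16
    Length      uint16
}

// validTLSVersion mirrors is_valid_tls_version.
func validTLSVersion(v uint16) bool {
    switch v {
    case 0x0200, 0x0300, 0x0301, 0x0302, 0x0303, 0x0304:
        return true
    }
    return false
}

// parseRecordHeader applies the same bounds and sanity checks as read_tls_record_header.
func parseRecordHeader(buf []byte) (recordHeader, error) {
    if len(buf) < 5 {
        return recordHeader{}, errors.New("buffer smaller than a TLS record header")
    }
    hdr := recordHeader{
        ContentType: buf[0],
        Version:     binary.BigEndian.Uint16(buf[1:3]),
        Length:      binary.BigEndian.Uint16(buf[3:5]),
    }
    if !validTLSVersion(hdr.Version) {
        return recordHeader{}, errors.New("unknown SSL/TLS version")
    }
    if int(hdr.Length) > tlsMaxPayloadLength {
        return recordHeader{}, errors.New("record length above TLS maximum")
    }
    if 5+int(hdr.Length) > len(buf) {
        return recordHeader{}, errors.New("record does not fit in the buffer")
    }
    return hdr, nil
}

func main() {
    // Application Data record (0x17), TLS 1.2 on the wire, 3-byte payload.
    rec := []byte{0x17, 0x03, 0x03, 0x00, 0x03, 0x01, 0x02, 0x03}
    fmt.Println(parseRecordHeader(rec))
}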
+static __always_inline bool is_valid_tls_handshake(struct __sk_buff *skb, __u32 header_offset, __u32 data_end, const tls_record_header_t *hdr) { + // At this point, we know from read_tls_record_header() that: + // - hdr->version is a valid TLS version + // - hdr->length fits entirely within the packet (header_offset + hdr->length <= data_end) + + __u32 handshake_offset = header_offset + sizeof(tls_record_header_t); + + // Ensure we don't read beyond the packet + if (handshake_offset + SINGLE_BYTE_LENGTH > data_end) { + return false; + } + // Read handshake_type (1 byte) __u8 handshake_type; - __u8 length[3]; - __u16 version; -} __attribute__((packed)) tls_hello_message_t; + if (bpf_skb_load_bytes(skb, handshake_offset, &handshake_type, SINGLE_BYTE_LENGTH) < 0) { + return false; + } -#define TLS_HANDSHAKE_CLIENT_HELLO 0x01 -#define TLS_HANDSHAKE_SERVER_HELLO 0x02 -// The size of the handshake type and the length. -#define TLS_HELLO_MESSAGE_HEADER_SIZE 4 + // Read handshake_length (3 bytes) + __u32 length_offset = handshake_offset + SINGLE_BYTE_LENGTH; + if (length_offset + TLS_HANDSHAKE_LENGTH > data_end) { + return false; + } + __u8 handshake_length_bytes[TLS_HANDSHAKE_LENGTH]; + if (bpf_skb_load_bytes(skb, length_offset, handshake_length_bytes, TLS_HANDSHAKE_LENGTH) < 0) { + return false; + } -// is_valid_tls_version checks if the given version is a valid TLS version as -// defined in the TLS specification. -static __always_inline bool is_valid_tls_version(__u16 version) { - switch (version) { - case SSL_VERSION20: - case SSL_VERSION30: - case TLS_VERSION10: - case TLS_VERSION11: - case TLS_VERSION12: - case TLS_VERSION13: + __u32 handshake_length = (handshake_length_bytes[0] << 16) | + (handshake_length_bytes[1] << 8) | + handshake_length_bytes[2]; + + // Verify that the handshake message length plus the 4-byte handshake header (1 byte type + 3 bytes length) + // matches the total length defined in the record header. + // If handshake_length + TLS_HELLO_MESSAGE_HEADER_SIZE != hdr->length, the handshake message structure is inconsistent. + if (handshake_length + TLS_HELLO_MESSAGE_HEADER_SIZE != hdr->length) { + return false; + } + + // Check that the handshake_type is one of the expected values (ClientHello or ServerHello). + // This ensures we are dealing with a known handshake message type. + if (handshake_type != TLS_HANDSHAKE_CLIENT_HELLO && handshake_type != TLS_HANDSHAKE_SERVER_HELLO) { + return false; + } + + // At this point, we've confirmed: + // - The handshake message fits within the record. + // - The handshake_type is a known TLS Hello message. + // - The handshake_length matches the record header's length. + return true; +} + +// is_tls checks if the packet is a TLS packet by reading and validating the TLS record header +// Reference: RFC 5246 Section 6.2.1 (Record Layer), https://tools.ietf.org/html/rfc5246#section-6.2.1 +// Validates that content_type matches known TLS types (Handshake, Application Data, etc.). 
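The consistency check described above (the 3-byte handshake length plus the 4-byte handshake header must equal the record-layer length, and the type must be ClientHello or ServerHello) is easy to express in userspace as well. A small Go sketch, illustrative only:

package main

import "fmt"

// isValidHelloHandshake mirrors the checks in is_valid_tls_handshake.
// payload holds the record payload (the bytes after the 5-byte record header),
// recordLen is the length field taken from that record header.
func isValidHelloHandshake(payload []byte, recordLen uint16) bool {
    if len(payload) < 4 {
        return false
    }
    handshakeType := payload[0]
    handshakeLen := uint32(payload[1])<<16 | uint32(payload[2])<<8 | uint32(payload[3])
    // Only ClientHello (0x01) and ServerHello (0x02) are classified.
    if handshakeType != 0x01 && handshakeType != 0x02 {
        return false
    }
    // Handshake body plus its 4-byte header must account for the whole record.
    return handshakeLen+4 == uint32(recordLen)
}

func main() {
    // ClientHello of 70 bytes inside a record whose length field is 74.
    payload := append([]byte{0x01, 0x00, 0x00, 0x46}, make([]byte, 70)...)
    fmt.Println(isValidHelloHandshake(payload, 74)) // true
}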
+static __always_inline bool is_tls(struct __sk_buff *skb, __u32 header_offset, __u32 data_end, tls_record_header_t *tls_hdr) { + // Read and validate the TLS record header + if (!read_tls_record_header(skb, header_offset, data_end, tls_hdr)) { + return false; + } + + switch (tls_hdr->content_type) { + case TLS_HANDSHAKE: + return is_valid_tls_handshake(skb, header_offset, data_end, tls_hdr); + case TLS_APPLICATION_DATA: + case TLS_CHANGE_CIPHER_SPEC: + case TLS_ALERT: return true; + default: + return false; + } +} + +// parse_tls_handshake_header extracts handshake_length and protocol_version from a TLS handshake message +// References: +// - RFC 5246 Section 7.4 (Handshake Protocol Overview), https://tools.ietf.org/html/rfc5246#section-7.4 +// For ClientHello and ServerHello, this includes parsing the handshake type (skipped prior) and the 3-byte length field, followed by a 2-byte protocol version field. +static __always_inline bool parse_tls_handshake_header(struct __sk_buff *skb, __u32 *offset, __u32 data_end, __u32 *handshake_length, __u16 *protocol_version) { + *offset += SINGLE_BYTE_LENGTH; // Move past handshake type (1 byte) + + // Read handshake length (3 bytes) + if (*offset + TLS_HANDSHAKE_LENGTH > data_end) { + return false; + } + __u8 handshake_length_bytes[TLS_HANDSHAKE_LENGTH]; + if (bpf_skb_load_bytes(skb, *offset, handshake_length_bytes, TLS_HANDSHAKE_LENGTH) < 0) { + return false; } + *handshake_length = (handshake_length_bytes[0] << 16) | + (handshake_length_bytes[1] << 8) | + handshake_length_bytes[2]; + *offset += TLS_HANDSHAKE_LENGTH; - return false; + // Ensure we don't read beyond the packet + if (*offset + *handshake_length > data_end) { + return false; + } + + // Read protocol version (2 bytes) + if (*offset + PROTOCOL_VERSION_LENGTH > data_end) { + return false; + } + __u16 version; + if (bpf_skb_load_bytes(skb, *offset, &version, PROTOCOL_VERSION_LENGTH) < 0) { + return false; + } + *protocol_version = bpf_ntohs(version); + *offset += PROTOCOL_VERSION_LENGTH; + + return true; } -// is_valid_tls_app_data checks if the buffer is a valid TLS Application Data -// record header. The record header is considered valid if: -// - the TLS version field is a known SSL/TLS version -// - the payload length is below the maximum payload length defined in the -// standard. -// - the payload length + the size of the record header is less than the size -// of the skb -static __always_inline bool is_valid_tls_app_data(tls_record_header_t *hdr, __u32 buf_size, __u32 skb_len) { - return sizeof(*hdr) + hdr->length <= skb_len; +// skip_random_and_session_id Skips the Random (32 bytes) and the Session ID from the TLS Hello messages +// References: +// - RFC 5246 Section 7.4.1.2 (Client Hello and Server Hello): https://tools.ietf.org/html/rfc5246#section-7.4.1.2 +// ClientHello and ServerHello contain a "random" field (32 bytes) followed by a "session_id_length" (1 byte) +// and a session_id of that length. This helper increments the offset accordingly after reading and skipping these fields. 
+static __always_inline bool skip_random_and_session_id(struct __sk_buff *skb, __u32 *offset, __u32 data_end) { + // Skip Random (32 bytes) + *offset += RANDOM_LENGTH; + + // Read Session ID Length (1 byte) + if (*offset + SESSION_ID_LENGTH > data_end) { + return false; + } + __u8 session_id_length; + if (bpf_skb_load_bytes(skb, *offset, &session_id_length, SESSION_ID_LENGTH) < 0) { + return false; + } + *offset += SESSION_ID_LENGTH; + + // Skip Session ID + *offset += session_id_length; + + // Ensure we don't read beyond the packet + return *offset <= data_end; +} + +// parse_supported_versions_extension looks for the supported_versions extension in the ClientHello or ServerHello and populates tags +// References: +// - For TLS 1.3 supported_versions extension: RFC 8446 Section 4.2.1: https://tools.ietf.org/html/rfc8446#section-4.2.1 +// For ClientHello this extension contains a list of supported versions (2 bytes each) preceded by a 1-byte length. +// supported_versions extension structure: +// +-----+--------------------+ +// | len(1) | versions(2 * N) | +// +-----+--------------------+ +// For ServerHello (TLS 1.3), it contains a single selected_version (2 bytes). +// +---------------------+ +// | selected_version(2) | +// +---------------------+ +static __always_inline bool parse_supported_versions_extension(struct __sk_buff *skb, __u32 *offset, __u32 data_end, __u32 extensions_end, tls_info_t *tags, bool is_client_hello) { + if (is_client_hello) { + // Read supported version list length (1 byte) + if (*offset + SINGLE_BYTE_LENGTH > data_end || *offset + SINGLE_BYTE_LENGTH > extensions_end) { + return false; + } + __u8 sv_list_length; + if (bpf_skb_load_bytes(skb, *offset, &sv_list_length, SINGLE_BYTE_LENGTH) < 0) { + return false; + } + *offset += SINGLE_BYTE_LENGTH; + + if (*offset + sv_list_length > data_end || *offset + sv_list_length > extensions_end) { + return false; + } + + // Parse the list of supported versions (2 bytes each) + __u8 sv_offset = 0; + __u16 sv_version; + #pragma unroll(MAX_SUPPORTED_VERSIONS) + for (int idx = 0; idx < MAX_SUPPORTED_VERSIONS; idx++) { + if (sv_offset + 1 >= sv_list_length) { + break; + } + // Each supported version is 2 bytes + if (*offset + PROTOCOL_VERSION_LENGTH > data_end) { + return false; + } + + if (bpf_skb_load_bytes(skb, *offset, &sv_version, PROTOCOL_VERSION_LENGTH) < 0) { + return false; + } + sv_version = bpf_ntohs(sv_version); + *offset += PROTOCOL_VERSION_LENGTH; + + set_tls_offered_version(tags, sv_version); + sv_offset += PROTOCOL_VERSION_LENGTH; + } + } else { + // ServerHello + // The selected_version field is 2 bytes + if (*offset + PROTOCOL_VERSION_LENGTH > data_end) { + return false; + } + + // Read selected version (2 bytes) + __u16 selected_version; + if (bpf_skb_load_bytes(skb, *offset, &selected_version, PROTOCOL_VERSION_LENGTH) < 0) { + return false; + } + selected_version = bpf_ntohs(selected_version); + *offset += PROTOCOL_VERSION_LENGTH; + + tags->chosen_version = selected_version; + } + + return true; +} + +// parse_tls_extensions parses TLS extensions in both ClientHello and ServerHello +// References: +// - RFC 5246 Section 7.4.1.4 (Hello Extensions): https://tools.ietf.org/html/rfc5246#section-7.4.1.4 +// - For TLS 1.3 supported_versions extension: RFC 8446 Section 4.2.1: https://tools.ietf.org/html/rfc8446#section-4.2.1 +// This function iterates over extensions, reading the extension_type and extension_length, and if it encounters +// the supported_versions extension, it calls 
parse_supported_versions_extension to handle it. +// ASCII snippet for a single extension: +// +---------+---------+--------------------------------+ +// | ext_type(2) | ext_length(2) | ext_data(ext_length) | +// +---------+---------+--------------------------------+ +// For multiple extensions, they are just concatenated one after another. +static __always_inline bool parse_tls_extensions(struct __sk_buff *skb, __u32 *offset, __u32 data_end, __u32 extensions_end, tls_info_t *tags, bool is_client_hello) { + __u16 extension_type; + __u16 extension_length; + + #pragma unroll(MAX_EXTENSIONS) + for (int i = 0; i < MAX_EXTENSIONS; i++) { + if (*offset + MIN_EXTENSION_HEADER_LENGTH > extensions_end) { + break; + } + + // Read Extension Type (2 bytes) + if (bpf_skb_load_bytes(skb, *offset, &extension_type, EXTENSION_TYPE_LENGTH) < 0) { + return false; + } + extension_type = bpf_ntohs(extension_type); + *offset += EXTENSION_TYPE_LENGTH; + + // Read Extension Length (2 bytes) + if (bpf_skb_load_bytes(skb, *offset, &extension_length, EXTENSION_LENGTH_FIELD) < 0) { + return false; + } + extension_length = bpf_ntohs(extension_length); + *offset += EXTENSION_LENGTH_FIELD; + + if (*offset + extension_length > data_end || *offset + extension_length > extensions_end) { + return false; + } + + if (extension_type == SUPPORTED_VERSIONS_EXTENSION) { + if (!parse_supported_versions_extension(skb, offset, data_end, extensions_end, tags, is_client_hello)) { + return false; + } + } else { + // Skip other extensions + *offset += extension_length; + } + + if (*offset >= extensions_end) { + break; + } + } + + return true; } -// is_tls_handshake checks if the given TLS message header is a valid TLS -// handshake message. The message is considered valid if: -// - The type matches CLIENT_HELLO or SERVER_HELLO -// - The version is a known SSL/TLS version -static __always_inline bool is_tls_handshake(tls_record_header_t *hdr, const char *buf, __u32 buf_size) { - // Checking the buffer size contains at least the size of the tls record header and the tls hello message header. - if (sizeof(tls_record_header_t) + sizeof(tls_hello_message_t) > buf_size) { +// parse_client_hello parses the ClientHello message and populates tags +// Reference: RFC 5246 Section 7.4.1.2 (Client Hello), https://tools.ietf.org/html/rfc5246 +// Structure (simplified): +// handshake_type (1 byte), length (3 bytes), version (2 bytes), random(32 bytes), session_id_length(1 byte), session_id(variable), cipher_suites_length(2 bytes), cipher_suites(variable), compression_methods_length(1 byte), compression_methods(variable), extensions_length(2 bytes), extensions(variable) +// After the handshake header (handshake_type + length), the ClientHello fields are: +// +----------------------------+ +// | client_version (2) | +// +----------------------------+ +// | random (32) | +// +----------------------------+ +// | session_id_length (1) | +// | session_id (...) | +// +----------------------------+ +// | cipher_suites_length(2) | +// | cipher_suites(...) | +// +----------------------------+ +// | compression_methods_len(1) | +// | compression_methods(...) | +// +----------------------------+ +// | extensions_length (2) | +// | extensions(...) 
| +// +----------------------------+ +static __always_inline bool parse_client_hello(struct __sk_buff *skb, __u32 offset, __u32 data_end, tls_info_t *tags) { + __u32 handshake_length; + __u16 client_version; + + if (!parse_tls_handshake_header(skb, &offset, data_end, &handshake_length, &client_version)) { + return false; + } + + set_tls_offered_version(tags, client_version); + + // TLS 1.2 is the highest version we will see in the header. If the connection is actually a higher version (1.3), + // it must be extracted from the extensions. Lower versions (1.0, 1.1) will not have extensions. + if (client_version != TLS_VERSION12) { + return true; + } + + if (!skip_random_and_session_id(skb, &offset, data_end)) { + return false; + } + + // Read Cipher Suites Length (2 bytes) + if (offset + CIPHER_SUITES_LENGTH > data_end) { + return false; + } + __u16 cipher_suites_length; + if (bpf_skb_load_bytes(skb, offset, &cipher_suites_length, CIPHER_SUITES_LENGTH) < 0) { + return false; + } + cipher_suites_length = bpf_ntohs(cipher_suites_length); + offset += CIPHER_SUITES_LENGTH; + + // Skip Cipher Suites + offset += cipher_suites_length; + + // Read Compression Methods Length (1 byte) + if (offset + COMPRESSION_METHODS_LENGTH > data_end) { + return false; + } + __u8 compression_methods_length; + if (bpf_skb_load_bytes(skb, offset, &compression_methods_length, COMPRESSION_METHODS_LENGTH) < 0) { return false; } - // Checking the tls record header length is greater than the tls hello message header length. - if (hdr->length < sizeof(tls_hello_message_t)) { + offset += COMPRESSION_METHODS_LENGTH; + + // Skip Compression Methods + offset += compression_methods_length; + + // Check if extensions are present + if (offset + EXTENSION_LENGTH_FIELD > data_end) { return false; } - // Getting the tls hello message header. - tls_hello_message_t msg = *(tls_hello_message_t *)(buf + sizeof(tls_record_header_t)); - // If the message is not a CLIENT_HELLO or SERVER_HELLO, we don't attempt to classify. - if (msg.handshake_type != TLS_HANDSHAKE_CLIENT_HELLO && msg.handshake_type != TLS_HANDSHAKE_SERVER_HELLO) { + // Read Extensions Length (2 bytes) + __u16 extensions_length; + if (bpf_skb_load_bytes(skb, offset, &extensions_length, EXTENSION_LENGTH_FIELD) < 0) { return false; } - // Converting the fields to host byte order. - __u32 length = msg.length[0] << 16 | msg.length[1] << 8 | msg.length[2]; - // TLS handshake message length should be equal to the record header length minus the size of the hello message - // header. - if (length + TLS_HELLO_MESSAGE_HEADER_SIZE != hdr->length) { + extensions_length = bpf_ntohs(extensions_length); + offset += EXTENSION_LENGTH_FIELD; + + if (offset + extensions_length > data_end) { return false; } - msg.version = bpf_ntohs(msg.version); - return is_valid_tls_version(msg.version) && msg.version >= hdr->version; + __u32 extensions_end = offset + extensions_length; + + return parse_tls_extensions(skb, &offset, data_end, extensions_end, tags, true); } -// is_tls checks if the given buffer is a valid TLS record header. 
We are -// currently checking for two types of record headers: -// - TLS Handshake record headers -// - TLS Application Data record headers -static __always_inline bool is_tls(const char *buf, __u32 buf_size, __u32 skb_len) { - if (buf_size < sizeof(tls_record_header_t)) { +// parse_server_hello parses the ServerHello message and populates tags +// Reference: RFC 5246 Section 7.4.1.2 (Server Hello), https://tools.ietf.org/html/rfc5246 +// Structure (simplified): +// handshake_type(1), length(3), version(2), random(32), session_id_length(1), session_id(variable), cipher_suite(2), compression_method(1), extensions_length(2), extensions(variable) +// After the handshake header (handshake_type + length), the ServerHello fields are: +// +------------------------+ +// | server_version (2) | +// +------------------------+ +// | random (32) | +// +------------------------+ +// | session_id_length (1) | +// | session_id (...) | +// +------------------------+ +// | cipher_suite (2) | +// +------------------------+ +// | compression_method (1) | +// +------------------------+ +// | extensions_length(2) | +// | extensions(...) | +// +------------------------+ +static __always_inline bool parse_server_hello(struct __sk_buff *skb, __u32 offset, __u32 data_end, tls_info_t *tags) { + __u32 handshake_length; + __u16 server_version; + + if (!parse_tls_handshake_header(skb, &offset, data_end, &handshake_length, &server_version)) { return false; } - // Copying struct to the stack, to avoid modifying the original buffer that will be used for other classifiers. - tls_record_header_t tls_record_header = *(tls_record_header_t *)buf; - // Converting the fields to host byte order. - tls_record_header.version = bpf_ntohs(tls_record_header.version); - tls_record_header.length = bpf_ntohs(tls_record_header.length); + // Set the version here and try to get the "real" version from the extensions if possible + // Note: In TLS 1.3, the server_version field is set to 1.2 + // The actual version is embedded in the supported_versions extension + tags->chosen_version = server_version; - // Checking the version in the record header. - if (!is_valid_tls_version(tls_record_header.version)) { + if (!skip_random_and_session_id(skb, &offset, data_end)) { return false; } - // Checking the length in the record header is not greater than the maximum payload length. - if (tls_record_header.length > TLS_MAX_PAYLOAD_LENGTH) { + // Read Cipher Suite (2 bytes) + if (offset + CIPHER_SUITES_LENGTH > data_end) { return false; } - switch (tls_record_header.content_type) { - case TLS_HANDSHAKE: - return is_tls_handshake(&tls_record_header, buf, buf_size); - case TLS_APPLICATION_DATA: - return is_valid_tls_app_data(&tls_record_header, buf_size, skb_len); + __u16 cipher_suite; + if (bpf_skb_load_bytes(skb, offset, &cipher_suite, CIPHER_SUITES_LENGTH) < 0) { + return false; + } + cipher_suite = bpf_ntohs(cipher_suite); + offset += CIPHER_SUITES_LENGTH; + + // Skip Compression Method (1 byte) + offset += COMPRESSION_METHODS_LENGTH; + + tags->cipher_suite = cipher_suite; + + // TLS 1.2 is the highest version we will see in the header. If the connection is actually a higher version (1.3), + // it must be extracted from the extensions. Lower versions (1.0, 1.1) will not have extensions. 
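The extension walk performed by parse_tls_extensions and parse_supported_versions_extension can be sketched in userspace Go for the ClientHello side as below. This is illustrative only and not part of the patch; the bit positions follow the TLS_VERSION1x_BIT constants introduced in this header.

package main

import (
    "encoding/binary"
    "fmt"
)

const supportedVersionsExt = 0x002B // supported_versions, RFC 8446 Section 4.2.1

// Bit positions match the TLS_VERSION1x_BIT constants in tls.h.
var versionBit = map[uint16]uint8{
    0x0301: 1 << 0, // TLS 1.0
    0x0302: 1 << 1, // TLS 1.1
    0x0303: 1 << 2, // TLS 1.2
    0x0304: 1 << 3, // TLS 1.3
}

// offeredVersions walks a ClientHello extensions block (type/length/data triples)
// and builds the offered-versions bitmask from the supported_versions extension.
func offeredVersions(ext []byte) uint8 {
    var mask uint8
    for len(ext) >= 4 {
        extType := binary.BigEndian.Uint16(ext[0:2])
        extLen := int(binary.BigEndian.Uint16(ext[2:4]))
        ext = ext[4:]
        if extLen > len(ext) {
            break // malformed extensions block
        }
        if extType == supportedVersionsExt && extLen >= 1 {
            // 1-byte list length followed by 2-byte version values.
            n := int(ext[0])
            if 1+n <= extLen {
                for i := 1; i+1 <= n; i += 2 {
                    mask |= versionBit[binary.BigEndian.Uint16(ext[i:i+2])]
                }
            }
        }
        ext = ext[extLen:]
    }
    return mask
}

func main() {
    // A supported_versions extension offering TLS 1.3 and TLS 1.2.
    ext := []byte{0x00, 0x2B, 0x00, 0x05, 0x04, 0x03, 0x04, 0x03, 0x03}
    fmt.Printf("offered versions bitmask: %04b\n", offeredVersions(ext))
}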
+ if (tags->chosen_version != TLS_VERSION12) { + return true; + } + + if (offset + EXTENSION_LENGTH_FIELD > data_end) { + return false; + } + + // Read Extensions Length (2 bytes) + __u16 extensions_length; + if (bpf_skb_load_bytes(skb, offset, &extensions_length, EXTENSION_LENGTH_FIELD) < 0) { + return false; + } + extensions_length = bpf_ntohs(extensions_length); + offset += EXTENSION_LENGTH_FIELD; + + __u32 handshake_end = offset + handshake_length; + if (offset + extensions_length > data_end || offset + extensions_length > handshake_end) { + return false; + } + + __u32 extensions_end = offset + extensions_length; + + return parse_tls_extensions(skb, &offset, data_end, extensions_end, tags, false); +} + +// is_tls_handshake_type checks if the handshake type at the given offset matches the expected type (e.g., ClientHello or ServerHello) +// References: +// - RFC 5246 Section 7.4 (Handshake Protocol Overview), https://tools.ietf.org/html/rfc5246#section-7.4 +// The handshake_type is a single byte enumerated value. +static __always_inline bool is_tls_handshake_type(struct __sk_buff *skb, __u32 offset, __u32 data_end, __u8 expected_handshake_type) { + // The handshake type is a single byte enumerated value + if (offset + SINGLE_BYTE_LENGTH > data_end) { + return false; + } + __u8 handshake_type; + if (bpf_skb_load_bytes(skb, offset, &handshake_type, SINGLE_BYTE_LENGTH) < 0) { + return false; } - return false; + return handshake_type == expected_handshake_type; +} + +// is_tls_handshake_client_hello checks if the packet is a TLS ClientHello message +static __always_inline bool is_tls_handshake_client_hello(struct __sk_buff *skb, __u32 offset, __u32 data_end) { + return is_tls_handshake_type(skb, offset, data_end, TLS_HANDSHAKE_CLIENT_HELLO); +} + +// is_tls_handshake_server_hello checks if the packet is a TLS ServerHello message +static __always_inline bool is_tls_handshake_server_hello(struct __sk_buff *skb, __u32 offset, __u32 data_end) { + return is_tls_handshake_type(skb, offset, data_end, TLS_HANDSHAKE_SERVER_HELLO); } -#endif +#endif // __TLS_H diff --git a/pkg/network/ebpf/c/runtime/usm.c b/pkg/network/ebpf/c/runtime/usm.c index bf3737b311a92..d706ee7fc281b 100644 --- a/pkg/network/ebpf/c/runtime/usm.c +++ b/pkg/network/ebpf/c/runtime/usm.c @@ -16,6 +16,7 @@ #include "pid_tgid.h" #include "protocols/classification/dispatcher-helpers.h" +#include "protocols/flush.h" #include "protocols/http/buffer.h" #include "protocols/http/http.h" #include "protocols/http2/decoding.h" @@ -64,21 +65,6 @@ int BPF_BYPASSABLE_KPROBE(kprobe__tcp_sendmsg, struct sock *sk) { return 0; } -SEC("tracepoint/net/netif_receive_skb") -int tracepoint__net__netif_receive_skb(void *ctx) { - CHECK_BPF_PROGRAM_BYPASSED() - log_debug("tracepoint/net/netif_receive_skb"); - // flush batch to userspace - // because perf events can't be sent from socket filter programs - http_batch_flush(ctx); - http2_batch_flush(ctx); - terminated_http2_batch_flush(ctx); - kafka_batch_flush(ctx); - postgres_batch_flush(ctx); - redis_batch_flush(ctx); - return 0; -} - // GO TLS PROBES // func (c *Conn) Write(b []byte) (int, error) diff --git a/pkg/network/ebpf/c/shared-libraries/probes.h b/pkg/network/ebpf/c/shared-libraries/probes.h index a45e9d0b494e1..f547170cd929e 100644 --- a/pkg/network/ebpf/c/shared-libraries/probes.h +++ b/pkg/network/ebpf/c/shared-libraries/probes.h @@ -18,42 +18,35 @@ static __always_inline void fill_path_safe(lib_path_t *path, const char *path_ar } } -static __always_inline void do_sys_open_helper_enter(const 
char *filename) { - lib_path_t path = { 0 }; - if (bpf_probe_read_user_with_telemetry(path.buf, sizeof(path.buf), filename) >= 0) { +static __always_inline bool fill_lib_path(lib_path_t *path, const char *path_argument) { + path->pid = GET_USER_MODE_PID(bpf_get_current_pid_tgid()); + if (bpf_probe_read_user_with_telemetry(path->buf, sizeof(path->buf), path_argument) >= 0) { // Find the null character and clean up the garbage following it #pragma unroll for (int i = 0; i < LIB_PATH_MAX_SIZE; i++) { - if (path.buf[i] == 0) { - path.len = i; + if (path->buf[i] == 0) { + path->len = i; break; } } } else { - fill_path_safe(&path, filename); + fill_path_safe(path, path_argument); } - // Bail out if the path size is larger than our buffer - if (!path.len) { - return; - } - - u64 pid_tgid = bpf_get_current_pid_tgid(); - path.pid = GET_USER_MODE_PID(pid_tgid); - bpf_map_update_with_telemetry(open_at_args, &pid_tgid, &path, BPF_ANY); - return; + return path->len > 0; } -static __always_inline void do_sys_open_helper_exit(exit_sys_ctx *args) { - u64 pid_tgid = bpf_get_current_pid_tgid(); - - // If file couldn't be opened, bail out - if (args->ret < 0) { - goto cleanup; +static __always_inline void do_sys_open_helper_enter(const char *filename) { + lib_path_t path = { 0 }; + if (fill_lib_path(&path, filename)) { + u64 pid_tgid = bpf_get_current_pid_tgid(); + bpf_map_update_with_telemetry(open_at_args, &pid_tgid, &path, BPF_ANY); } + return; +} - lib_path_t *path = bpf_map_lookup_elem(&open_at_args, &pid_tgid); - if (path == NULL) { +static __always_inline void push_event_if_relevant(void *ctx, lib_path_t *path, long return_code) { + if (return_code < 0) { return; } @@ -79,32 +72,57 @@ static __always_inline void do_sys_open_helper_exit(exit_sys_ctx *args) { } } if (!is_shared_library) { - goto cleanup; + return; + } + if (i + LIB_SO_SUFFIX_SIZE > path->len) { + return; } - u64 crypto_libset_enabled = 0; LOAD_CONSTANT("crypto_libset_enabled", crypto_libset_enabled); if (crypto_libset_enabled && (match6chars(0, 'l', 'i', 'b', 's', 's', 'l') || match6chars(0, 'c', 'r', 'y', 'p', 't', 'o') || match6chars(0, 'g', 'n', 'u', 't', 'l', 's'))) { - bpf_perf_event_output((void *)args, &crypto_shared_libraries, BPF_F_CURRENT_CPU, path, sizeof(lib_path_t)); - goto cleanup; + bpf_perf_event_output(ctx, &crypto_shared_libraries, BPF_F_CURRENT_CPU, path, sizeof(lib_path_t)); + return; } u64 gpu_libset_enabled = 0; LOAD_CONSTANT("gpu_libset_enabled", gpu_libset_enabled); if (gpu_libset_enabled && (match6chars(0, 'c', 'u', 'd', 'a', 'r', 't'))) { - bpf_perf_event_output((void *)args, &gpu_shared_libraries, BPF_F_CURRENT_CPU, path, sizeof(lib_path_t)); + bpf_perf_event_output(ctx, &gpu_shared_libraries, BPF_F_CURRENT_CPU, path, sizeof(lib_path_t)); + } +} + +static __always_inline void do_sys_open_helper_exit(exit_sys_ctx *args) { + u64 pid_tgid = bpf_get_current_pid_tgid(); + lib_path_t *path = bpf_map_lookup_elem(&open_at_args, &pid_tgid); + if (path == NULL) { + return; } -cleanup: + push_event_if_relevant(args, path, args->ret); bpf_map_delete_elem(&open_at_args, &pid_tgid); return; } +// This definition is the same for all architectures. 
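The shared-library hooks above now drop write-only opens early and only emit events for paths that match the crypto or GPU library sets. A rough userspace Go equivalent of that decision is shown below; it is illustrative only, and the substring matching is an approximation of the kernel-side is_shared_library and match6chars checks.

package main

import (
    "fmt"
    "os"
    "strings"
)

// interestingLibraries mirrors the match6chars checks in push_event_if_relevant.
var interestingLibraries = []string{"libssl", "crypto", "gnutls", "cudart"}

// shouldReportOpen mirrors the kernel-side filtering: ignore write-only opens,
// then only report paths that look like one of the monitored shared libraries.
func shouldReportOpen(path string, flags int) bool {
    if flags&os.O_WRONLY != 0 {
        return false // mirrors should_ignore_flags()
    }
    if !strings.Contains(path, ".so") {
        return false // not a shared library (approximation of is_shared_library)
    }
    for _, lib := range interestingLibraries {
        if strings.Contains(path, lib) {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(shouldReportOpen("/usr/lib/x86_64-linux-gnu/libssl.so.3", os.O_RDONLY)) // true
    fmt.Println(shouldReportOpen("/tmp/output.log", os.O_WRONLY))                       // false
}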
+#ifndef O_WRONLY +#define O_WRONLY 00000001 +#endif + +static __always_inline int should_ignore_flags(int flags) +{ + return flags & O_WRONLY; +} + SEC("tracepoint/syscalls/sys_enter_open") int tracepoint__syscalls__sys_enter_open(enter_sys_open_ctx *args) { CHECK_BPF_PROGRAM_BYPASSED() + + if (should_ignore_flags(args->flags)) { + return 0; + } + do_sys_open_helper_enter(args->filename); return 0; } @@ -119,6 +137,11 @@ int tracepoint__syscalls__sys_exit_open(exit_sys_ctx *args) { SEC("tracepoint/syscalls/sys_enter_openat") int tracepoint__syscalls__sys_enter_openat(enter_sys_openat_ctx *args) { CHECK_BPF_PROGRAM_BYPASSED() + + if (should_ignore_flags(args->flags)) { + return 0; + } + do_sys_open_helper_enter(args->filename); return 0; } @@ -133,6 +156,15 @@ int tracepoint__syscalls__sys_exit_openat(exit_sys_ctx *args) { SEC("tracepoint/syscalls/sys_enter_openat2") int tracepoint__syscalls__sys_enter_openat2(enter_sys_openat2_ctx *args) { CHECK_BPF_PROGRAM_BYPASSED() + + if (args->how != NULL) { + __u64 flags = 0; + bpf_probe_read_user(&flags, sizeof(flags), &args->how->flags); + if (should_ignore_flags(flags)) { + return 0; + } + } + do_sys_open_helper_enter(args->filename); return 0; } @@ -144,4 +176,17 @@ int tracepoint__syscalls__sys_exit_openat2(exit_sys_ctx *args) { return 0; } +SEC("fexit/do_sys_openat2") +int BPF_BYPASSABLE_PROG(do_sys_openat2_exit, int dirfd, const char *pathname, openat2_open_how *how, long ret) { + if (how != NULL && should_ignore_flags(how->flags)) { + return 0; + } + + lib_path_t path = { 0 }; + if (fill_lib_path(&path, pathname)) { + push_event_if_relevant(ctx, &path, ret); + } + return 0; +} + #endif diff --git a/pkg/network/ebpf/c/shared-libraries/types.h b/pkg/network/ebpf/c/shared-libraries/types.h index ea2159d5d07eb..2680088c0cc64 100644 --- a/pkg/network/ebpf/c/shared-libraries/types.h +++ b/pkg/network/ebpf/c/shared-libraries/types.h @@ -37,6 +37,12 @@ typedef struct { int mode; } enter_sys_openat_ctx; +typedef struct { + __u64 flags; + __u64 mode; + __u64 resolve; +} openat2_open_how; + typedef struct { unsigned short common_type; unsigned char common_flags; @@ -46,7 +52,7 @@ typedef struct { int dfd; const char* filename; - void *how; + openat2_open_how *how; size_t usize; } enter_sys_openat2_ctx; diff --git a/pkg/network/ebpf/c/tracer.c b/pkg/network/ebpf/c/tracer.c index 66cefbee1d08d..9e1dbb2b3969f 100644 --- a/pkg/network/ebpf/c/tracer.c +++ b/pkg/network/ebpf/c/tracer.c @@ -33,6 +33,18 @@ int socket__classifier_entry(struct __sk_buff *skb) { return 0; } +SEC("socket/classifier_tls_handshake_client") +int socket__classifier_tls_handshake_client(struct __sk_buff *skb) { + protocol_classifier_entrypoint_tls_handshake_client(skb); + return 0; +} + +SEC("socket/classifier_tls_handshake_server") +int socket__classifier_tls_handshake_server(struct __sk_buff *skb) { + protocol_classifier_entrypoint_tls_handshake_server(skb); + return 0; +} + SEC("socket/classifier_queues") int socket__classifier_queues(struct __sk_buff *skb) { protocol_classifier_entrypoint_queues(skb); @@ -304,7 +316,9 @@ int BPF_BYPASSABLE_KRETPROBE(kretprobe__tcp_close_clean_protocols) { bpf_map_delete_elem(&tcp_close_args, &pid_tgid); } - bpf_tail_call_compat(ctx, &tcp_close_progs, 0); + if (is_batching_enabled()) { + bpf_tail_call_compat(ctx, &tcp_close_progs, 0); + } return 0; } diff --git a/pkg/network/ebpf/c/tracer/events.h b/pkg/network/ebpf/c/tracer/events.h index 1de94d755b1f3..474d1f67f6e53 100644 --- a/pkg/network/ebpf/c/tracer/events.h +++ 
b/pkg/network/ebpf/c/tracer/events.h @@ -23,6 +23,7 @@ static __always_inline void clean_protocol_classification(conn_tuple_t *tup) { conn_tuple.netns = 0; normalize_tuple(&conn_tuple); delete_protocol_stack(&conn_tuple, NULL, FLAG_TCP_CLOSE_DELETION); + bpf_map_delete_elem(&tls_enhanced_tags, &conn_tuple); conn_tuple_t *skb_tup_ptr = bpf_map_lookup_elem(&conn_tuple_to_socket_skb_conn_tuple, &conn_tuple); if (skb_tup_ptr == NULL) { @@ -31,14 +32,36 @@ static __always_inline void clean_protocol_classification(conn_tuple_t *tup) { conn_tuple_t skb_tup = *skb_tup_ptr; delete_protocol_stack(&skb_tup, NULL, FLAG_TCP_CLOSE_DELETION); + bpf_map_delete_elem(&tls_enhanced_tags, &skb_tup); bpf_map_delete_elem(&conn_tuple_to_socket_skb_conn_tuple, &conn_tuple); } +static __always_inline bool is_batching_enabled() { + __u64 batching_enabled = 0; + LOAD_CONSTANT("batching_enabled", batching_enabled); + return batching_enabled != 0; +} + +__maybe_unused static __always_inline __u64 get_ringbuf_flags(size_t data_size) { + if (is_batching_enabled()) { + return 0; + } + + __u64 ringbuffer_wakeup_size = 0; + LOAD_CONSTANT("ringbuffer_wakeup_size", ringbuffer_wakeup_size); + if (ringbuffer_wakeup_size == 0) { + return 0; + } + + __u64 sz = bpf_ringbuf_query(&conn_close_event, DD_BPF_RB_AVAIL_DATA); + return (sz + data_size) >= ringbuffer_wakeup_size ? DD_BPF_RB_FORCE_WAKEUP : DD_BPF_RB_NO_WAKEUP; +} + __maybe_unused static __always_inline void submit_closed_conn_event(void *ctx, int cpu, void *event_data, size_t data_size) { __u64 ringbuffers_enabled = 0; LOAD_CONSTANT("ringbuffers_enabled", ringbuffers_enabled); if (ringbuffers_enabled > 0) { - bpf_ringbuf_output(&conn_close_event, event_data, data_size, 0); + bpf_ringbuf_output(&conn_close_event, event_data, data_size, get_ringbuf_flags(data_size)); } else { bpf_perf_event_output(ctx, &conn_close_event, cpu, event_data, data_size); } @@ -94,32 +117,34 @@ static __always_inline int cleanup_conn(void *ctx, conn_tuple_t *tup, struct soc // if we added another field conn.conn_stats.duration = bpf_ktime_get_ns() - conn.conn_stats.duration; - // Batch TCP closed connections before generating a perf event - batch_t *batch_ptr = bpf_map_lookup_elem(&conn_close_batch, &cpu); - if (batch_ptr == NULL) { - return -1; - } + if (is_batching_enabled()) { + // Batch TCP closed connections before generating a perf event + batch_t *batch_ptr = bpf_map_lookup_elem(&conn_close_batch, &cpu); + if (batch_ptr == NULL) { + return -1; + } - // TODO: Can we turn this into a macro based on TCP_CLOSED_BATCH_SIZE? - switch (batch_ptr->len) { - case 0: - batch_ptr->c0 = conn; - batch_ptr->len++; - return 0; - case 1: - batch_ptr->c1 = conn; - batch_ptr->len++; - return 0; - case 2: - batch_ptr->c2 = conn; - batch_ptr->len++; - return 0; - case 3: - batch_ptr->c3 = conn; - batch_ptr->len++; - // In this case the batch is ready to be flushed, which we defer to kretprobe/tcp_close - // in order to cope with the eBPF stack limitation of 512 bytes. - return 0; + // TODO: Can we turn this into a macro based on TCP_CLOSED_BATCH_SIZE? + switch (batch_ptr->len) { + case 0: + batch_ptr->c0 = conn; + batch_ptr->len++; + return 0; + case 1: + batch_ptr->c1 = conn; + batch_ptr->len++; + return 0; + case 2: + batch_ptr->c2 = conn; + batch_ptr->len++; + return 0; + case 3: + batch_ptr->c3 = conn; + batch_ptr->len++; + // In this case the batch is ready to be flushed, which we defer to kretprobe/tcp_close + // in order to cope with the eBPF stack limitation of 512 bytes. 
+ return 0; + } } // If we hit this section it means we had one or more interleaved tcp_close calls. @@ -127,11 +152,13 @@ static __always_inline int cleanup_conn(void *ctx, conn_tuple_t *tup, struct soc // frequent of a case to cause performance issues and avoid cases where // we drop whole connections, which impacts things USM connection matching. submit_closed_conn_event(ctx, cpu, &conn, sizeof(conn_t)); - if (is_tcp) { - increment_telemetry_count(unbatched_tcp_close); - } - if (is_udp) { - increment_telemetry_count(unbatched_udp_close); + if (is_batching_enabled()) { + if (is_tcp) { + increment_telemetry_count(unbatched_tcp_close); + } + if (is_udp) { + increment_telemetry_count(unbatched_udp_close); + } } return 0; } diff --git a/pkg/network/ebpf/c/tracer/maps.h b/pkg/network/ebpf/c/tracer/maps.h index e6123782f8ea5..33a85212d8abc 100644 --- a/pkg/network/ebpf/c/tracer/maps.h +++ b/pkg/network/ebpf/c/tracer/maps.h @@ -36,7 +36,7 @@ BPF_PERF_EVENT_ARRAY_MAP(conn_close_event, __u32) * or BPF_MAP_TYPE_PERCPU_ARRAY, but they are not available in * some of the Kernels we support (4.4 ~ 4.6) */ -BPF_HASH_MAP(conn_close_batch, __u32, batch_t, 1024) +BPF_HASH_MAP(conn_close_batch, __u32, batch_t, 1) /* * Map to hold struct sock parameter for tcp_sendmsg calls @@ -132,4 +132,7 @@ BPF_HASH_MAP(tcp_close_args, __u64, conn_tuple_t, 1024) // by using tail call. BPF_PROG_ARRAY(tcp_close_progs, 1) +// Map to store extra information about TLS connections like version, cipher, etc. +BPF_HASH_MAP(tls_enhanced_tags, conn_tuple_t, tls_info_wrapper_t, 0) + #endif diff --git a/pkg/network/ebpf/c/tracer/stats.h b/pkg/network/ebpf/c/tracer/stats.h index 862d8e32f81f1..ca1d6d19e309e 100644 --- a/pkg/network/ebpf/c/tracer/stats.h +++ b/pkg/network/ebpf/c/tracer/stats.h @@ -23,6 +23,55 @@ static __always_inline __u64 offset_rtt(); static __always_inline __u64 offset_rtt_var(); #endif +static __always_inline tls_info_t* get_tls_enhanced_tags(conn_tuple_t* tuple) { + conn_tuple_t normalized_tup = *tuple; + normalize_tuple(&normalized_tup); + tls_info_wrapper_t *wrapper = bpf_map_lookup_elem(&tls_enhanced_tags, &normalized_tup); + if (!wrapper) { + return NULL; + } + wrapper->updated = bpf_ktime_get_ns(); + return &wrapper->info; +} + +static __always_inline tls_info_t* get_or_create_tls_enhanced_tags(conn_tuple_t *tuple) { + tls_info_t *tags = get_tls_enhanced_tags(tuple); + if (!tags) { + conn_tuple_t normalized_tup = *tuple; + normalize_tuple(&normalized_tup); + tls_info_wrapper_t empty_tags_wrapper = {}; + empty_tags_wrapper.updated = bpf_ktime_get_ns(); + + bpf_map_update_with_telemetry(tls_enhanced_tags, &normalized_tup, &empty_tags_wrapper, BPF_ANY); + tls_info_wrapper_t *wrapper_ptr = bpf_map_lookup_elem(&tls_enhanced_tags, &normalized_tup); + if (!wrapper_ptr) { + return NULL; + } + tags = &wrapper_ptr->info; + } + return tags; +} + +// merge_tls_info modifies `this` by merging it with `that` +static __always_inline void merge_tls_info(tls_info_t *this, tls_info_t *that) { + if (!this || !that) { + return; + } + + // Merge chosen_version if not already set + if (this->chosen_version == 0 && that->chosen_version != 0) { + this->chosen_version = that->chosen_version; + } + + // Merge cipher_suite if not already set + if (this->cipher_suite == 0 && that->cipher_suite != 0) { + this->cipher_suite = that->cipher_suite; + } + + // Merge offered_versions bitmask + this->offered_versions |= that->offered_versions; +} + static __always_inline conn_stats_ts_t *get_conn_stats(conn_tuple_t *t, struct sock *sk) { 
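merge_tls_info above keeps the first non-zero chosen version and cipher suite and accumulates offered versions with a bitwise OR. The same merge, expressed in Go against a stand-in struct (the field names mirror the patch, but the type itself is illustrative):

package main

import "fmt"

// tlsTags is a stand-in for the tls.Tags / tls_info_t fields used in this patch.
type tlsTags struct {
    ChosenVersion   uint16
    CipherSuite     uint16
    OfferedVersions uint8
}

// merge mirrors merge_tls_info: keep the first non-zero chosen version and
// cipher suite, and accumulate offered versions as a bitwise OR.
func merge(dst *tlsTags, src tlsTags) {
    if dst.ChosenVersion == 0 && src.ChosenVersion != 0 {
        dst.ChosenVersion = src.ChosenVersion
    }
    if dst.CipherSuite == 0 && src.CipherSuite != 0 {
        dst.CipherSuite = src.CipherSuite
    }
    dst.OfferedVersions |= src.OfferedVersions
}

func main() {
    conn := tlsTags{OfferedVersions: 0b0100}                         // ClientHello offered TLS 1.2
    hello := tlsTags{ChosenVersion: 0x0304, OfferedVersions: 0b1000} // ServerHello chose TLS 1.3
    merge(&conn, hello)
    fmt.Printf("%+v\n", conn)
}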
conn_stats_ts_t *cs = bpf_map_lookup_elem(&conn_stats, t); if (cs) { @@ -112,6 +161,9 @@ static __always_inline void update_protocol_classification_information(conn_tupl mark_protocol_direction(t, &conn_tuple_copy, protocol_stack); merge_protocol_stacks(&stats->protocol_stack, protocol_stack); + tls_info_t *tls_tags = get_tls_enhanced_tags(&conn_tuple_copy); + merge_tls_info(&stats->tls_tags, tls_tags); + conn_tuple_t *cached_skb_conn_tup_ptr = bpf_map_lookup_elem(&conn_tuple_to_socket_skb_conn_tuple, &conn_tuple_copy); if (!cached_skb_conn_tup_ptr) { return; @@ -124,6 +176,9 @@ static __always_inline void update_protocol_classification_information(conn_tupl set_protocol_flag(protocol_stack, FLAG_NPM_ENABLED); mark_protocol_direction(t, &conn_tuple_copy, protocol_stack); merge_protocol_stacks(&stats->protocol_stack, protocol_stack); + + tls_tags = get_tls_enhanced_tags(&conn_tuple_copy); + merge_tls_info(&stats->tls_tags, tls_tags); } static __always_inline void determine_connection_direction(conn_tuple_t *t, conn_stats_ts_t *conn_stats) { diff --git a/pkg/network/ebpf/c/tracer/telemetry.h b/pkg/network/ebpf/c/tracer/telemetry.h index b723f1c4b8b71..326f9311d6237 100644 --- a/pkg/network/ebpf/c/tracer/telemetry.h +++ b/pkg/network/ebpf/c/tracer/telemetry.h @@ -21,8 +21,6 @@ enum telemetry_counter { udp_send_processed, udp_send_missed, udp_dropped_conns, - double_flush_attempts_close, - double_flush_attempts_done, unsupported_tcp_failures, tcp_done_missing_pid, tcp_connect_failed_tuple, @@ -60,12 +58,6 @@ static __always_inline void increment_telemetry_count(enum telemetry_counter cou case udp_dropped_conns: __sync_fetch_and_add(&val->udp_dropped_conns, 1); break; - case double_flush_attempts_close: - __sync_fetch_and_add(&val->double_flush_attempts_close, 1); - break; - case double_flush_attempts_done: - __sync_fetch_and_add(&val->double_flush_attempts_done, 1); - break; case unsupported_tcp_failures: __sync_fetch_and_add(&val->unsupported_tcp_failures, 1); break; diff --git a/pkg/network/ebpf/c/tracer/tracer.h b/pkg/network/ebpf/c/tracer/tracer.h index d688359dc9d79..a4966adfcbe95 100644 --- a/pkg/network/ebpf/c/tracer/tracer.h +++ b/pkg/network/ebpf/c/tracer/tracer.h @@ -29,6 +29,17 @@ typedef enum { #define CONN_DIRECTION_MASK 0b11 +typedef struct { + __u16 chosen_version; + __u16 cipher_suite; + __u8 offered_versions; +} tls_info_t; + +typedef struct { + tls_info_t info; + __u64 updated; +} tls_info_wrapper_t; + typedef struct { __u64 sent_bytes; __u64 recv_bytes; @@ -54,6 +65,7 @@ typedef struct { protocol_stack_t protocol_stack; __u8 flags; __u8 direction; + tls_info_t tls_tags; } conn_stats_ts_t; // Connection flags @@ -108,8 +120,6 @@ typedef struct { __u64 udp_sends_processed; __u64 udp_sends_missed; __u64 udp_dropped_conns; - __u64 double_flush_attempts_close; - __u64 double_flush_attempts_done; __u64 unsupported_tcp_failures; __u64 tcp_done_missing_pid; __u64 tcp_connect_failed_tuple; diff --git a/pkg/network/ebpf/kprobe_types.go b/pkg/network/ebpf/kprobe_types.go index 90d7eb1f331ae..ab5f569a9c3ec 100644 --- a/pkg/network/ebpf/kprobe_types.go +++ b/pkg/network/ebpf/kprobe_types.go @@ -30,6 +30,8 @@ type UDPRecvSock C.udp_recv_sock_t type BindSyscallArgs C.bind_syscall_args_t type ProtocolStack C.protocol_stack_t type ProtocolStackWrapper C.protocol_stack_wrapper_t +type TLSTags C.tls_info_t +type TLSTagsWrapper C.tls_info_wrapper_t // udp_recv_sock_t have *sock and *msghdr struct members, we make them opaque here type _Ctype_struct_sock uint64 @@ -63,7 +65,9 @@ const SizeofConn 
= C.sizeof_conn_t type ClassificationProgram = uint32 const ( - ClassificationQueues ClassificationProgram = C.CLASSIFICATION_QUEUES_PROG - ClassificationDBs ClassificationProgram = C.CLASSIFICATION_DBS_PROG - ClassificationGRPC ClassificationProgram = C.CLASSIFICATION_GRPC_PROG + ClassificationTLSClient ClassificationProgram = C.CLASSIFICATION_TLS_CLIENT_PROG + ClassificationTLSServer ClassificationProgram = C.CLASSIFICATION_TLS_SERVER_PROG + ClassificationQueues ClassificationProgram = C.CLASSIFICATION_QUEUES_PROG + ClassificationDBs ClassificationProgram = C.CLASSIFICATION_DBS_PROG + ClassificationGRPC ClassificationProgram = C.CLASSIFICATION_GRPC_PROG ) diff --git a/pkg/network/ebpf/kprobe_types_linux.go b/pkg/network/ebpf/kprobe_types_linux.go index bf9bbf38210f3..4053f4863f7c2 100644 --- a/pkg/network/ebpf/kprobe_types_linux.go +++ b/pkg/network/ebpf/kprobe_types_linux.go @@ -32,7 +32,7 @@ type ConnStats struct { Protocol_stack ProtocolStack Flags uint8 Direction uint8 - Pad_cgo_0 [6]byte + Tls_tags TLSTags } type Conn struct { Tup ConnTuple @@ -65,8 +65,6 @@ type Telemetry struct { Udp_sends_processed uint64 Udp_sends_missed uint64 Udp_dropped_conns uint64 - Double_flush_attempts_close uint64 - Double_flush_attempts_done uint64 Unsupported_tcp_failures uint64 Tcp_done_missing_pid uint64 Tcp_connect_failed_tuple uint64 @@ -103,6 +101,16 @@ type ProtocolStackWrapper struct { Stack ProtocolStack Updated uint64 } +type TLSTags struct { + Chosen_version uint16 + Cipher_suite uint16 + Offered_versions uint8 + Pad_cgo_0 [1]byte +} +type TLSTagsWrapper struct { + Info TLSTags + Updated uint64 +} type _Ctype_struct_sock uint64 type _Ctype_struct_msghdr uint64 @@ -135,7 +143,9 @@ const SizeofConn = 0x78 type ClassificationProgram = uint32 const ( - ClassificationQueues ClassificationProgram = 0x2 - ClassificationDBs ClassificationProgram = 0x3 - ClassificationGRPC ClassificationProgram = 0x5 + ClassificationTLSClient ClassificationProgram = 0x7 + ClassificationTLSServer ClassificationProgram = 0x8 + ClassificationQueues ClassificationProgram = 0x2 + ClassificationDBs ClassificationProgram = 0x3 + ClassificationGRPC ClassificationProgram = 0x5 ) diff --git a/pkg/network/ebpf/probes/probes.go b/pkg/network/ebpf/probes/probes.go index cd72ca3ba1fe9..1f983ed25b691 100644 --- a/pkg/network/ebpf/probes/probes.go +++ b/pkg/network/ebpf/probes/probes.go @@ -27,6 +27,10 @@ const ( // ProtocolClassifierEntrySocketFilter runs a classifier algorithm as a socket filter ProtocolClassifierEntrySocketFilter ProbeFuncName = "socket__classifier_entry" + // ProtocolClassifierTLSClientSocketFilter runs classification rules for the TLS client hello packet + ProtocolClassifierTLSClientSocketFilter ProbeFuncName = "socket__classifier_tls_handshake_client" + // ProtocolClassifierTLSServerSocketFilter runs classification rules for the TLS server hello packet + ProtocolClassifierTLSServerSocketFilter ProbeFuncName = "socket__classifier_tls_handshake_server" // ProtocolClassifierQueuesSocketFilter runs a classification rules for Queue protocols. ProtocolClassifierQueuesSocketFilter ProbeFuncName = "socket__classifier_queues" // ProtocolClassifierDBsSocketFilter runs a classification rules for DB protocols. 
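On the Go side, TLSTags carries the raw values filled in by the eBPF programs. A small illustrative decoder for those values is sketched below; the bit positions come from the TLS_VERSION1x_BIT constants in tls.h, while the version labels are made up for the example and are not the actual tag strings emitted by the agent.

package main

import "fmt"

// versionName maps on-the-wire TLS version values to readable labels
// (labels are illustrative only).
func versionName(v uint16) string {
    switch v {
    case 0x0301:
        return "tls_1.0"
    case 0x0302:
        return "tls_1.1"
    case 0x0303:
        return "tls_1.2"
    case 0x0304:
        return "tls_1.3"
    }
    return fmt.Sprintf("unknown_0x%04x", v)
}

// offeredNames expands the Offered_versions bitmask; bit positions follow
// the TLS_VERSION1x_BIT constants in tls.h.
func offeredNames(mask uint8) []string {
    versions := []uint16{0x0301, 0x0302, 0x0303, 0x0304}
    var out []string
    for i, v := range versions {
        if mask&(uint8(1)<<i) != 0 {
            out = append(out, versionName(v))
        }
    }
    return out
}

func main() {
    // Example values as they might be read back from the tls_enhanced_tags map.
    var chosen uint16 = 0x0304
    var offered uint8 = 0b1100
    fmt.Println("chosen:", versionName(chosen), "offered:", offeredNames(offered))
}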
@@ -232,6 +236,8 @@ const ( ConnectionProtocolMap BPFMapName = "connection_protocol" // ConnectionTupleToSocketSKBConnMap is the map storing the connection tuple to socket skb conn tuple ConnectionTupleToSocketSKBConnMap BPFMapName = "conn_tuple_to_socket_skb_conn_tuple" + // EnhancedTLSTagsMap is the map storing additional tags for TLS connections (version, cipher, etc.) + EnhancedTLSTagsMap BPFMapName = "tls_enhanced_tags" // ClassificationProgsMap is the map storing the programs to run on classification events ClassificationProgsMap BPFMapName = "classification_progs" // TCPCloseProgsMap is the map storing the programs to run on TCP close events diff --git a/pkg/network/encoding/encoding_test.go b/pkg/network/encoding/encoding_test.go index c34183e192695..416642c484d6a 100644 --- a/pkg/network/encoding/encoding_test.go +++ b/pkg/network/encoding/encoding_test.go @@ -29,6 +29,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/protocols/http" "github.com/DataDog/datadog-agent/pkg/network/protocols/kafka" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" + "github.com/DataDog/datadog-agent/pkg/network/protocols/tls" "github.com/DataDog/datadog-agent/pkg/process/util" ) @@ -229,6 +230,7 @@ func TestSerialization(t *testing.T) { }, }, ProtocolStack: protocols.Stack{Application: protocols.HTTP}, + TLSTags: tls.Tags{ChosenVersion: 0, CipherSuite: 0, OfferedVersions: 0}, }, {ConnectionTuple: network.ConnectionTuple{ Source: util.AddressFromString("10.1.1.1"), @@ -241,6 +243,7 @@ func TestSerialization(t *testing.T) { }, StaticTags: tagOpenSSL | tagTLS, ProtocolStack: protocols.Stack{Application: protocols.HTTP2}, + TLSTags: tls.Tags{ChosenVersion: 0, CipherSuite: 0, OfferedVersions: 0}, DNSStats: map[dns.Hostname]map[dns.QueryType]dns.Stats{ dns.ToHostname("foo.com"): { dns.TypeA: { diff --git a/pkg/network/encoding/marshal/format.go b/pkg/network/encoding/marshal/format.go index 99cf5c4c75694..2e0d40955191a 100644 --- a/pkg/network/encoding/marshal/format.go +++ b/pkg/network/encoding/marshal/format.go @@ -120,9 +120,10 @@ func FormatConnection(builder *model.ConnectionBuilder, conn network.ConnectionS httpStaticTags, httpDynamicTags := httpEncoder.GetHTTPAggregationsAndTags(conn, builder) http2StaticTags, http2DynamicTags := http2Encoder.WriteHTTP2AggregationsAndTags(conn, builder) + tlsDynamicTags := conn.TLSTags.GetDynamicTags() staticTags := httpStaticTags | http2StaticTags - dynamicTags := mergeDynamicTags(httpDynamicTags, http2DynamicTags) + dynamicTags := mergeDynamicTags(httpDynamicTags, http2DynamicTags, tlsDynamicTags) staticTags |= kafkaEncoder.WriteKafkaAggregations(conn, builder) staticTags |= postgresEncoder.WritePostgresAggregations(conn, builder) diff --git a/pkg/network/event_common.go b/pkg/network/event_common.go index 6eea961c2aa3c..8a268e4508d2b 100644 --- a/pkg/network/event_common.go +++ b/pkg/network/event_common.go @@ -23,6 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/protocols/kafka" "github.com/DataDog/datadog-agent/pkg/network/protocols/postgres" "github.com/DataDog/datadog-agent/pkg/network/protocols/redis" + "github.com/DataDog/datadog-agent/pkg/network/protocols/tls" "github.com/DataDog/datadog-agent/pkg/process/util" ) @@ -79,6 +80,9 @@ type ConnectionFamily uint8 type ConnectionDirection uint8 const ( + // UNKNOWN represents connections where the direction is not known (yet) + UNKNOWN ConnectionDirection = 0 + // INCOMING represents connections inbound to the host INCOMING ConnectionDirection = 1 // incoming @@ 
-284,6 +288,7 @@ type ConnectionStats struct { RTTVar uint32 StaticTags uint64 ProtocolStack protocols.Stack + TLSTags tls.Tags // keep these fields last because they are 1 byte each and otherwise inflate the struct size due to alignment SPortIsEphemeral EphemeralPortType diff --git a/pkg/network/event_common_linux.go b/pkg/network/event_common_linux.go index 99058b017903a..c922d80dbac82 100644 --- a/pkg/network/event_common_linux.go +++ b/pkg/network/event_common_linux.go @@ -3,10 +3,21 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux +//go:build linux_bpf package network +import ( + "fmt" + "math" + "time" + "unsafe" + + netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" + "github.com/DataDog/datadog-agent/pkg/network/protocols" + "github.com/DataDog/datadog-agent/pkg/network/protocols/tls" +) + // Sub returns s-other. // // This implementation is different from the implementation on @@ -50,3 +61,100 @@ func (s StatCounters) Sub(other StatCounters) (sc StatCounters, underflow bool) return sc, false } + +// UnmarshalBinary converts a raw byte slice to a ConnectionStats object +func (c *ConnectionStats) UnmarshalBinary(data []byte) error { + if len(data) < netebpf.SizeofConn { + return fmt.Errorf("'Conn' binary data too small, received %d but expected %d bytes", len(data), netebpf.SizeofConn) + } + + ct := (*netebpf.Conn)(unsafe.Pointer(&data[0])) + c.FromConn(ct) + return nil +} + +// FromConn populates relevant fields on ConnectionStats from the connection data +func (c *ConnectionStats) FromConn(ct *netebpf.Conn) { + c.FromTupleAndStats(&ct.Tup, &ct.Conn_stats) + c.FromTCPStats(&ct.Tcp_stats) +} + +// FromTupleAndStats populates relevant fields on ConnectionStats from the arguments +func (c *ConnectionStats) FromTupleAndStats(t *netebpf.ConnTuple, s *netebpf.ConnStats) { + *c = ConnectionStats{ConnectionTuple: ConnectionTuple{ + Pid: t.Pid, + NetNS: t.Netns, + Source: t.SourceAddress(), + Dest: t.DestAddress(), + SPort: t.Sport, + DPort: t.Dport, + }, + Monotonic: StatCounters{ + SentBytes: s.Sent_bytes, + RecvBytes: s.Recv_bytes, + SentPackets: uint64(s.Sent_packets), + RecvPackets: uint64(s.Recv_packets), + }, + LastUpdateEpoch: s.Timestamp, + IsAssured: s.IsAssured(), + Cookie: StatCookie(s.Cookie), + } + + if s.Duration <= uint64(math.MaxInt64) { + c.Duration = time.Duration(s.Duration) * time.Nanosecond + } + + c.ProtocolStack = protocols.Stack{ + API: protocols.API(s.Protocol_stack.Api), + Application: protocols.Application(s.Protocol_stack.Application), + Encryption: protocols.Encryption(s.Protocol_stack.Encryption), + } + + c.TLSTags = tls.Tags{ + ChosenVersion: s.Tls_tags.Chosen_version, + CipherSuite: s.Tls_tags.Cipher_suite, + OfferedVersions: s.Tls_tags.Offered_versions, + } + + if t.Type() == netebpf.TCP { + c.Type = TCP + } else { + c.Type = UDP + } + + switch t.Family() { + case netebpf.IPv4: + c.Family = AFINET + case netebpf.IPv6: + c.Family = AFINET6 + } + + c.SPortIsEphemeral = IsPortInEphemeralRange(c.Family, c.Type, t.Sport) + + switch s.ConnectionDirection() { + case netebpf.Incoming: + c.Direction = INCOMING + case netebpf.Outgoing: + c.Direction = OUTGOING + default: + c.Direction = OUTGOING + } +} + +// FromTCPStats populates relevant fields on ConnectionStats from the arguments +func (c *ConnectionStats) FromTCPStats(tcpStats *netebpf.TCPStats) { + if c.Type != TCP || tcpStats == nil { + return + } + + c.Monotonic.Retransmits = tcpStats.Retransmits + 
c.Monotonic.TCPEstablished = tcpStats.State_transitions >> netebpf.Established & 1 + c.Monotonic.TCPClosed = tcpStats.State_transitions >> netebpf.Close & 1 + c.RTT = tcpStats.Rtt + c.RTTVar = tcpStats.Rtt_var + if tcpStats.Failure_reason > 0 { + c.TCPFailures = map[uint16]uint32{ + tcpStats.Failure_reason: 1, + } + } +} diff --git a/pkg/network/event_common_notlinux.go b/pkg/network/event_common_notlinux.go index 938acfb127f7a..b366328c545d4 100644 --- a/pkg/network/event_common_notlinux.go +++ b/pkg/network/event_common_notlinux.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build !linux +//go:build !linux_bpf package network diff --git a/pkg/network/filter/packet_source.go b/pkg/network/filter/packet_source.go index c75ad1222e9bf..c1b6208d3bbb1 100644 --- a/pkg/network/filter/packet_source.go +++ b/pkg/network/filter/packet_source.go @@ -19,15 +19,16 @@ type PacketInfo interface{} // PacketSource reads raw packet data type PacketSource interface { // VisitPackets reads all new raw packets that are available, invoking the given callback for each packet. - // If no packet is available, VisitPacket returns immediately. + // If no packet is available, VisitPacket blocks until OptPollTimeout and returns. // The format of the packet is dependent on the implementation of PacketSource -- i.e. it may be an ethernet frame, or a IP frame. // The data buffer is reused between invocations of VisitPacket and thus should not be pointed to. - // If the cancel channel is closed, VisitPackets will stop reading. - VisitPackets(cancel <-chan struct{}, visitor func(data []byte, info PacketInfo, timestamp time.Time) error) error + // If the PacketSource is closed, VisitPackets will stop reading. + VisitPackets(visitor func(data []byte, info PacketInfo, timestamp time.Time) error) error // LayerType returns the type of packet this source reads LayerType() gopacket.LayerType - // Close closes the packet source + // Close closes the packet source. This will cancel VisitPackets if it is currently polling. + // Close() will not return until after VisitPackets has been canceled/returned. 
Close() } diff --git a/pkg/network/filter/packet_source_linux.go b/pkg/network/filter/packet_source_linux.go index b272a984b86f9..d2ef751aa5243 100644 --- a/pkg/network/filter/packet_source_linux.go +++ b/pkg/network/filter/packet_source_linux.go @@ -47,6 +47,8 @@ var packetSourceTelemetry = struct { // AFPacketSource provides a RAW_SOCKET attached to an eBPF SOCKET_FILTER type AFPacketSource struct { *afpacket.TPacket + // store AFPacketInfo used to visit packets to avoid malloc on a per-packet basis + afPacketInfo *AFPacketInfo exit chan struct{} } @@ -59,6 +61,11 @@ type AFPacketInfo struct { PktType uint8 } +// GetPacketInfoBuffer returns a pointer to AFPacketInfo which is reused between calls +func (p *AFPacketSource) GetPacketInfoBuffer() *AFPacketInfo { + return p.afPacketInfo +} + // OptSnapLen specifies the maximum length of the packet to read // // Defaults to 4096 bytes @@ -98,8 +105,9 @@ func NewAFPacketSource(size int, opts ...interface{}) (*AFPacketSource, error) { } ps := &AFPacketSource{ - TPacket: rawSocket, - exit: make(chan struct{}), + TPacket: rawSocket, + afPacketInfo: &AFPacketInfo{}, + exit: make(chan struct{}), } go ps.pollStats() @@ -131,22 +139,17 @@ func (p *AFPacketSource) SetBPF(filter []bpf.RawInstruction) error { type zeroCopyPacketReader interface { ZeroCopyReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) + // GetPacketInfoBuffer returns a pointer to AFPacketInfo which is reused between calls + GetPacketInfoBuffer() *AFPacketInfo } // AFPacketVisitor is the callback that AFPacketSource will trigger for packets // The data buffer is reused between calls, so be careful type AFPacketVisitor = func(data []byte, info PacketInfo, t time.Time) error -func visitPackets(p zeroCopyPacketReader, exit <-chan struct{}, visit AFPacketVisitor) error { - pktInfo := &AFPacketInfo{} +func visitPackets(p zeroCopyPacketReader, visit AFPacketVisitor) error { + pktInfo := p.GetPacketInfoBuffer() for { - // allow the read loop to be prematurely interrupted - select { - case <-exit: - return nil - default: - } - data, stats, err := p.ZeroCopyReadPacketData() // Immediately retry for EAGAIN @@ -154,7 +157,7 @@ func visitPackets(p zeroCopyPacketReader, exit <-chan struct{}, visit AFPacketVi continue } - if err == afpacket.ErrTimeout { + if err == afpacket.ErrTimeout || err == afpacket.ErrCancelled { return nil } @@ -177,8 +180,8 @@ func visitPackets(p zeroCopyPacketReader, exit <-chan struct{}, visit AFPacketVi } // VisitPackets starts reading packets from the source -func (p *AFPacketSource) VisitPackets(exit <-chan struct{}, visit AFPacketVisitor) error { - return visitPackets(p, exit, visit) +func (p *AFPacketSource) VisitPackets(visit AFPacketVisitor) error { + return visitPackets(p, visit) } // LayerType is the gopacket.LayerType for this source diff --git a/pkg/network/filter/packet_source_linux_test.go b/pkg/network/filter/packet_source_linux_test.go index 23342d6aa5b57..cc7174ab9fd73 100644 --- a/pkg/network/filter/packet_source_linux_test.go +++ b/pkg/network/filter/packet_source_linux_test.go @@ -8,23 +8,35 @@ package filter import ( + "testing" + "time" + "github.com/google/gopacket" "github.com/google/gopacket/afpacket" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" - "testing" - "time" ) -type mockPacketReader struct { +type mockPacketCapture struct { data []byte ci gopacket.CaptureInfo err error } +type mockPacketReader struct { + packets <-chan mockPacketCapture +} + func (m *mockPacketReader) 
ZeroCopyReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) { - return m.data, m.ci, m.err + capture, ok := <-m.packets + if !ok { + return nil, gopacket.CaptureInfo{}, afpacket.ErrCancelled + } + return capture.data, capture.ci, capture.err +} +func (m *mockPacketReader) GetPacketInfoBuffer() *AFPacketInfo { + return &AFPacketInfo{} } func mockCaptureInfo(ancillaryData []interface{}) gopacket.CaptureInfo { @@ -38,17 +50,21 @@ func mockCaptureInfo(ancillaryData []interface{}) gopacket.CaptureInfo { } func expectAncillaryPktType(t *testing.T, ancillaryData []interface{}, pktType uint8) { - exit := make(chan struct{}) - - p := mockPacketReader{ + packets := make(chan mockPacketCapture, 1) + packets <- mockPacketCapture{ data: []byte{}, ci: mockCaptureInfo(ancillaryData), err: nil, } + close(packets) + + p := mockPacketReader{ + packets: packets, + } visited := false - err := visitPackets(&p, exit, func(_ []byte, info PacketInfo, _ time.Time) error { + err := visitPackets(&p, func(_ []byte, info PacketInfo, _ time.Time) error { // make sure the callback ran since it's responsible for the require call visited = true @@ -57,8 +73,6 @@ func expectAncillaryPktType(t *testing.T, ancillaryData []interface{}, pktType u // use assert so that we close the exit channel on failure assert.Equal(t, pktType, pktInfo.PktType) - // trigger exit so it only reads one packet - close(exit) return nil }) require.NoError(t, err) diff --git a/pkg/network/netlink/consumer_test.go b/pkg/network/netlink/consumer_test.go index ef3b8008852bb..17e39646a7b52 100644 --- a/pkg/network/netlink/consumer_test.go +++ b/pkg/network/netlink/consumer_test.go @@ -47,8 +47,6 @@ func TestConsumerKeepsRunningAfterCircuitBreakerTrip(t *testing.T) { go func() { defer close(exited) - for range ev { //nolint:revive // TODO - } }() isRecvLoopRunning := c.recvLoopRunning.Load diff --git a/pkg/network/protocols/events/batch_offsets.go b/pkg/network/protocols/events/batch_offsets.go index 8aceb45bdbf08..00f5cd0684469 100644 --- a/pkg/network/protocols/events/batch_offsets.go +++ b/pkg/network/protocols/events/batch_offsets.go @@ -40,7 +40,7 @@ func newOffsetManager(numCPUS int) *offsetManager { } // Get returns the data offset that hasn't been consumed yet for a given batch -func (o *offsetManager) Get(cpu int, batch *batch, syncing bool) (begin, end int) { +func (o *offsetManager) Get(cpu int, batch *Batch, syncing bool) (begin, end int) { o.mux.Lock() defer o.mux.Unlock() state := o.stateByCPU[cpu] @@ -85,6 +85,6 @@ func (o *offsetManager) NextBatchID(cpu int) int { return o.stateByCPU[cpu].nextBatchID } -func batchComplete(b *batch) bool { +func batchComplete(b *Batch) bool { return b.Cap > 0 && b.Len == b.Cap } diff --git a/pkg/network/protocols/events/batch_offsets_test.go b/pkg/network/protocols/events/batch_offsets_test.go index 54c444e4def32..9cc439e131b32 100644 --- a/pkg/network/protocols/events/batch_offsets_test.go +++ b/pkg/network/protocols/events/batch_offsets_test.go @@ -21,21 +21,21 @@ func TestOffsets(t *testing.T) { assert.Equal(t, 0, offsets.NextBatchID(1)) // reading full batch: cpu=0 batchID=0 - begin, end := offsets.Get(0, &batch{Idx: 0, Len: 10, Cap: 10}, false) + begin, end := offsets.Get(0, &Batch{Idx: 0, Len: 10, Cap: 10}, false) assert.Equal(t, 0, begin) assert.Equal(t, 10, end) // nextBatchID is advanced to 1 for cpu=0 assert.Equal(t, 1, offsets.NextBatchID(0)) // reading partial batch: cpu=1 batchID=0 sync=true - begin, end = offsets.Get(1, &batch{Idx: 0, Len: 8, Cap: 10}, true) + begin, end = 
offsets.Get(1, &Batch{Idx: 0, Len: 8, Cap: 10}, true) assert.Equal(t, 0, begin) assert.Equal(t, 8, end) // nextBatchID remains 0 for cpu=1 since this batch hasn't been filled up yet assert.Equal(t, 0, offsets.NextBatchID(1)) // reading full batch: cpu=1 batchID=0 - begin, end = offsets.Get(1, &batch{Idx: 0, Len: 10, Cap: 10}, false) + begin, end = offsets.Get(1, &Batch{Idx: 0, Len: 10, Cap: 10}, false) // notice we only read now the remaining offsets assert.Equal(t, 8, begin) assert.Equal(t, 10, end) @@ -43,14 +43,14 @@ func TestOffsets(t *testing.T) { assert.Equal(t, 1, offsets.NextBatchID(1)) // reading partial batch: cpu=0 batchID=1 sync=true - begin, end = offsets.Get(0, &batch{Idx: 1, Len: 4, Cap: 10}, true) + begin, end = offsets.Get(0, &Batch{Idx: 1, Len: 4, Cap: 10}, true) assert.Equal(t, 0, begin) assert.Equal(t, 4, end) // nextBatchID remains 1 for cpu=0 assert.Equal(t, 1, offsets.NextBatchID(0)) // reading partial batch: cpu=0 batchID=1 sync=true - begin, end = offsets.Get(0, &batch{Idx: 1, Len: 5, Cap: 10}, true) + begin, end = offsets.Get(0, &Batch{Idx: 1, Len: 5, Cap: 10}, true) assert.Equal(t, 4, begin) assert.Equal(t, 5, end) // nextBatchID remains 1 for cpu=0 @@ -63,20 +63,20 @@ func TestDelayedBatchReads(t *testing.T) { // this emulates the scenario where we preemptively read (sync=true) two // complete batches in a row before they are read from perf buffer - begin, end := offsets.Get(0, &batch{Idx: 0, Len: 10, Cap: 10}, true) + begin, end := offsets.Get(0, &Batch{Idx: 0, Len: 10, Cap: 10}, true) assert.Equal(t, 0, begin) assert.Equal(t, 10, end) - begin, end = offsets.Get(0, &batch{Idx: 1, Len: 10, Cap: 10}, true) + begin, end = offsets.Get(0, &Batch{Idx: 1, Len: 10, Cap: 10}, true) assert.Equal(t, 0, begin) assert.Equal(t, 10, end) // now the "delayed" batches from perf buffer are read in sequence - begin, end = offsets.Get(0, &batch{Idx: 0, Len: 10, Cap: 10}, true) + begin, end = offsets.Get(0, &Batch{Idx: 0, Len: 10, Cap: 10}, true) assert.Equal(t, 0, begin) assert.Equal(t, 0, end) - begin, end = offsets.Get(0, &batch{Idx: 1, Len: 10, Cap: 10}, true) + begin, end = offsets.Get(0, &Batch{Idx: 1, Len: 10, Cap: 10}, true) assert.Equal(t, 0, begin) assert.Equal(t, 0, end) } @@ -85,11 +85,11 @@ func TestUnchangedBatchRead(t *testing.T) { const numCPUs = 1 offsets := newOffsetManager(numCPUs) - begin, end := offsets.Get(0, &batch{Idx: 0, Len: 5, Cap: 10}, true) + begin, end := offsets.Get(0, &Batch{Idx: 0, Len: 5, Cap: 10}, true) assert.Equal(t, 0, begin) assert.Equal(t, 5, end) - begin, end = offsets.Get(0, &batch{Idx: 0, Len: 5, Cap: 10}, true) + begin, end = offsets.Get(0, &Batch{Idx: 0, Len: 5, Cap: 10}, true) assert.Equal(t, 5, begin) assert.Equal(t, 5, end) } @@ -99,13 +99,13 @@ func TestReadGap(t *testing.T) { offsets := newOffsetManager(numCPUs) // this emulates the scenario where a batch is lost in the perf buffer - begin, end := offsets.Get(0, &batch{Idx: 0, Len: 10, Cap: 10}, true) + begin, end := offsets.Get(0, &Batch{Idx: 0, Len: 10, Cap: 10}, true) assert.Equal(t, 0, begin) assert.Equal(t, 10, end) assert.Equal(t, 1, offsets.NextBatchID(0)) // batch idx=1 was lost - begin, end = offsets.Get(0, &batch{Idx: 2, Len: 10, Cap: 10}, true) + begin, end = offsets.Get(0, &Batch{Idx: 2, Len: 10, Cap: 10}, true) assert.Equal(t, 0, begin) assert.Equal(t, 10, end) assert.Equal(t, 3, offsets.NextBatchID(0)) diff --git a/pkg/network/protocols/events/batch_reader.go b/pkg/network/protocols/events/batch_reader.go index 124127c397c21..4e1e0dbd0766c 100644 --- 
a/pkg/network/protocols/events/batch_reader.go +++ b/pkg/network/protocols/events/batch_reader.go @@ -14,20 +14,20 @@ import ( ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) -var batchPool = ddsync.NewDefaultTypedPool[batch]() +var batchPool = ddsync.NewDefaultTypedPool[Batch]() type batchReader struct { sync.Mutex numCPUs int - batchMap *maps.GenericMap[batchKey, batch] + batchMap *maps.GenericMap[batchKey, Batch] offsets *offsetManager workerPool *workerPool stopped bool } -func newBatchReader(offsetManager *offsetManager, batchMap *maps.GenericMap[batchKey, batch], numCPUs int) (*batchReader, error) { +func newBatchReader(offsetManager *offsetManager, batchMap *maps.GenericMap[batchKey, Batch], numCPUs int) (*batchReader, error) { // initialize eBPF maps - batch := new(batch) + batch := new(Batch) for i := 0; i < numCPUs; i++ { // Ring buffer events don't have CPU information, so we associate each // batch entry with a CPU during startup. This information is used by @@ -57,7 +57,7 @@ func newBatchReader(offsetManager *offsetManager, batchMap *maps.GenericMap[batc // ReadAll batches from eBPF (concurrently) and execute the given // callback function for each batch -func (r *batchReader) ReadAll(f func(cpu int, b *batch)) { +func (r *batchReader) ReadAll(f func(cpu int, b *Batch)) { // This lock is used only for the purposes of synchronizing termination // and it's only held while *enqueing* the jobs. r.Lock() @@ -77,7 +77,7 @@ func (r *batchReader) ReadAll(f func(cpu int, b *batch)) { b := batchPool.Get() defer func() { - *b = batch{} + *b = Batch{} batchPool.Put(b) }() diff --git a/pkg/network/protocols/events/configuration.go b/pkg/network/protocols/events/configuration.go index 0888c8d2fde7d..1b2421312f732 100644 --- a/pkg/network/protocols/events/configuration.go +++ b/pkg/network/protocols/events/configuration.go @@ -26,16 +26,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -// defaultPerfBufferSize controls the amount of memory in bytes used *per CPU* -// allocated for buffering perf event data -var defaultPerfEventBufferSize = 16 * os.Getpagesize() - -// defaultPerfHandlerSize controls the size of the go channel that buffers perf -// events (*ddebpf.PerfHandler). All perf events handled by this library have -// fixed size (sizeof(batch_data_t)) which is ~4KB, so by choosing a value of -// 100 we'll be buffering up to ~400KB of data in *Go* heap memory. -const defaultPerfHandlerSize = 100 - // Configure a given `*manager.Manager` for event processing // This essentially instantiates the perf map/ring buffers and configure the // eBPF maps where events are enqueued. @@ -56,20 +46,22 @@ func Configure(cfg *config.Config, proto string, m *manager.Manager, o *manager. 
useRingBuffer := cfg.EnableUSMRingBuffers && features.HaveMapType(ebpf.RingBuf) == nil utils.AddBoolConst(o, useRingBuffer, "use_ring_buffer") + bufferSize := cfg.USMKernelBufferPages * os.Getpagesize() + if useRingBuffer { - setupPerfRing(proto, m, o, numCPUs) + setupPerfRing(proto, m, o, numCPUs, cfg.USMDataChannelSize, bufferSize) } else { - setupPerfMap(proto, m) + setupPerfMap(proto, m, cfg.USMDataChannelSize, bufferSize) } } -func setupPerfMap(proto string, m *manager.Manager) { - handler := ddebpf.NewPerfHandler(defaultPerfHandlerSize) +func setupPerfMap(proto string, m *manager.Manager, dataChannelSize, perfEventBufferSize int) { + handler := ddebpf.NewPerfHandler(dataChannelSize) mapName := eventMapName(proto) pm := &manager.PerfMap{ Map: manager.Map{Name: mapName}, PerfMapOptions: manager.PerfMapOptions{ - PerfRingBufferSize: defaultPerfEventBufferSize, + PerfRingBufferSize: perfEventBufferSize, // Our events are already batched on the kernel side, so it's // desirable to have Watermark set to 1 @@ -90,10 +82,11 @@ func setupPerfMap(proto string, m *manager.Manager) { setHandler(proto, handler) } -func setupPerfRing(proto string, m *manager.Manager, o *manager.Options, numCPUs int) { - handler := ddebpf.NewRingBufferHandler(defaultPerfHandlerSize) +func setupPerfRing(proto string, m *manager.Manager, o *manager.Options, numCPUs int, dataChannelSize, ringBufferSize int) { + handler := ddebpf.NewRingBufferHandler(dataChannelSize) mapName := eventMapName(proto) - ringBufferSize := toPowerOf2(numCPUs * defaultPerfEventBufferSize) + // Adjusting ring buffer size with the number of CPUs and rounding it to the nearest power of 2 + ringBufferSize = toPowerOf2(numCPUs * ringBufferSize) rb := &manager.RingBuffer{ Map: manager.Map{Name: mapName}, RingBufferOptions: manager.RingBufferOptions{ @@ -159,7 +152,7 @@ func removeRingBufferHelperCalls(m *manager.Manager) { // TODO: this is not the intended API usage of a `ebpf.Modifier`. 
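A rough standalone sketch of the buffer sizing used by Configure above: the perf map gets USMKernelBufferPages pages per CPU, and the ring buffer gets that amount scaled by the CPU count and rounded to a power of two. The concrete values are hypothetical, and nextPowerOf2 stands in for the package's toPowerOf2 helper, whose rounding direction is assumed to be upward.

package main

import (
	"fmt"
	"os"
)

// nextPowerOf2 rounds x up to the next power of two (assumed equivalent to toPowerOf2).
func nextPowerOf2(x int) int {
	n := 1
	for n < x {
		n <<= 1
	}
	return n
}

func main() {
	const usmKernelBufferPages = 16 // hypothetical cfg.USMKernelBufferPages
	const numCPUs = 6               // hypothetical CPU count

	perCPUPerfBuffer := usmKernelBufferPages * os.Getpagesize()
	ringBuffer := nextPowerOf2(numCPUs * perCPUPerfBuffer)

	// With 4 KiB pages: 65536 bytes per CPU for the perf map,
	// 6*65536 = 393216 rounded up to 524288 bytes for the shared ring buffer.
	fmt.Println(perCPUPerfBuffer, ringBuffer)
}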
// Once we have access to the `ddebpf.Manager`, add this modifier to its list of // `EnabledModifiers` and let it control the execution of the callbacks - patcher := ddebpf.NewHelperCallRemover(asm.FnRingbufOutput) + patcher := ddebpf.NewHelperCallRemover(asm.FnRingbufOutput, asm.FnRingbufQuery, asm.FnRingbufReserve, asm.FnRingbufSubmit, asm.FnRingbufDiscard) err := patcher.BeforeInit(m, names.NewModuleName("usm"), nil) if err != nil { diff --git a/pkg/network/protocols/events/consumer.go b/pkg/network/protocols/events/consumer.go index eb00d29678b65..4d996996c91d4 100644 --- a/pkg/network/protocols/events/consumer.go +++ b/pkg/network/protocols/events/consumer.go @@ -25,7 +25,7 @@ import ( const ( batchMapSuffix = "_batches" eventsMapSuffix = "_batch_events" - sizeOfBatch = int(unsafe.Sizeof(batch{})) + sizeOfBatch = int(unsafe.Sizeof(Batch{})) ) var errInvalidPerfEvent = errors.New("invalid perf event") @@ -59,7 +59,7 @@ type Consumer[V any] struct { // 2) be thread-safe, as the callback may be executed concurrently from multiple go-routines; func NewConsumer[V any](proto string, ebpf *manager.Manager, callback func([]V)) (*Consumer[V], error) { batchMapName := proto + batchMapSuffix - batchMap, err := maps.GetMap[batchKey, batch](ebpf, batchMapName) + batchMap, err := maps.GetMap[batchKey, Batch](ebpf, batchMapName) if err != nil { return nil, fmt.Errorf("unable to find map %s: %s", batchMapName, err) } @@ -164,7 +164,7 @@ func (c *Consumer[V]) Start() { return } - c.batchReader.ReadAll(func(_ int, b *batch) { + c.batchReader.ReadAll(func(_ int, b *Batch) { c.process(b, true) }) if log.ShouldLog(log.DebugLvl) { @@ -209,7 +209,7 @@ func (c *Consumer[V]) Stop() { close(c.syncRequest) } -func (c *Consumer[V]) process(b *batch, syncing bool) { +func (c *Consumer[V]) process(b *Batch, syncing bool) { cpu := int(b.Cpu) // Determine the subset of data we're interested in as we might have read @@ -246,7 +246,7 @@ func (c *Consumer[V]) process(b *batch, syncing bool) { c.callback(events) } -func batchFromEventData(data []byte) (*batch, error) { +func batchFromEventData(data []byte) (*Batch, error) { if len(data) < sizeOfBatch { // For some reason the eBPF program sent us a perf event with a size // different from what we're expecting. 
@@ -260,10 +260,10 @@ func batchFromEventData(data []byte) (*batch, error) { return nil, errInvalidPerfEvent } - return (*batch)(unsafe.Pointer(&data[0])), nil + return (*Batch)(unsafe.Pointer(&data[0])), nil } -func pointerToElement[V any](b *batch, elementIdx int) *V { +func pointerToElement[V any](b *Batch, elementIdx int) *V { offset := elementIdx * int(b.Event_size) return (*V)(unsafe.Pointer(uintptr(unsafe.Pointer(&b.Data[0])) + uintptr(offset))) } diff --git a/pkg/network/protocols/events/consumer_test.go b/pkg/network/protocols/events/consumer_test.go index 4b666ba4149de..44c23d5866062 100644 --- a/pkg/network/protocols/events/consumer_test.go +++ b/pkg/network/protocols/events/consumer_test.go @@ -17,14 +17,10 @@ import ( manager "github.com/DataDog/ebpf-manager" "github.com/cilium/ebpf" - "github.com/cilium/ebpf/features" - "github.com/cilium/ebpf/perf" - "github.com/cilium/ebpf/ringbuf" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" - "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -38,7 +34,7 @@ func TestConsumer(t *testing.T) { const numEvents = 100 c := config.New() - program, err := newEBPFProgram(c) + program, err := NewEBPFProgram(c) require.NoError(t, err) var mux sync.Mutex @@ -51,7 +47,7 @@ func TestConsumer(t *testing.T) { } } - consumer, err := NewConsumer("test", program, callback) + consumer, err := NewConsumer("test", program.Manager, callback) require.NoError(t, err) consumer.Start() @@ -86,16 +82,16 @@ func TestInvalidBatchCountMetric(t *testing.T) { } c := config.New() - program, err := newEBPFProgram(c) + program, err := NewEBPFProgram(c) require.NoError(t, err) t.Cleanup(func() { program.Stop(manager.CleanAll) }) - consumer, err := NewConsumer("test", program, func([]uint64) {}) + consumer, err := NewConsumer("test", program.Manager, func([]uint64) {}) require.NoError(t, err) // We are creating a raw sample with a data length of 4, which is smaller than sizeOfBatch // and would be considered an invalid batch. - recordSample(c, consumer, []byte("test")) + RecordSample(c, consumer, []byte("test")) consumer.Start() t.Cleanup(func() { consumer.Stop() }) @@ -113,23 +109,7 @@ type eventGenerator struct { testFile *os.File } -// recordSample records a sample using the consumer handler. -func recordSample(c *config.Config, consumer *Consumer[uint64], sampleData []byte) { - // Ring buffers require kernel version 5.8.0 or higher, therefore, the handler is chosen based on the kernel version. 
- if c.EnableUSMRingBuffers && features.HaveMapType(ebpf.RingBuf) == nil { - handler := consumer.handler.(*ddebpf.RingBufferHandler) - handler.RecordHandler(&ringbuf.Record{ - RawSample: sampleData, - }, nil, nil) - } else { - handler := consumer.handler.(*ddebpf.PerfHandler) - handler.RecordHandler(&perf.Record{ - RawSample: sampleData, - }, nil, nil) - } -} - -func newEventGenerator(program *manager.Manager, t *testing.T) *eventGenerator { +func newEventGenerator(program *ddebpf.Manager, t *testing.T) *eventGenerator { m, _, _ := program.GetMap("test") require.NotNilf(t, m, "couldn't find test map") @@ -169,45 +149,3 @@ func (e *eventGenerator) Generate(eventID uint64) error { func (e *eventGenerator) Stop() { e.testFile.Close() } - -func newEBPFProgram(c *config.Config) (*manager.Manager, error) { - bc, err := bytecode.GetReader(c.BPFDir, "usm_events_test-debug.o") - if err != nil { - return nil, err - } - defer bc.Close() - - m := &manager.Manager{ - Probes: []*manager.Probe{ - { - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: "tracepoint__syscalls__sys_enter_write", - }, - }, - }, - } - options := manager.Options{ - RemoveRlimit: true, - ActivatedProbes: []manager.ProbesSelector{ - &manager.ProbeSelector{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: "tracepoint__syscalls__sys_enter_write", - }, - }, - }, - ConstantEditors: []manager.ConstantEditor{ - { - Name: "test_monitoring_enabled", - Value: uint64(1), - }, - }, - } - - Configure(config.New(), "test", m, &options) - err = m.InitWithOptions(bc, options) - if err != nil { - return nil, err - } - - return m, nil -} diff --git a/pkg/network/protocols/events/test_helpers.go b/pkg/network/protocols/events/test_helpers.go new file mode 100644 index 0000000000000..2e68e7cb59bff --- /dev/null +++ b/pkg/network/protocols/events/test_helpers.go @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf && test + +package events + +import ( + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/features" + "github.com/cilium/ebpf/perf" + "github.com/cilium/ebpf/ringbuf" + + manager "github.com/DataDog/ebpf-manager" + + ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" + "github.com/DataDog/datadog-agent/pkg/network/config" +) + +// NewEBPFProgram creates a new test eBPF program. 
+func NewEBPFProgram(c *config.Config) (*ddebpf.Manager, error) { + bc, err := bytecode.GetReader(c.BPFDir, "usm_events_test-debug.o") + if err != nil { + return nil, err + } + defer bc.Close() + + m := &manager.Manager{ + Probes: []*manager.Probe{ + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "tracepoint__syscalls__sys_enter_write", + }, + }, + }, + } + options := manager.Options{ + RemoveRlimit: true, + ActivatedProbes: []manager.ProbesSelector{ + &manager.ProbeSelector{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "tracepoint__syscalls__sys_enter_write", + }, + }, + }, + ConstantEditors: []manager.ConstantEditor{ + { + Name: "test_monitoring_enabled", + Value: uint64(1), + }, + }, + } + ddEbpfManager := ddebpf.NewManager(m, "usm", &ebpftelemetry.ErrorsTelemetryModifier{}) + + Configure(config.New(), "test", ddEbpfManager.Manager, &options) + err = ddEbpfManager.InitWithOptions(bc, &options) + if err != nil { + return nil, err + } + + return ddEbpfManager, nil +} + +// RecordSample records a sample using the consumer Handler. +func RecordSample[V any](c *config.Config, consumer *Consumer[V], sampleData []byte) { + // Ring buffers require kernel version 5.8.0 or higher, therefore, the Handler is chosen based on the kernel version. + if c.EnableUSMRingBuffers && features.HaveMapType(ebpf.RingBuf) == nil { + handler := consumer.handler.(*ddebpf.RingBufferHandler) + handler.RecordHandler(&ringbuf.Record{ + RawSample: sampleData, + }, nil, nil) + } else { + handler := consumer.handler.(*ddebpf.PerfHandler) + handler.RecordHandler(&perf.Record{ + RawSample: sampleData, + }, nil, nil) + } +} diff --git a/pkg/network/protocols/events/types.go b/pkg/network/protocols/events/types.go index 87a75f96bc894..2797f8b70493e 100644 --- a/pkg/network/protocols/events/types.go +++ b/pkg/network/protocols/events/types.go @@ -12,7 +12,7 @@ package events */ import "C" -type batch C.batch_data_t +type Batch C.batch_data_t type batchKey C.batch_key_t const ( diff --git a/pkg/network/protocols/events/types_linux.go b/pkg/network/protocols/events/types_linux.go index e0d408f118ada..5c41349812204 100644 --- a/pkg/network/protocols/events/types_linux.go +++ b/pkg/network/protocols/events/types_linux.go @@ -3,7 +3,7 @@ package events -type batch struct { +type Batch struct { Idx uint64 Cpu uint16 Len uint16 diff --git a/pkg/network/protocols/http/event_pipeline_test.go b/pkg/network/protocols/http/event_pipeline_test.go new file mode 100644 index 0000000000000..5ad8c227f26cb --- /dev/null +++ b/pkg/network/protocols/http/event_pipeline_test.go @@ -0,0 +1,175 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build linux_bpf + +package http + +import ( + "bytes" + "encoding/binary" + "fmt" + "net/http" + "runtime" + "sync" + "testing" + "time" + "unsafe" + + "github.com/DataDog/datadog-agent/pkg/network/config" + "github.com/DataDog/datadog-agent/pkg/network/protocols/events" + "github.com/stretchr/testify/require" +) + +const ( + batchDataSize = 4096 +) + +type HTTPEventData struct { + Method uint8 + StatusCode uint16 + RequestFragment []byte +} + +// eBPFEventToBytes serializes the provided events into a byte array. 
+func eBPFEventToBytes(b *testing.B, events []EbpfEvent, numOfEventsInBatch int) [batchDataSize]int8 { + var result [batchDataSize]int8 + var buffer bytes.Buffer + + // Serialize the events in the slice + for i := 0; i < numOfEventsInBatch; i++ { + // Use the two events alternately. Each iteration will use a different one. + event := events[i%len(events)] + require.NoError(b, binary.Write(&buffer, binary.LittleEndian, event)) + } + + serializedData := buffer.Bytes() + // Ensure the serialized data fits into the result array + require.LessOrEqualf(b, len(serializedData), len(result), "serialized data exceeds batchDataSize bytes") + + for i, b := range serializedData { + result[i] = int8(b) + } + + return result +} + +// setupBenchmark sets up the benchmark environment by creating a consumer, protocol, and configuration. +func setupBenchmark(b *testing.B, c *config.Config, i, totalEventsCount, numOfEventsInBatch int, httpEvents []EbpfEvent, wg *sync.WaitGroup) (*events.Consumer[EbpfEvent], *protocol) { + require.NotEmpty(b, httpEvents, "httpEvents slice is empty") + + program, err := events.NewEBPFProgram(c) + require.NoError(b, err) + + httpTelemetry := NewTelemetry(fmt.Sprintf("http_%s_%d_%d", b.Name(), b.N, i)) + + p := protocol{ + cfg: c, + telemetry: httpTelemetry, + statkeeper: NewStatkeeper(c, httpTelemetry, NewIncompleteBuffer(c, httpTelemetry)), + } + consumer, err := events.NewConsumer("test", program.Manager, p.processHTTP) + require.NoError(b, err) + + // Using a wait group to ensure the goroutine finishes before the benchmark ends. + wg.Add(1) + go func() { + defer wg.Done() + generateMockEvents(b, c, consumer, httpEvents, numOfEventsInBatch, totalEventsCount) + }() + + return consumer, &p +} + +// generateMockEvents generates mock events to be used in the benchmark. +func generateMockEvents(b *testing.B, c *config.Config, consumer *events.Consumer[EbpfEvent], httpEvents []EbpfEvent, numOfEventsInBatch, totalEvents int) { + // TODO: Determine if testing the CPU flow is necessary. + mockBatch := events.Batch{ + Len: uint16(numOfEventsInBatch), + Cap: uint16(numOfEventsInBatch), + Event_size: uint16(unsafe.Sizeof(httpEvents[0])), + Data: eBPFEventToBytes(b, httpEvents, numOfEventsInBatch), + } + + for i := 0; i < totalEvents/numOfEventsInBatch; i++ { + mockBatch.Idx = uint64(i) + var buf bytes.Buffer + require.NoError(b, binary.Write(&buf, binary.LittleEndian, &mockBatch)) + events.RecordSample(c, consumer, buf.Bytes()) + buf.Reset() + } +} + +// createHTTPEvents creates a slice of HTTP events to be used in the benchmark. +func createHTTPEvents(eventsData []HTTPEventData) []EbpfEvent { + events := make([]EbpfEvent, len(eventsData)) + for i, data := range eventsData { + events[i] = EbpfEvent{ + Tuple: ConnTuple{}, + Http: EbpfTx{ + Request_started: 1, + Response_last_seen: 2, + Request_method: data.Method, + Response_status_code: data.StatusCode, + Request_fragment: requestFragment(data.RequestFragment), + }, + } + } + return events +} + +// BenchmarkHTTPEventConsumer benchmarks the consumer with a large number of events to measure the performance. +func BenchmarkHTTPEventConsumer(b *testing.B) { + // Set MemProfileRate to 1 in order to collect every allocation + runtime.MemProfileRate = 1 + var wg sync.WaitGroup + + b.ReportAllocs() + b.ResetTimer() + + testCases := []struct { + name string + totalEventsCount int + // Serialized data can't exceed batchDataSize bytes that why we can insert 14 events in a batch. 
+ numOfEventsInBatch int + httpEvents []EbpfEvent + }{ + {"SmallBatch", 1000, 8, createHTTPEvents([]HTTPEventData{ + {Method: uint8(MethodGet), StatusCode: http.StatusOK, RequestFragment: []byte("GET / HTTP/1.1")}, + {Method: uint8(MethodPost), StatusCode: http.StatusCreated, RequestFragment: []byte("POST /create HTTP/1.1")}, + })}, + {"MediumBatch", 38000, 10, createHTTPEvents([]HTTPEventData{ + {Method: uint8(MethodGet), StatusCode: http.StatusOK, RequestFragment: []byte("GET / HTTP/1.1")}, + {Method: uint8(MethodPost), StatusCode: http.StatusCreated, RequestFragment: []byte("POST /create HTTP/1.1")}, + })}, + {"LargeBatch", 42000, 14, createHTTPEvents([]HTTPEventData{ + {Method: uint8(MethodGet), StatusCode: http.StatusOK, RequestFragment: []byte("GET / HTTP/1.1")}, + {Method: uint8(MethodPost), StatusCode: http.StatusCreated, RequestFragment: []byte("POST /create HTTP/1.1")}, + })}, + {"MaxEventsCount", 3150000, 14, createHTTPEvents([]HTTPEventData{ + {Method: uint8(MethodGet), StatusCode: http.StatusOK, RequestFragment: []byte("GET / HTTP/1.1")}, + {Method: uint8(MethodDelete), StatusCode: http.StatusAccepted, RequestFragment: []byte("DELETE /delete HTTP/1.1")}, + })}, + } + + for _, tc := range testCases { + b.Run(tc.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + consumer, p := setupBenchmark(b, config.New(), i, tc.totalEventsCount, tc.numOfEventsInBatch, tc.httpEvents, &wg) + + consumer.Start() + wg.Wait() + + require.Eventually(b, func() bool { + if tc.totalEventsCount == int(p.telemetry.hits2XX.counterPlain.Get()) { + b.Logf("USM summary: %s", p.telemetry.metricGroup.Summary()) + return true + } + return false + }, 5*time.Second, 100*time.Millisecond) + } + }) + } +} diff --git a/pkg/network/protocols/http/protocol.go b/pkg/network/protocols/http/protocol.go index 0c1c4e8b2bc06..ac8bb888b5de5 100644 --- a/pkg/network/protocols/http/protocol.go +++ b/pkg/network/protocols/http/protocol.go @@ -195,7 +195,7 @@ func (p *protocol) setupMapCleaner(mgr *manager.Manager) { log.Errorf("error getting http_in_flight map: %s", err) return } - mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, EbpfTx](httpMap, 1024, inFlightMap, "usm_monitor") + mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, EbpfTx](httpMap, protocols.DefaultMapCleanerBatchSize, inFlightMap, "usm_monitor") if err != nil { log.Errorf("error creating map cleaner: %s", err) return diff --git a/pkg/network/protocols/http2/dynamic_table.go b/pkg/network/protocols/http2/dynamic_table.go index 1a20ae06603c7..a9a2d12d68f5c 100644 --- a/pkg/network/protocols/http2/dynamic_table.go +++ b/pkg/network/protocols/http2/dynamic_table.go @@ -16,13 +16,12 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/network/config" netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" + "github.com/DataDog/datadog-agent/pkg/network/protocols" "github.com/DataDog/datadog-agent/pkg/network/protocols/events" ) const ( terminatedConnectionsEventStream = "terminated_http2" - - defaultMapCleanerBatchSize = 1024 ) // DynamicTable encapsulates the management of the dynamic table in the user mode. 
@@ -86,7 +85,7 @@ func (dt *DynamicTable) setupDynamicTableMapCleaner(mgr *manager.Manager, cfg *c return fmt.Errorf("error getting http2 dynamic table map: %w", err) } - mapCleaner, err := ddebpf.NewMapCleaner[HTTP2DynamicTableIndex, HTTP2DynamicTableEntry](dynamicTableMap, defaultMapCleanerBatchSize, dynamicTable, "usm_monitor") + mapCleaner, err := ddebpf.NewMapCleaner[HTTP2DynamicTableIndex, HTTP2DynamicTableEntry](dynamicTableMap, protocols.DefaultMapCleanerBatchSize, dynamicTable, "usm_monitor") if err != nil { return fmt.Errorf("error creating a map cleaner for http2 dynamic table: %w", err) } diff --git a/pkg/network/protocols/http2/model_linux.go b/pkg/network/protocols/http2/model_linux.go index 6af78e626eca1..f5675cd8372eb 100644 --- a/pkg/network/protocols/http2/model_linux.go +++ b/pkg/network/protocols/http2/model_linux.go @@ -14,6 +14,7 @@ import ( "net" "strconv" "strings" + "sync" "time" "golang.org/x/net/http2/hpack" @@ -29,13 +30,13 @@ import ( var oversizedLogLimit = log.NewLogLimit(10, time.Minute*10) // validatePath validates the given path. -func validatePath(str string) error { +func validatePath(str []byte) error { if len(str) == 0 { return errors.New("decoded path is empty") } // ensure we found a '/' at the beginning of the path if str[0] != '/' { - return fmt.Errorf("decoded path '%s' doesn't start with '/'", str) + return fmt.Errorf("decoded path (%#v) doesn't start with '/'", str) } return nil } @@ -51,27 +52,41 @@ func validatePathSize(size uint8) error { return nil } +// Buffer pool to be used for decoding HTTP2 paths. +// This is used to avoid allocating a new buffer for each path decoding. +var bufPool = sync.Pool{ + New: func() interface{} { return new(bytes.Buffer) }, +} + // decodeHTTP2Path tries to decode (Huffman) the path from the given buffer. // Possible errors: // - If the given pathSize is 0. // - If the given pathSize is larger than the buffer size. // - If the Huffman decoding fails. // - If the decoded path doesn't start with a '/'. -func decodeHTTP2Path(buf [maxHTTP2Path]byte, pathSize uint8) ([]byte, error) { +func decodeHTTP2Path(buf [maxHTTP2Path]byte, pathSize uint8, output []byte) ([]byte, error) { if err := validatePathSize(pathSize); err != nil { return nil, err } - str, err := hpack.HuffmanDecodeToString(buf[:pathSize]) + tmpBuffer := bufPool.Get().(*bytes.Buffer) + tmpBuffer.Reset() + defer bufPool.Put(tmpBuffer) + + n, err := hpack.HuffmanDecode(tmpBuffer, buf[:pathSize]) if err != nil { return nil, err } - if err = validatePath(str); err != nil { + if err = validatePath(tmpBuffer.Bytes()); err != nil { return nil, err } - return []byte(str), nil + if n > len(output) { + n = len(output) + } + copy(output[:n], tmpBuffer.Bytes()) + return output[:n], nil } // Path returns the URL from the request fragment captured in eBPF. 
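A standalone sketch of the pooled-buffer decoding introduced in decodeHTTP2Path above: the same sync.Pool plus hpack.HuffmanDecode pattern, decoding into a caller-supplied slice so the hot path performs no per-call allocation. decodeInto and pathBufPool are hypothetical names, not the package's identifiers.

package main

import (
	"bytes"
	"fmt"
	"sync"

	"golang.org/x/net/http2/hpack"
)

var pathBufPool = sync.Pool{New: func() interface{} { return new(bytes.Buffer) }}

// decodeInto Huffman-decodes encoded into dst, reusing a pooled scratch buffer.
// The result is truncated to len(dst), mirroring the output-buffer behavior above.
func decodeInto(dst, encoded []byte) ([]byte, error) {
	buf := pathBufPool.Get().(*bytes.Buffer)
	buf.Reset()
	defer pathBufPool.Put(buf)

	if _, err := hpack.HuffmanDecode(buf, encoded); err != nil {
		return nil, err
	}
	n := copy(dst, buf.Bytes())
	return dst[:n], nil
}

func main() {
	enc := hpack.AppendHuffmanString(nil, "/hello.HelloService/SayHello")
	out := make([]byte, 160)
	path, err := decodeInto(out, enc)
	fmt.Println(string(path), err)
}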
@@ -87,10 +102,9 @@ func (tx *EbpfTx) Path(buffer []byte) ([]byte, bool) { } } - var res []byte var err error if tx.Stream.Path.Is_huffman_encoded { - res, err = decodeHTTP2Path(tx.Stream.Path.Raw_buffer, tx.Stream.Path.Length) + buffer, err = decodeHTTP2Path(tx.Stream.Path.Raw_buffer, tx.Stream.Path.Length, buffer) if err != nil { if oversizedLogLimit.ShouldLog() { log.Warnf("unable to decode HTTP2 path (%#v) due to: %s", tx.Stream.Path.Raw_buffer[:tx.Stream.Path.Length], err) @@ -98,32 +112,35 @@ func (tx *EbpfTx) Path(buffer []byte) ([]byte, bool) { return nil, false } } else { - if err = validatePathSize(tx.Stream.Path.Length); err != nil { + if tx.Stream.Path.Length == 0 { if oversizedLogLimit.ShouldLog() { - log.Warnf("path size: %d is invalid due to: %s", tx.Stream.Path.Length, err) + log.Warn("path size: 0 is invalid") } return nil, false + } else if int(tx.Stream.Path.Length) > len(tx.Stream.Path.Raw_buffer) { + if oversizedLogLimit.ShouldLog() { + log.Warnf("Truncating as path size: %d is greater than the buffer size: %d", tx.Stream.Path.Length, len(buffer)) + } + tx.Stream.Path.Length = uint8(len(tx.Stream.Path.Raw_buffer)) } - - res = tx.Stream.Path.Raw_buffer[:tx.Stream.Path.Length] - if err = validatePath(string(res)); err != nil { + n := copy(buffer, tx.Stream.Path.Raw_buffer[:tx.Stream.Path.Length]) + // Truncating exceeding nulls. + buffer = buffer[:n] + if err = validatePath(buffer); err != nil { if oversizedLogLimit.ShouldLog() { - log.Warnf("path %s is invalid due to: %s", string(res), err) + // The error already contains the path, so we don't need to log it again. + log.Warn(err) } return nil, false } - - res = tx.Stream.Path.Raw_buffer[:tx.Stream.Path.Length] } // Ignore query parameters - queryStart := bytes.IndexByte(res, byte('?')) + queryStart := bytes.IndexByte(buffer, byte('?')) if queryStart == -1 { - queryStart = len(res) + queryStart = len(buffer) } - - n := copy(buffer, res[:queryStart]) - return buffer[:n], true + return buffer[:queryStart], true } // RequestLatency returns the latency of the request in nanoseconds diff --git a/pkg/network/protocols/http2/model_test.go b/pkg/network/protocols/http2/model_test.go index 818b0144b3773..f52c7635ddb01 100644 --- a/pkg/network/protocols/http2/model_test.go +++ b/pkg/network/protocols/http2/model_test.go @@ -9,14 +9,87 @@ package http2 import ( "fmt" + "strings" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/net/http2/hpack" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" ) +func TestHTTP2LongPath(t *testing.T) { + tests := []struct { + name string + rawPath string + expectedPath string + huffmanEnabled bool + outBufSize int + }{ + { + name: "Long path with huffman with bigger out buffer", + rawPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path+1)), + huffmanEnabled: true, + }, + { + name: "Long path with huffman with shorter out buffer", + rawPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path+1)), + expectedPath: fmt.Sprintf("/%s", strings.Repeat("a", 19)), + huffmanEnabled: true, + outBufSize: 20, + }, + { + name: "Long path without huffman with bigger out buffer", + rawPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path+1)), + // The path is truncated to maxHTTP2Path (including the leading '/') + expectedPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path-1)), + }, + { + name: "Long path without huffman with shorter out buffer", + rawPath: fmt.Sprintf("/%s", strings.Repeat("a", maxHTTP2Path+1)), + expectedPath: 
fmt.Sprintf("/%s", strings.Repeat("a", 19)), + outBufSize: 20, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var buf []byte + var arr [maxHTTP2Path]uint8 + if tt.huffmanEnabled { + buf = hpack.AppendHuffmanString(buf, tt.rawPath) + } else { + buf = append(buf, tt.rawPath...) + } + copy(arr[:], buf) + + request := &EbpfTx{ + Stream: HTTP2Stream{ + Path: http2Path{ + Is_huffman_encoded: tt.huffmanEnabled, + Raw_buffer: arr, + Length: uint8(len(buf)), + }, + }, + } + + if tt.outBufSize == 0 { + tt.outBufSize = http.BufferSize + } + outBuf := make([]byte, tt.outBufSize) + + path, ok := request.Path(outBuf) + require.True(t, ok) + expectedPath := tt.rawPath + if tt.expectedPath != "" { + expectedPath = tt.expectedPath + } + assert.Equal(t, expectedPath, string(path)) + }) + } +} + func TestHTTP2Path(t *testing.T) { tests := []struct { name string @@ -25,13 +98,9 @@ func TestHTTP2Path(t *testing.T) { expectedErr bool }{ { - name: "Short path", + name: "Sanity", rawPath: "/hello.HelloService/SayHello", }, - { - name: "Long path", - rawPath: "/resourcespb.ResourceTagging/GetResourceTags", - }, { name: "Path does not start with /", rawPath: "hello.HelloService/SayHello", diff --git a/pkg/network/protocols/http2/protocol.go b/pkg/network/protocols/http2/protocol.go index 1380073499693..cb90a5a48700d 100644 --- a/pkg/network/protocols/http2/protocol.go +++ b/pkg/network/protocols/http2/protocol.go @@ -401,7 +401,7 @@ func (p *Protocol) setupHTTP2InFlightMapCleaner(mgr *manager.Manager) { log.Errorf("error getting %q map: %s", InFlightMap, err) return } - mapCleaner, err := ddebpf.NewMapCleaner[HTTP2StreamKey, HTTP2Stream](http2Map, 1024, InFlightMap, "usm_monitor") + mapCleaner, err := ddebpf.NewMapCleaner[HTTP2StreamKey, HTTP2Stream](http2Map, protocols.DefaultMapCleanerBatchSize, InFlightMap, "usm_monitor") if err != nil { log.Errorf("error creating map cleaner: %s", err) return diff --git a/pkg/network/protocols/kafka/protocol.go b/pkg/network/protocols/kafka/protocol.go index 54bcb67ff5fdf..521610a7c8fca 100644 --- a/pkg/network/protocols/kafka/protocol.go +++ b/pkg/network/protocols/kafka/protocol.go @@ -335,7 +335,7 @@ func (p *protocol) setupInFlightMapCleaner(mgr *manager.Manager) error { if err != nil { return err } - mapCleaner, err := ddebpf.NewMapCleaner[KafkaTransactionKey, KafkaTransaction](kafkaInFlight, 1024, inFlightMap, "usm_monitor") + mapCleaner, err := ddebpf.NewMapCleaner[KafkaTransactionKey, KafkaTransaction](kafkaInFlight, protocols.DefaultMapCleanerBatchSize, inFlightMap, "usm_monitor") if err != nil { return err } diff --git a/pkg/network/protocols/postgres/protocol.go b/pkg/network/protocols/postgres/protocol.go index c0d7aa37a6747..b90d5a17403bb 100644 --- a/pkg/network/protocols/postgres/protocol.go +++ b/pkg/network/protocols/postgres/protocol.go @@ -259,7 +259,7 @@ func (p *protocol) setupMapCleaner(mgr *manager.Manager) { log.Errorf("error getting %s map: %s", InFlightMap, err) return } - mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, postgresebpf.EbpfTx](postgresInflight, 1024, InFlightMap, "usm_monitor") + mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, postgresebpf.EbpfTx](postgresInflight, protocols.DefaultMapCleanerBatchSize, InFlightMap, "usm_monitor") if err != nil { log.Errorf("error creating map cleaner: %s", err) return diff --git a/pkg/network/protocols/protocols.go b/pkg/network/protocols/protocols.go index acec71ad05d33..5d5e1220959d6 100644 --- a/pkg/network/protocols/protocols.go +++ 
b/pkg/network/protocols/protocols.go @@ -23,6 +23,8 @@ const ( TLSDispatcherProgramsMap = "tls_process_progs" ProtocolDispatcherClassificationPrograms = "dispatcher_classification_progs" TLSProtocolDispatcherClassificationPrograms = "tls_dispatcher_classification_progs" + + DefaultMapCleanerBatchSize = 1 ) // Protocol is the interface that represents a protocol supported by USM. diff --git a/pkg/network/protocols/redis/protocol.go b/pkg/network/protocols/redis/protocol.go index 831d019b30416..575e6460c729a 100644 --- a/pkg/network/protocols/redis/protocol.go +++ b/pkg/network/protocols/redis/protocol.go @@ -174,7 +174,7 @@ func (p *protocol) setupMapCleaner(mgr *manager.Manager) { return } - mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, EbpfTx](redisInFlight, 1024, inFlightMap, "usm_monitor") + mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, EbpfTx](redisInFlight, protocols.DefaultMapCleanerBatchSize, inFlightMap, "usm_monitor") if err != nil { log.Errorf("error creating map cleaner: %s", err) return diff --git a/pkg/network/protocols/telemetry/metric_group.go b/pkg/network/protocols/telemetry/metric_group.go index c1036d4aadc05..ce5eef1bcefa1 100644 --- a/pkg/network/protocols/telemetry/metric_group.go +++ b/pkg/network/protocols/telemetry/metric_group.go @@ -97,7 +97,7 @@ func (mg *MetricGroup) Summary() string { ) // safeguard against division by zero - if timeDelta == 0 { + if timeDelta < 1 { timeDelta = 1 } diff --git a/pkg/network/protocols/tls/types.go b/pkg/network/protocols/tls/types.go new file mode 100644 index 0000000000000..c3014c3f65f3e --- /dev/null +++ b/pkg/network/protocols/tls/types.go @@ -0,0 +1,135 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +// Package tls contains definitions and methods related to tags parsed from the TLS handshake +package tls + +import ( + "crypto/tls" + "fmt" +) + +// Constants for tag keys +const ( + TagTLSVersion = "tls.version:" + TagTLSCipherSuiteID = "tls.cipher_suite_id:" + TagTLSClientVersion = "tls.client_version:" + version10 = "tls_1.0" + version11 = "tls_1.1" + version12 = "tls_1.2" + version13 = "tls_1.3" +) + +// Bitmask constants for Offered_versions matching kernelspace definitions +const ( + OfferedTLSVersion10 uint8 = 0x01 + OfferedTLSVersion11 uint8 = 0x02 + OfferedTLSVersion12 uint8 = 0x04 + OfferedTLSVersion13 uint8 = 0x08 +) + +// VersionTags maps TLS versions to tag names for server chosen version (exported for testing) +var VersionTags = map[uint16]string{ + tls.VersionTLS10: TagTLSVersion + version10, + tls.VersionTLS11: TagTLSVersion + version11, + tls.VersionTLS12: TagTLSVersion + version12, + tls.VersionTLS13: TagTLSVersion + version13, +} + +// ClientVersionTags maps TLS versions to tag names for client offered versions (exported for testing) +var ClientVersionTags = map[uint16]string{ + tls.VersionTLS10: TagTLSClientVersion + version10, + tls.VersionTLS11: TagTLSClientVersion + version11, + tls.VersionTLS12: TagTLSClientVersion + version12, + tls.VersionTLS13: TagTLSClientVersion + version13, +} + +// Mapping of offered version bitmasks to version constants +var offeredVersionBitmask = []struct { + bitMask uint8 + version uint16 +}{ + {OfferedTLSVersion10, tls.VersionTLS10}, + {OfferedTLSVersion11, tls.VersionTLS11}, + {OfferedTLSVersion12, tls.VersionTLS12}, + {OfferedTLSVersion13, tls.VersionTLS13}, +} + +// Tags holds the TLS tags. It is used to store the TLS version, cipher suite and offered versions. +// We can't use the struct from eBPF as the definition is shared with windows. 
+type Tags struct { + ChosenVersion uint16 + CipherSuite uint16 + OfferedVersions uint8 +} + +// MergeWith merges the tags from another Tags struct into this one +func (t *Tags) MergeWith(that Tags) { + if t.ChosenVersion == 0 { + t.ChosenVersion = that.ChosenVersion + } + if t.CipherSuite == 0 { + t.CipherSuite = that.CipherSuite + } + if t.OfferedVersions == 0 { + t.OfferedVersions = that.OfferedVersions + } + +} + +// IsEmpty returns true if all fields are zero +func (t *Tags) IsEmpty() bool { + if t == nil { + return true + } + return t.ChosenVersion == 0 && t.CipherSuite == 0 && t.OfferedVersions == 0 +} + +// String returns a string representation of the Tags struct +func (t *Tags) String() string { + return fmt.Sprintf("ChosenVersion: %d, CipherSuite: %d, OfferedVersions: %d", t.ChosenVersion, t.CipherSuite, t.OfferedVersions) +} + +// parseOfferedVersions parses the Offered_versions bitmask into a slice of version strings +func parseOfferedVersions(offeredVersions uint8) []string { + versions := make([]string, 0, len(offeredVersionBitmask)) + for _, ov := range offeredVersionBitmask { + if (offeredVersions & ov.bitMask) != 0 { + if name := ClientVersionTags[ov.version]; name != "" { + versions = append(versions, name) + } + } + } + return versions +} + +func hexCipherSuiteTag(cipherSuite uint16) string { + return fmt.Sprintf("%s0x%04X", TagTLSCipherSuiteID, cipherSuite) +} + +// GetDynamicTags generates dynamic tags based on TLS information +func (t *Tags) GetDynamicTags() map[string]struct{} { + if t.IsEmpty() { + return nil + } + tags := make(map[string]struct{}) + + // Server chosen version + if tag, ok := VersionTags[t.ChosenVersion]; ok { + tags[tag] = struct{}{} + } + + // Client offered versions + for _, versionName := range parseOfferedVersions(t.OfferedVersions) { + tags[versionName] = struct{}{} + } + + // Cipher suite ID as hex string + if t.CipherSuite != 0 { + tags[hexCipherSuiteTag(t.CipherSuite)] = struct{}{} + } + + return tags +} diff --git a/pkg/network/protocols/tls/types_test.go b/pkg/network/protocols/tls/types_test.go new file mode 100644 index 0000000000000..979cc2bbdba32 --- /dev/null +++ b/pkg/network/protocols/tls/types_test.go @@ -0,0 +1,128 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
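A standalone sketch of how the new Tags type might be consumed, based only on the methods above: merge the client-side and server-side observations (MergeWith fills zero-valued fields only), then emit the dynamic tags. The tlstags alias just avoids the clash with crypto/tls; cipher suite 0x1301 is TLS_AES_128_GCM_SHA256.

package main

import (
	"crypto/tls"
	"fmt"

	tlstags "github.com/DataDog/datadog-agent/pkg/network/protocols/tls"
)

func main() {
	// Observed client side: offered versions only.
	client := tlstags.Tags{
		OfferedVersions: tlstags.OfferedTLSVersion12 | tlstags.OfferedTLSVersion13,
	}
	// Observed server side: negotiated version and cipher suite.
	server := tlstags.Tags{
		ChosenVersion: tls.VersionTLS13,
		CipherSuite:   0x1301,
	}

	merged := client
	merged.MergeWith(server) // zero-valued fields are filled in, existing ones kept

	// Expected tags: tls.version:tls_1.3, tls.client_version:tls_1.2,
	// tls.client_version:tls_1.3 and tls.cipher_suite_id:0x1301.
	for tag := range merged.GetDynamicTags() {
		fmt.Println(tag)
	}
}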
+ +package tls + +import ( + "crypto/tls" + "fmt" + "reflect" + "testing" +) + +func TestParseOfferedVersions(t *testing.T) { + tests := []struct { + offeredVersions uint8 + expected []string + }{ + {0x00, []string{}}, // No versions offered + {OfferedTLSVersion10, []string{"tls.client_version:tls_1.0"}}, + {OfferedTLSVersion11, []string{"tls.client_version:tls_1.1"}}, + {OfferedTLSVersion12, []string{"tls.client_version:tls_1.2"}}, + {OfferedTLSVersion13, []string{"tls.client_version:tls_1.3"}}, + {OfferedTLSVersion10 | OfferedTLSVersion12, []string{"tls.client_version:tls_1.0", "tls.client_version:tls_1.2"}}, + {OfferedTLSVersion11 | OfferedTLSVersion13, []string{"tls.client_version:tls_1.1", "tls.client_version:tls_1.3"}}, + {0xFF, []string{"tls.client_version:tls_1.0", "tls.client_version:tls_1.1", "tls.client_version:tls_1.2", "tls.client_version:tls_1.3"}}, // All bits set + {0x40, []string{}}, // Undefined bit set + {0x80, []string{}}, // Undefined bit set + } + + for _, test := range tests { + t.Run(fmt.Sprintf("OfferedVersions_0x%02X", test.offeredVersions), func(t *testing.T) { + result := parseOfferedVersions(test.offeredVersions) + if !reflect.DeepEqual(result, test.expected) { + t.Errorf("parseOfferedVersions(0x%02X) = %v; want %v", test.offeredVersions, result, test.expected) + } + }) + } +} + +func TestGetTLSDynamicTags(t *testing.T) { + tests := []struct { + name string + tlsTags *Tags + expected map[string]struct{} + }{ + { + name: "Nil_TLSTags", + tlsTags: nil, + expected: nil, + }, + { + name: "All_Fields_Populated", + tlsTags: &Tags{ + ChosenVersion: tls.VersionTLS12, + CipherSuite: 0x009C, + OfferedVersions: OfferedTLSVersion11 | OfferedTLSVersion12, + }, + expected: map[string]struct{}{ + "tls.version:tls_1.2": {}, + "tls.cipher_suite_id:0x009C": {}, + "tls.client_version:tls_1.1": {}, + "tls.client_version:tls_1.2": {}, + }, + }, + { + name: "Unknown_Chosen_Version", + tlsTags: &Tags{ + ChosenVersion: 0xFFFF, // Unknown version + CipherSuite: 0x00FF, + OfferedVersions: OfferedTLSVersion13, + }, + expected: map[string]struct{}{ + "tls.cipher_suite_id:0x00FF": {}, + "tls.client_version:tls_1.3": {}, + }, + }, + { + name: "No_Offered_Versions", + tlsTags: &Tags{ + ChosenVersion: tls.VersionTLS13, + CipherSuite: 0x1301, + OfferedVersions: 0x00, + }, + expected: map[string]struct{}{ + "tls.version:tls_1.3": {}, + "tls.cipher_suite_id:0x1301": {}, + }, + }, + { + name: "Zero_Cipher_Suite", + tlsTags: &Tags{ + ChosenVersion: tls.VersionTLS10, + OfferedVersions: OfferedTLSVersion10, + }, + expected: map[string]struct{}{ + "tls.version:tls_1.0": {}, + "tls.client_version:tls_1.0": {}, + }, + }, + { + name: "All_Bits_Set_In_Offered_Versions", + tlsTags: &Tags{ + ChosenVersion: tls.VersionTLS12, + CipherSuite: 0xC02F, + OfferedVersions: 0xFF, // All bits set + }, + expected: map[string]struct{}{ + "tls.version:tls_1.2": {}, + "tls.cipher_suite_id:0xC02F": {}, + "tls.client_version:tls_1.0": {}, + "tls.client_version:tls_1.1": {}, + "tls.client_version:tls_1.2": {}, + "tls.client_version:tls_1.3": {}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := test.tlsTags.GetDynamicTags() + if !reflect.DeepEqual(result, test.expected) { + t.Errorf("GetDynamicTags(%v) = %v; want %v", test.tlsTags, result, test.expected) + } + }) + } +} diff --git a/pkg/network/state.go b/pkg/network/state.go index 39094959989ac..48f07d83242d7 100644 --- a/pkg/network/state.go +++ b/pkg/network/state.go @@ -1420,6 +1420,7 @@ func (ac *aggregateConnection) 
merge(c *ConnectionStats) { } ac.ProtocolStack.MergeWith(c.ProtocolStack) + ac.TLSTags.MergeWith(c.TLSTags) if ac.DNSStats == nil { ac.DNSStats = c.DNSStats @@ -1483,6 +1484,7 @@ func (ns *networkState) mergeConnectionStats(a, b *ConnectionStats) (collision b } a.ProtocolStack.MergeWith(b.ProtocolStack) + a.TLSTags.MergeWith(b.TLSTags) return false } diff --git a/pkg/network/state_linux_test.go b/pkg/network/state_linux_test.go index 2a1f3a9b4df80..a7ec0737efd14 100644 --- a/pkg/network/state_linux_test.go +++ b/pkg/network/state_linux_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux +//go:build linux_bpf package network diff --git a/pkg/network/tracer/connection/batch_extractor.go b/pkg/network/tracer/connection/batch_extractor.go index bfe97da86c784..27a836ba79d7e 100644 --- a/pkg/network/tracer/connection/batch_extractor.go +++ b/pkg/network/tracer/connection/batch_extractor.go @@ -5,7 +5,7 @@ //go:build linux_bpf -package connection //nolint:revive // TODO +package connection import ( "time" diff --git a/pkg/network/tracer/connection/ebpf_tracer.go b/pkg/network/tracer/connection/ebpf_tracer.go index 9fc47be3a76d3..56350cbd1f1eb 100644 --- a/pkg/network/tracer/connection/ebpf_tracer.go +++ b/pkg/network/tracer/connection/ebpf_tracer.go @@ -5,13 +5,13 @@ //go:build linux_bpf +// Package connection provides tracing for connections package connection import ( "errors" "fmt" "io" - "math" "sync" "time" @@ -20,46 +20,45 @@ import ( "github.com/cilium/ebpf" "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" + "golang.org/x/sys/unix" telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/maps" + "github.com/DataDog/datadog-agent/pkg/ebpf/perf" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" - "github.com/DataDog/datadog-agent/pkg/network/protocols" "github.com/DataDog/datadog-agent/pkg/network/tracer/connection/fentry" "github.com/DataDog/datadog-agent/pkg/network/tracer/connection/kprobe" "github.com/DataDog/datadog-agent/pkg/network/tracer/connection/util" "github.com/DataDog/datadog-agent/pkg/telemetry" + "github.com/DataDog/datadog-agent/pkg/util/encoding" "github.com/DataDog/datadog-agent/pkg/util/log" + ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) const ( - defaultClosedChannelSize = 500 - defaultFailedChannelSize = 500 - connTracerModuleName = "network_tracer__ebpf" + connTracerModuleName = "network_tracer__ebpf" ) var tcpOngoingConnectMapTTL = 30 * time.Minute.Nanoseconds() +var tlsTagsMapTTL = 3 * time.Minute.Nanoseconds() -var EbpfTracerTelemetry = struct { //nolint:revive // TODO +// EbpfTracerTelemetry holds telemetry from the EBPF tracer +var EbpfTracerTelemetry = struct { connections telemetry.Gauge tcpFailedConnects *prometheus.Desc - TcpSentMiscounts *prometheus.Desc //nolint:revive // TODO - unbatchedTcpClose *prometheus.Desc //nolint:revive // TODO - unbatchedUdpClose *prometheus.Desc //nolint:revive // TODO - UdpSendsProcessed *prometheus.Desc //nolint:revive // TODO - UdpSendsMissed *prometheus.Desc //nolint:revive // TODO - UdpDroppedConns *prometheus.Desc //nolint:revive // 
TODO - // doubleFlushAttemptsClose is a counter measuring the number of attempts to flush a closed connection twice from tcp_close - doubleFlushAttemptsClose *prometheus.Desc - // doubleFlushAttemptsDone is a counter measuring the number of attempts to flush a closed connection twice from tcp_done - doubleFlushAttemptsDone *prometheus.Desc - // unsupportedTcpFailures is a counter measuring the number of attempts to flush a TCP failure that is not supported - unsupportedTcpFailures *prometheus.Desc //nolint:revive // TODO + tcpSentMiscounts *prometheus.Desc + unbatchedTCPClose *prometheus.Desc + unbatchedUDPClose *prometheus.Desc + udpSendsProcessed *prometheus.Desc + udpSendsMissed *prometheus.Desc + udpDroppedConns *prometheus.Desc + // unsupportedTCPFailures is a counter measuring the number of attempts to flush a TCP failure that is not supported + unsupportedTCPFailures *prometheus.Desc // tcpDoneMissingPid is a counter measuring the number of TCP connections with a PID mismatch between tcp_connect and tcp_done tcpDoneMissingPid *prometheus.Desc tcpConnectFailedTuple *prometheus.Desc @@ -73,27 +72,23 @@ var EbpfTracerTelemetry = struct { //nolint:revive // TODO iterationDups telemetry.Counter iterationAborts telemetry.Counter - lastTcpFailedConnects *atomic.Int64 //nolint:revive // TODO - LastTcpSentMiscounts *atomic.Int64 //nolint:revive // TODO - lastUnbatchedTcpClose *atomic.Int64 //nolint:revive // TODO - lastUnbatchedUdpClose *atomic.Int64 //nolint:revive // TODO - lastUdpSendsProcessed *atomic.Int64 //nolint:revive // TODO - lastUdpSendsMissed *atomic.Int64 //nolint:revive // TODO - lastUdpDroppedConns *atomic.Int64 //nolint:revive // TODO - // lastDoubleFlushAttemptsClose is a counter measuring the diff between the last two values of doubleFlushAttemptsClose - lastDoubleFlushAttemptsClose *atomic.Int64 - // lastDoubleFlushAttemptsDone is a counter measuring the diff between the last two values of doubleFlushAttemptsDone - lastDoubleFlushAttemptsDone *atomic.Int64 - // lastUnsupportedTcpFailures is a counter measuring the diff between the last two values of unsupportedTcpFailures - lastUnsupportedTcpFailures *atomic.Int64 //nolint:revive // TODO - // lastTcpDoneMissingPid is a counter measuring the diff between the last two values of tcpDoneMissingPid - lastTcpDoneMissingPid *atomic.Int64 //nolint:revive // TODO - lastTcpConnectFailedTuple *atomic.Int64 //nolint:revive // TODO - lastTcpDoneFailedTuple *atomic.Int64 //nolint:revive // TODO - lastTcpFinishConnectFailedTuple *atomic.Int64 //nolint:revive // TODO - lastTcpCloseTargetFailures *atomic.Int64 //nolint:revive // TODO - lastTcpDoneConnectionFlush *atomic.Int64 //nolint:revive // TODO - lastTcpCloseConnectionFlush *atomic.Int64 //nolint:revive // TODO + lastTCPFailedConnects *atomic.Int64 + LastTCPSentMiscounts *atomic.Int64 + lastUnbatchedTCPClose *atomic.Int64 + lastUnbatchedUDPClose *atomic.Int64 + lastUDPSendsProcessed *atomic.Int64 + lastUDPSendsMissed *atomic.Int64 + lastUDPDroppedConns *atomic.Int64 + // lastUnsupportedTCPFailures is a counter measuring the diff between the last two values of unsupportedTCPFailures + lastUnsupportedTCPFailures *atomic.Int64 + // lastTCPDoneMissingPid is a counter measuring the diff between the last two values of tcpDoneMissingPid + lastTCPDoneMissingPid *atomic.Int64 + lastTCPConnectFailedTuple *atomic.Int64 + lastTCPDoneFailedTuple *atomic.Int64 + lastTCPFinishConnectFailedTuple *atomic.Int64 + lastTCPCloseTargetFailures *atomic.Int64 + lastTCPDoneConnectionFlush *atomic.Int64 + 
lastTCPCloseConnectionFlush *atomic.Int64 }{ telemetry.NewGauge(connTracerModuleName, "connections", []string{"ip_proto", "family"}, "Gauge measuring the number of active connections in the EBPF map"), prometheus.NewDesc(connTracerModuleName+"__tcp_failed_connects", "Counter measuring the number of failed TCP connections in the EBPF map", nil, nil), @@ -103,8 +98,6 @@ var EbpfTracerTelemetry = struct { //nolint:revive // TODO prometheus.NewDesc(connTracerModuleName+"__udp_sends_processed", "Counter measuring the number of processed UDP sends in EBPF", nil, nil), prometheus.NewDesc(connTracerModuleName+"__udp_sends_missed", "Counter measuring failures to process UDP sends in EBPF", nil, nil), prometheus.NewDesc(connTracerModuleName+"__udp_dropped_conns", "Counter measuring the number of dropped UDP connections in the EBPF map", nil, nil), - prometheus.NewDesc(connTracerModuleName+"__double_flush_attempts_close", "Counter measuring the number of attempts to flush a closed connection twice from tcp_close", nil, nil), - prometheus.NewDesc(connTracerModuleName+"__double_flush_attempts_done", "Counter measuring the number of attempts to flush a closed connection twice from tcp_done", nil, nil), prometheus.NewDesc(connTracerModuleName+"__unsupported_tcp_failures", "Counter measuring the number of attempts to flush a TCP failure that is not supported", nil, nil), prometheus.NewDesc(connTracerModuleName+"__tcp_done_missing_pid", "Counter measuring the number of TCP connections with a missing PID in tcp_done", nil, nil), prometheus.NewDesc(connTracerModuleName+"__tcp_connect_failed_tuple", "Counter measuring the number of failed TCP connections due to tuple collisions", nil, nil), @@ -132,12 +125,10 @@ var EbpfTracerTelemetry = struct { //nolint:revive // TODO atomic.NewInt64(0), atomic.NewInt64(0), atomic.NewInt64(0), - atomic.NewInt64(0), - atomic.NewInt64(0), } type ebpfTracer struct { - m *manager.Manager + m *ddebpf.Manager conns *maps.GenericMap[netebpf.ConnTuple, netebpf.ConnStats] tcpStats *maps.GenericMap[netebpf.ConnTuple, netebpf.TCPStats] @@ -149,6 +140,8 @@ type ebpfTracer struct { // periodically clean the ongoing connection pid map ongoingConnectCleaner *ddebpf.MapCleaner[netebpf.SkpConn, netebpf.PidTs] + // periodically clean the enhanced TLS tags map + TLSTagsCleaner *ddebpf.MapCleaner[netebpf.ConnTuple, netebpf.TLSTagsWrapper] removeTuple *netebpf.ConnTuple @@ -157,8 +150,6 @@ type ebpfTracer struct { ebpfTracerType TracerType - exitTelemetry chan struct{} - ch *cookieHasher } @@ -177,6 +168,7 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace probes.PortBindingsMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, probes.UDPPortBindingsMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, probes.ConnectionProtocolMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, + probes.EnhancedTLSTagsMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, probes.ConnectionTupleToSocketSKBConnMap: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, probes.TCPOngoingConnectPid: {MaxEntries: config.MaxTrackedConnections, EditorFlag: manager.EditMaxEntries}, probes.TCPRecvMsgArgsMap: {MaxEntries: config.MaxTrackedConnections / 32, EditorFlag: manager.EditMaxEntries}, @@ -194,24 +186,36 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace manager.ConstantEditor{Name: 
"ephemeral_range_begin", Value: uint64(begin)}, manager.ConstantEditor{Name: "ephemeral_range_end", Value: uint64(end)}) - closedChannelSize := defaultClosedChannelSize - if config.ClosedChannelSize > 0 { - closedChannelSize = config.ClosedChannelSize + connPool := ddsync.NewDefaultTypedPool[network.ConnectionStats]() + var extractor *batchExtractor + + util.AddBoolConst(&mgrOptions, "batching_enabled", config.CustomBatchingEnabled) + if config.CustomBatchingEnabled { + numCPUs, err := ebpf.PossibleCPU() + if err != nil { + return nil, fmt.Errorf("could not determine number of CPUs: %w", err) + } + extractor = newBatchExtractor(numCPUs) + mgrOptions.MapSpecEditors[probes.ConnCloseBatchMap] = manager.MapSpecEditor{ + MaxEntries: uint32(numCPUs), + EditorFlag: manager.EditMaxEntries, + } } - var connCloseEventHandler ddebpf.EventHandler - var failedConnsHandler ddebpf.EventHandler - if config.RingBufferSupportedNPM() { - connCloseEventHandler = ddebpf.NewRingBufferHandler(closedChannelSize) - failedConnsHandler = ddebpf.NewRingBufferHandler(defaultFailedChannelSize) - } else { - connCloseEventHandler = ddebpf.NewPerfHandler(closedChannelSize) - failedConnsHandler = ddebpf.NewPerfHandler(defaultFailedChannelSize) + + tr := &ebpfTracer{ + removeTuple: &netebpf.ConnTuple{}, + ch: newCookieHasher(), + } + + connCloseEventHandler, err := initClosedConnEventHandler(config, tr.closedPerfCallback, connPool, extractor) + if err != nil { + return nil, err } - var m *manager.Manager - var tracerType TracerType = TracerTypeFentry //nolint:revive // TODO + var m *ddebpf.Manager + var tracerType = TracerTypeFentry var closeTracerFn func() - m, closeTracerFn, err := fentry.LoadTracer(config, mgrOptions, connCloseEventHandler) + m, closeTracerFn, err = fentry.LoadTracer(config, mgrOptions, connCloseEventHandler) if err != nil && !errors.Is(err, fentry.ErrorNotSupported) { // failed to load fentry tracer return nil, err @@ -221,26 +225,23 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace // load the kprobe tracer log.Info("loading kprobe-based tracer") var kprobeTracerType kprobe.TracerType - m, closeTracerFn, kprobeTracerType, err = kprobe.LoadTracer(config, mgrOptions, connCloseEventHandler, failedConnsHandler) + m, closeTracerFn, kprobeTracerType, err = kprobe.LoadTracer(config, mgrOptions, connCloseEventHandler) if err != nil { return nil, err } tracerType = TracerType(kprobeTracerType) } m.DumpHandler = dumpMapsHandler - ddebpf.AddNameMappings(m, "npm_tracer") + ddebpf.AddNameMappings(m.Manager, "npm_tracer") - numCPUs, err := ebpf.PossibleCPU() - if err != nil { - return nil, fmt.Errorf("could not determine number of CPUs: %w", err) - } - extractor := newBatchExtractor(numCPUs) - batchMgr, err := newConnBatchManager(m, extractor) - if err != nil { - return nil, fmt.Errorf("could not create connection batch manager: %w", err) + var flusher perf.Flusher = connCloseEventHandler + if config.CustomBatchingEnabled { + flusher, err = newConnBatchManager(m.Manager, extractor, connPool, tr.closedPerfCallback) + if err != nil { + return nil, err + } } - - closeConsumer := newTCPCloseConsumer(connCloseEventHandler, batchMgr) + tr.closeConsumer = newTCPCloseConsumer(flusher, connPool) // Failed connections are not supported on prebuilt if tracerType == TracerTypeKProbePrebuilt { @@ -250,32 +251,26 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace config.TCPFailedConnectionsEnabled = false } - tr := &ebpfTracer{ - m: m, - config: config, - 
closeConsumer: closeConsumer, - removeTuple: &netebpf.ConnTuple{}, - closeTracer: closeTracerFn, - ebpfTracerType: tracerType, - exitTelemetry: make(chan struct{}), - ch: newCookieHasher(), - } + tr.m = m + tr.config = config + tr.closeTracer = closeTracerFn + tr.ebpfTracerType = tracerType - tr.setupMapCleaner(m) + tr.setupMapCleaners(m.Manager) - tr.conns, err = maps.GetMap[netebpf.ConnTuple, netebpf.ConnStats](m, probes.ConnMap) + tr.conns, err = maps.GetMap[netebpf.ConnTuple, netebpf.ConnStats](m.Manager, probes.ConnMap) if err != nil { tr.Stop() return nil, fmt.Errorf("error retrieving the bpf %s map: %s", probes.ConnMap, err) } - tr.tcpStats, err = maps.GetMap[netebpf.ConnTuple, netebpf.TCPStats](m, probes.TCPStatsMap) + tr.tcpStats, err = maps.GetMap[netebpf.ConnTuple, netebpf.TCPStats](m.Manager, probes.TCPStatsMap) if err != nil { tr.Stop() return nil, fmt.Errorf("error retrieving the bpf %s map: %s", probes.TCPStatsMap, err) } - if tr.tcpRetransmits, err = maps.GetMap[netebpf.ConnTuple, uint32](m, probes.TCPRetransmitsMap); err != nil { + if tr.tcpRetransmits, err = maps.GetMap[netebpf.ConnTuple, uint32](m.Manager, probes.TCPRetransmitsMap); err != nil { tr.Stop() return nil, fmt.Errorf("error retrieving the bpf %s map: %s", probes.TCPRetransmitsMap, err) } @@ -283,6 +278,63 @@ func newEbpfTracer(config *config.Config, _ telemetryComponent.Component) (Trace return tr, nil } +func initClosedConnEventHandler(config *config.Config, closedCallback func(*network.ConnectionStats), pool ddsync.Pool[network.ConnectionStats], extractor *batchExtractor) (*perf.EventHandler, error) { + connHasher := newCookieHasher() + singleConnHandler := encoding.BinaryUnmarshalCallback(pool.Get, func(b *network.ConnectionStats, err error) { + if err != nil { + if b != nil { + pool.Put(b) + } + log.Debug(err.Error()) + return + } + if b != nil { + connHasher.Hash(b) + } + closedCallback(b) + }) + + handler := singleConnHandler + perfMode := perf.WakeupEvents(config.ClosedBufferWakeupCount) + // multiply by number of connections with in-buffer batching to have same effective size as with custom batching + chanSize := config.ClosedChannelSize * config.ClosedBufferWakeupCount + if config.CustomBatchingEnabled { + perfMode = perf.Watermark(1) + chanSize = config.ClosedChannelSize + handler = func(buf []byte) { + l := len(buf) + switch { + case l >= netebpf.SizeofBatch: + b := netebpf.ToBatch(buf) + for rc := extractor.NextConnection(b); rc != nil; rc = extractor.NextConnection(b) { + c := pool.Get() + c.FromConn(rc) + connHasher.Hash(c) + + closedCallback(c) + } + case l >= netebpf.SizeofConn: + singleConnHandler(buf) + case l == 0: + singleConnHandler(nil) + default: + log.Debugf("unexpected %q binary data of size %d bytes", probes.ConnCloseEventMap, l) + } + } + } + + perfBufferSize := util.ComputeDefaultClosedConnPerfBufferSize() + mode := perf.UsePerfBuffers(perfBufferSize, chanSize, perfMode) + if config.RingBufferSupportedNPM() { + mode = perf.UpgradePerfBuffers(perfBufferSize, chanSize, perfMode, util.ComputeDefaultClosedConnRingBufferSize()) + } + + return perf.NewEventHandler(probes.ConnCloseEventMap, handler, mode, + perf.SendTelemetry(config.InternalTelemetryEnabled), + perf.RingBufferEnabledConstantName("ringbuffers_enabled"), + perf.RingBufferWakeupSize("ringbuffer_wakeup_size", uint64(config.ClosedBufferWakeupCount*(netebpf.SizeofConn+unix.BPF_RINGBUF_HDR_SZ)))) +} + func boolConst(name string, value bool) manager.ConstantEditor { c := manager.ConstantEditor{ Name: name, @@ -295,6 +347,10 @@ 
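The new initClosedConnEventHandler above picks a decoding path by payload length: a full batch when custom batching is enabled, a single connection record otherwise, and a nil forward for empty reads. A standalone sketch of that dispatch; the record sizes and callbacks are placeholders standing in for the real netebpf constants and unmarshal callbacks:

package tracersketch

import "log"

// Placeholder sizes standing in for netebpf.SizeofBatch / netebpf.SizeofConn.
const (
	sizeofBatch = 256
	sizeofConn  = 64
)

// dispatchClosedConn routes a raw perf/ring-buffer payload to the batch or
// single-record decoder based on its length, mirroring the handler above.
func dispatchClosedConn(buf []byte, decodeBatch func([]byte), decodeOne func([]byte)) {
	switch l := len(buf); {
	case l >= sizeofBatch:
		decodeBatch(buf)
	case l >= sizeofConn:
		decodeOne(buf)
	case l == 0:
		// empty payloads are still forwarded (as nil) so downstream consumers see the event
		decodeOne(nil)
	default:
		log.Printf("unexpected closed-connection payload of %d bytes", l)
	}
}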
func boolConst(name string, value bool) manager.ConstantEditor { return c } +func (t *ebpfTracer) closedPerfCallback(c *network.ConnectionStats) { + t.closeConsumer.Callback(c) +} + func (t *ebpfTracer) Start(callback func(*network.ConnectionStats)) (err error) { defer func() { if err != nil { @@ -307,11 +363,13 @@ func (t *ebpfTracer) Start(callback func(*network.ConnectionStats)) (err error) return fmt.Errorf("error initializing port binding maps: %s", err) } + t.closeConsumer.Start(callback) + if err := t.m.Start(); err != nil { + t.closeConsumer.Stop() return fmt.Errorf("could not start ebpf manager: %s", err) } - t.closeConsumer.Start(callback) return nil } @@ -334,12 +392,12 @@ func (t *ebpfTracer) FlushPending() { func (t *ebpfTracer) Stop() { t.stopOnce.Do(func() { - close(t.exitTelemetry) - ddebpf.RemoveNameMappings(t.m) - ebpftelemetry.UnregisterTelemetry(t.m) + ddebpf.RemoveNameMappings(t.m.Manager) + ebpftelemetry.UnregisterTelemetry(t.m.Manager) _ = t.m.Stop(manager.CleanAll) t.closeConsumer.Stop() t.ongoingConnectCleaner.Stop() + t.TLSTagsCleaner.Stop() if t.closeTracer != nil { t.closeTracer() } @@ -370,7 +428,7 @@ func (t *ebpfTracer) GetConnections(buffer *network.ConnectionBuffer, filter fun tcp := new(netebpf.TCPStats) var tcp4, tcp6, udp4, udp6 float64 - entries := t.conns.Iterate() + entries := t.conns.IterateWithBatchSize(1000) for entries.Next(key, stats) { if cookie, exists := connsByTuple[*key]; exists && cookie == stats.Cookie { // already seen the connection in current batch processing, @@ -379,7 +437,8 @@ func (t *ebpfTracer) GetConnections(buffer *network.ConnectionBuffer, filter fun continue } - populateConnStats(conn, key, stats, t.ch) + conn.FromTupleAndStats(key, stats) + t.ch.Hash(conn) connsByTuple[*key] = stats.Cookie isTCP := conn.Type == network.TCP @@ -403,7 +462,7 @@ func (t *ebpfTracer) GetConnections(buffer *network.ConnectionBuffer, filter fun } if t.getTCPStats(tcp, key) { - updateTCPStats(conn, tcp) + conn.FromTCPStats(tcp) } if retrans, ok := t.getTCPRetransmits(key, seen); ok && conn.Type == network.TCP { conn.Monotonic.Retransmits = retrans @@ -480,7 +539,7 @@ func (t *ebpfTracer) Remove(conn *network.ConnectionStats) error { func (t *ebpfTracer) getEBPFTelemetry() *netebpf.Telemetry { var zero uint32 - mp, err := maps.GetMap[uint32, netebpf.Telemetry](t.m, probes.TelemetryMap) + mp, err := maps.GetMap[uint32, netebpf.Telemetry](t.m.Manager, probes.TelemetryMap) if err != nil { log.Warnf("error retrieving telemetry map: %s", err) return nil @@ -501,15 +560,13 @@ func (t *ebpfTracer) getEBPFTelemetry() *netebpf.Telemetry { // Describe returns all descriptions of the collector func (t *ebpfTracer) Describe(ch chan<- *prometheus.Desc) { ch <- EbpfTracerTelemetry.tcpFailedConnects - ch <- EbpfTracerTelemetry.TcpSentMiscounts - ch <- EbpfTracerTelemetry.unbatchedTcpClose - ch <- EbpfTracerTelemetry.unbatchedUdpClose - ch <- EbpfTracerTelemetry.UdpSendsProcessed - ch <- EbpfTracerTelemetry.UdpSendsMissed - ch <- EbpfTracerTelemetry.UdpDroppedConns - ch <- EbpfTracerTelemetry.doubleFlushAttemptsClose - ch <- EbpfTracerTelemetry.doubleFlushAttemptsDone - ch <- EbpfTracerTelemetry.unsupportedTcpFailures + ch <- EbpfTracerTelemetry.tcpSentMiscounts + ch <- EbpfTracerTelemetry.unbatchedTCPClose + ch <- EbpfTracerTelemetry.unbatchedUDPClose + ch <- EbpfTracerTelemetry.udpSendsProcessed + ch <- EbpfTracerTelemetry.udpSendsMissed + ch <- EbpfTracerTelemetry.udpDroppedConns + ch <- EbpfTracerTelemetry.unsupportedTCPFailures ch <- 
EbpfTracerTelemetry.tcpDoneMissingPid ch <- EbpfTracerTelemetry.tcpConnectFailedTuple ch <- EbpfTracerTelemetry.tcpDoneFailedTuple @@ -525,72 +582,64 @@ func (t *ebpfTracer) Collect(ch chan<- prometheus.Metric) { if ebpfTelemetry == nil { return } - delta := int64(ebpfTelemetry.Tcp_failed_connect) - EbpfTracerTelemetry.lastTcpFailedConnects.Load() - EbpfTracerTelemetry.lastTcpFailedConnects.Store(int64(ebpfTelemetry.Tcp_failed_connect)) + delta := int64(ebpfTelemetry.Tcp_failed_connect) - EbpfTracerTelemetry.lastTCPFailedConnects.Load() + EbpfTracerTelemetry.lastTCPFailedConnects.Store(int64(ebpfTelemetry.Tcp_failed_connect)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpFailedConnects, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Tcp_sent_miscounts) - EbpfTracerTelemetry.LastTcpSentMiscounts.Load() - EbpfTracerTelemetry.LastTcpSentMiscounts.Store(int64(ebpfTelemetry.Tcp_sent_miscounts)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.TcpSentMiscounts, prometheus.CounterValue, float64(delta)) - - delta = int64(ebpfTelemetry.Unbatched_tcp_close) - EbpfTracerTelemetry.lastUnbatchedTcpClose.Load() - EbpfTracerTelemetry.lastUnbatchedTcpClose.Store(int64(ebpfTelemetry.Unbatched_tcp_close)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.unbatchedTcpClose, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Tcp_sent_miscounts) - EbpfTracerTelemetry.LastTCPSentMiscounts.Load() + EbpfTracerTelemetry.LastTCPSentMiscounts.Store(int64(ebpfTelemetry.Tcp_sent_miscounts)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpSentMiscounts, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Unbatched_udp_close) - EbpfTracerTelemetry.lastUnbatchedUdpClose.Load() - EbpfTracerTelemetry.lastUnbatchedUdpClose.Store(int64(ebpfTelemetry.Unbatched_udp_close)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.unbatchedUdpClose, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Unbatched_tcp_close) - EbpfTracerTelemetry.lastUnbatchedTCPClose.Load() + EbpfTracerTelemetry.lastUnbatchedTCPClose.Store(int64(ebpfTelemetry.Unbatched_tcp_close)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.unbatchedTCPClose, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Udp_sends_processed) - EbpfTracerTelemetry.lastUdpSendsProcessed.Load() - EbpfTracerTelemetry.lastUdpSendsProcessed.Store(int64(ebpfTelemetry.Udp_sends_processed)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.UdpSendsProcessed, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Unbatched_udp_close) - EbpfTracerTelemetry.lastUnbatchedUDPClose.Load() + EbpfTracerTelemetry.lastUnbatchedUDPClose.Store(int64(ebpfTelemetry.Unbatched_udp_close)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.unbatchedUDPClose, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Udp_sends_missed) - EbpfTracerTelemetry.lastUdpSendsMissed.Load() - EbpfTracerTelemetry.lastUdpSendsMissed.Store(int64(ebpfTelemetry.Udp_sends_missed)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.UdpSendsMissed, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Udp_sends_processed) - EbpfTracerTelemetry.lastUDPSendsProcessed.Load() + EbpfTracerTelemetry.lastUDPSendsProcessed.Store(int64(ebpfTelemetry.Udp_sends_processed)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.udpSendsProcessed, prometheus.CounterValue, float64(delta)) - delta = 
int64(ebpfTelemetry.Udp_dropped_conns) - EbpfTracerTelemetry.lastUdpDroppedConns.Load() - EbpfTracerTelemetry.lastUdpDroppedConns.Store(int64(ebpfTelemetry.Udp_dropped_conns)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.UdpDroppedConns, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Udp_sends_missed) - EbpfTracerTelemetry.lastUDPSendsMissed.Load() + EbpfTracerTelemetry.lastUDPSendsMissed.Store(int64(ebpfTelemetry.Udp_sends_missed)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.udpSendsMissed, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Double_flush_attempts_close) - EbpfTracerTelemetry.lastDoubleFlushAttemptsClose.Load() - EbpfTracerTelemetry.lastDoubleFlushAttemptsClose.Store(int64(ebpfTelemetry.Double_flush_attempts_close)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.doubleFlushAttemptsClose, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Udp_dropped_conns) - EbpfTracerTelemetry.lastUDPDroppedConns.Load() + EbpfTracerTelemetry.lastUDPDroppedConns.Store(int64(ebpfTelemetry.Udp_dropped_conns)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.udpDroppedConns, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Double_flush_attempts_done) - EbpfTracerTelemetry.lastDoubleFlushAttemptsDone.Load() - EbpfTracerTelemetry.lastDoubleFlushAttemptsDone.Store(int64(ebpfTelemetry.Double_flush_attempts_done)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.doubleFlushAttemptsDone, prometheus.CounterValue, float64(delta)) + delta = int64(ebpfTelemetry.Unsupported_tcp_failures) - EbpfTracerTelemetry.lastUnsupportedTCPFailures.Load() + EbpfTracerTelemetry.lastUnsupportedTCPFailures.Store(int64(ebpfTelemetry.Unsupported_tcp_failures)) + ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.unsupportedTCPFailures, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Unsupported_tcp_failures) - EbpfTracerTelemetry.lastUnsupportedTcpFailures.Load() - EbpfTracerTelemetry.lastUnsupportedTcpFailures.Store(int64(ebpfTelemetry.Unsupported_tcp_failures)) - ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.unsupportedTcpFailures, prometheus.CounterValue, float64(delta)) - - delta = int64(ebpfTelemetry.Tcp_done_missing_pid) - EbpfTracerTelemetry.lastTcpDoneMissingPid.Load() - EbpfTracerTelemetry.lastTcpDoneMissingPid.Store(int64(ebpfTelemetry.Tcp_done_missing_pid)) + delta = int64(ebpfTelemetry.Tcp_done_missing_pid) - EbpfTracerTelemetry.lastTCPDoneMissingPid.Load() + EbpfTracerTelemetry.lastTCPDoneMissingPid.Store(int64(ebpfTelemetry.Tcp_done_missing_pid)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpDoneMissingPid, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Tcp_connect_failed_tuple) - EbpfTracerTelemetry.lastTcpConnectFailedTuple.Load() - EbpfTracerTelemetry.lastTcpConnectFailedTuple.Store(int64(ebpfTelemetry.Tcp_connect_failed_tuple)) + delta = int64(ebpfTelemetry.Tcp_connect_failed_tuple) - EbpfTracerTelemetry.lastTCPConnectFailedTuple.Load() + EbpfTracerTelemetry.lastTCPConnectFailedTuple.Store(int64(ebpfTelemetry.Tcp_connect_failed_tuple)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpConnectFailedTuple, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Tcp_done_failed_tuple) - EbpfTracerTelemetry.lastTcpDoneFailedTuple.Load() - EbpfTracerTelemetry.lastTcpDoneFailedTuple.Store(int64(ebpfTelemetry.Tcp_done_failed_tuple)) + delta = 
int64(ebpfTelemetry.Tcp_done_failed_tuple) - EbpfTracerTelemetry.lastTCPDoneFailedTuple.Load() + EbpfTracerTelemetry.lastTCPDoneFailedTuple.Store(int64(ebpfTelemetry.Tcp_done_failed_tuple)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpDoneFailedTuple, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Tcp_finish_connect_failed_tuple) - EbpfTracerTelemetry.lastTcpFinishConnectFailedTuple.Load() - EbpfTracerTelemetry.lastTcpFinishConnectFailedTuple.Store(int64(ebpfTelemetry.Tcp_finish_connect_failed_tuple)) + delta = int64(ebpfTelemetry.Tcp_finish_connect_failed_tuple) - EbpfTracerTelemetry.lastTCPFinishConnectFailedTuple.Load() + EbpfTracerTelemetry.lastTCPFinishConnectFailedTuple.Store(int64(ebpfTelemetry.Tcp_finish_connect_failed_tuple)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpFinishConnectFailedTuple, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Tcp_close_target_failures) - EbpfTracerTelemetry.lastTcpCloseTargetFailures.Load() - EbpfTracerTelemetry.lastTcpCloseTargetFailures.Store(int64(ebpfTelemetry.Tcp_close_target_failures)) + delta = int64(ebpfTelemetry.Tcp_close_target_failures) - EbpfTracerTelemetry.lastTCPCloseTargetFailures.Load() + EbpfTracerTelemetry.lastTCPCloseTargetFailures.Store(int64(ebpfTelemetry.Tcp_close_target_failures)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpCloseTargetFailures, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Tcp_done_connection_flush) - EbpfTracerTelemetry.lastTcpDoneConnectionFlush.Load() - EbpfTracerTelemetry.lastTcpDoneConnectionFlush.Store(int64(ebpfTelemetry.Tcp_done_connection_flush)) + delta = int64(ebpfTelemetry.Tcp_done_connection_flush) - EbpfTracerTelemetry.lastTCPDoneConnectionFlush.Load() + EbpfTracerTelemetry.lastTCPDoneConnectionFlush.Store(int64(ebpfTelemetry.Tcp_done_connection_flush)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpDoneConnectionFlush, prometheus.CounterValue, float64(delta)) - delta = int64(ebpfTelemetry.Tcp_close_connection_flush) - EbpfTracerTelemetry.lastTcpCloseConnectionFlush.Load() - EbpfTracerTelemetry.lastTcpCloseConnectionFlush.Store(int64(ebpfTelemetry.Tcp_close_connection_flush)) + delta = int64(ebpfTelemetry.Tcp_close_connection_flush) - EbpfTracerTelemetry.lastTCPCloseConnectionFlush.Load() + EbpfTracerTelemetry.lastTCPCloseConnectionFlush.Store(int64(ebpfTelemetry.Tcp_close_connection_flush)) ch <- prometheus.MustNewConstMetric(EbpfTracerTelemetry.tcpCloseConnectionFlush, prometheus.CounterValue, float64(delta)) } @@ -610,7 +659,7 @@ func (t *ebpfTracer) initializePortBindingMaps() error { return fmt.Errorf("failed to read initial TCP pid->port mapping: %s", err) } - tcpPortMap, err := maps.GetMap[netebpf.PortBinding, uint32](t.m, probes.PortBindingsMap) + tcpPortMap, err := maps.GetMap[netebpf.PortBinding, uint32](t.m.Manager, probes.PortBindingsMap) if err != nil { return fmt.Errorf("failed to get TCP port binding map: %w", err) } @@ -628,7 +677,7 @@ func (t *ebpfTracer) initializePortBindingMaps() error { return fmt.Errorf("failed to read initial UDP pid->port mapping: %s", err) } - udpPortMap, err := maps.GetMap[netebpf.PortBinding, uint32](t.m, probes.UDPPortBindingsMap) + udpPortMap, err := maps.GetMap[netebpf.PortBinding, uint32](t.m.Manager, probes.UDPPortBindingsMap) if err != nil { return fmt.Errorf("failed to get UDP port binding map: %w", err) } @@ -683,8 +732,14 @@ func (t *ebpfTracer) getTCPStats(stats *netebpf.TCPStats, tuple *netebpf.ConnTup return 
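The Collect method above turns monotonically increasing kernel counters into per-scrape Prometheus samples by remembering the previous reading in an atomic and emitting only the difference. A reduced sketch of that pattern, assuming go.uber.org/atomic and client_golang as in the code above:

package tracersketch

import (
	"github.com/prometheus/client_golang/prometheus"
	"go.uber.org/atomic"
)

// deltaCounter reports the change in a monotonic kernel counter since the last scrape.
type deltaCounter struct {
	desc *prometheus.Desc
	last *atomic.Int64
}

func newDeltaCounter(name, help string) *deltaCounter {
	return &deltaCounter{
		desc: prometheus.NewDesc(name, help, nil, nil),
		last: atomic.NewInt64(0),
	}
}

// collect emits current-last as a counter sample and stores the new reading.
func (d *deltaCounter) collect(current uint64, ch chan<- prometheus.Metric) {
	delta := int64(current) - d.last.Load()
	d.last.Store(int64(current))
	ch <- prometheus.MustNewConstMetric(d.desc, prometheus.CounterValue, float64(delta))
}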
t.tcpStats.Lookup(tuple, stats) == nil } -// setupMapCleaner sets up a map cleaner for the tcp_ongoing_connect_pid map -func (t *ebpfTracer) setupMapCleaner(m *manager.Manager) { +// setupMapCleaners sets up the map cleaners for the eBPF maps +func (t *ebpfTracer) setupMapCleaners(m *manager.Manager) { + t.setupOngoingConnectMapCleaner(m) + t.setupTLSTagsMapCleaner(m) +} + +// setupOngoingConnectMapCleaner sets up a map cleaner for the tcp_ongoing_connect_pid map +func (t *ebpfTracer) setupOngoingConnectMapCleaner(m *manager.Manager) { tcpOngoingConnectPidMap, _, err := m.GetMap(probes.TCPOngoingConnectPid) if err != nil { log.Errorf("error getting %v map: %s", probes.TCPOngoingConnectPid, err) @@ -708,80 +763,24 @@ func (t *ebpfTracer) setupMapCleaner(m *manager.Manager) { t.ongoingConnectCleaner = tcpOngoingConnectPidCleaner } -func populateConnStats(stats *network.ConnectionStats, t *netebpf.ConnTuple, s *netebpf.ConnStats, ch *cookieHasher) { - *stats = network.ConnectionStats{ConnectionTuple: network.ConnectionTuple{ - Pid: t.Pid, - NetNS: t.Netns, - Source: t.SourceAddress(), - Dest: t.DestAddress(), - SPort: t.Sport, - DPort: t.Dport, - }, - Monotonic: network.StatCounters{ - SentBytes: s.Sent_bytes, - RecvBytes: s.Recv_bytes, - SentPackets: uint64(s.Sent_packets), - RecvPackets: uint64(s.Recv_packets), - }, - LastUpdateEpoch: s.Timestamp, - IsAssured: s.IsAssured(), - Cookie: network.StatCookie(s.Cookie), - } - - if s.Duration <= uint64(math.MaxInt64) { - stats.Duration = time.Duration(s.Duration) * time.Nanosecond - } - - stats.ProtocolStack = protocols.Stack{ - API: protocols.API(s.Protocol_stack.Api), - Application: protocols.Application(s.Protocol_stack.Application), - Encryption: protocols.Encryption(s.Protocol_stack.Encryption), - } - - if t.Type() == netebpf.TCP { - stats.Type = network.TCP - } else { - stats.Type = network.UDP - } - - switch t.Family() { - case netebpf.IPv4: - stats.Family = network.AFINET - case netebpf.IPv6: - stats.Family = network.AFINET6 - } - - stats.SPortIsEphemeral = network.IsPortInEphemeralRange(stats.Family, stats.Type, t.Sport) - - switch s.ConnectionDirection() { - case netebpf.Incoming: - stats.Direction = network.INCOMING - case netebpf.Outgoing: - stats.Direction = network.OUTGOING - default: - stats.Direction = network.OUTGOING - } - - if ch != nil { - ch.Hash(stats) +// setupTLSTagsMapCleaner sets up a map cleaner for the tls_enhanced_tags map +func (t *ebpfTracer) setupTLSTagsMapCleaner(m *manager.Manager) { + TLSTagsMap, _, err := m.GetMap(probes.EnhancedTLSTagsMap) + if err != nil { + log.Errorf("error getting %v map: %s", probes.EnhancedTLSTagsMap, err) + return } -} -func updateTCPStats(conn *network.ConnectionStats, tcpStats *netebpf.TCPStats) { - if conn.Type != network.TCP { + TLSTagsMapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, netebpf.TLSTagsWrapper](TLSTagsMap, 1024, probes.EnhancedTLSTagsMap, "npm_tracer") + if err != nil { + log.Errorf("error creating map cleaner: %s", err) return } + // slight jitter to avoid all maps being cleaned at the same time + TLSTagsMapCleaner.Clean(time.Second*70, nil, nil, func(now int64, _ netebpf.ConnTuple, val netebpf.TLSTagsWrapper) bool { + ts := int64(val.Updated) + return ts > 0 && now-ts > tlsTagsMapTTL + }) - if tcpStats != nil { - conn.Monotonic.Retransmits = tcpStats.Retransmits - conn.Monotonic.TCPEstablished = tcpStats.State_transitions >> netebpf.Established & 1 - conn.Monotonic.TCPClosed = tcpStats.State_transitions >> netebpf.Close & 1 - conn.RTT = tcpStats.Rtt - 
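setupTLSTagsMapCleaner above registers a periodic cleaner whose predicate evicts entries once their stored update timestamp is older than a TTL, with a slightly offset interval so the cleaners don't all fire together. A minimal sketch of that eviction predicate; the entry type and TTL value here are illustrative:

package tracersketch

import "time"

// tlsTagsEntry stands in for the real map value; Updated is a timestamp in nanoseconds.
type tlsTagsEntry struct {
	Updated uint64
}

const tagsTTL = int64(5 * time.Minute)

// shouldEvict reports whether an entry is stale relative to now (both in nanoseconds).
// Entries that never recorded a timestamp (Updated == 0) are kept.
func shouldEvict(now int64, val tlsTagsEntry) bool {
	ts := int64(val.Updated)
	return ts > 0 && now-ts > tagsTTL
}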
conn.RTTVar = tcpStats.Rtt_var - if tcpStats.Failure_reason > 0 { - conn.TCPFailures = map[uint16]uint32{ - tcpStats.Failure_reason: 1, - } - } - } + t.TLSTagsCleaner = TLSTagsMapCleaner } diff --git a/pkg/network/tracer/connection/ebpfless/tcp_processor.go b/pkg/network/tracer/connection/ebpfless/tcp_processor.go index 73ea6a1152aa3..15cf29540adb5 100644 --- a/pkg/network/tracer/connection/ebpfless/tcp_processor.go +++ b/pkg/network/tracer/connection/ebpfless/tcp_processor.go @@ -9,7 +9,6 @@ package ebpfless import ( "fmt" - "github.com/DataDog/datadog-agent/pkg/util/log" "syscall" "time" @@ -19,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" + "github.com/DataDog/datadog-agent/pkg/util/log" ) type connectionState struct { @@ -61,6 +61,9 @@ type connectionState struct { // Can we make all connections in TCPProcessor have a ConnectionStats no matter what, and // filter them out in GetConnections? lastUpdateEpoch uint64 + + // connDirection has the direction of the connection, if we saw the SYN packet + connDirection network.ConnectionDirection } func (st *connectionState) hasMissedHandshake() bool { @@ -71,9 +74,9 @@ func (st *connectionState) hasMissedHandshake() bool { type TCPProcessor struct { cfg *config.Config // pendingConns contains connections with tcpState == connStatAttempted - pendingConns map[network.ConnectionTuple]*connectionState + pendingConns map[PCAPTuple]*connectionState // establishedConns contains connections with tcpState == connStatEstablished - establishedConns map[network.ConnectionTuple]*connectionState + establishedConns map[PCAPTuple]*connectionState } // TODO make this into a config value @@ -84,8 +87,8 @@ const pendingConnTimeoutNs = uint64(5 * time.Second) func NewTCPProcessor(cfg *config.Config) *TCPProcessor { return &TCPProcessor{ cfg: cfg, - pendingConns: make(map[network.ConnectionTuple]*connectionState, maxPendingConns), - establishedConns: make(map[network.ConnectionTuple]*connectionState, cfg.MaxTrackedConnections), + pendingConns: make(map[PCAPTuple]*connectionState, maxPendingConns), + establishedConns: make(map[PCAPTuple]*connectionState, cfg.MaxTrackedConnections), } } @@ -135,6 +138,10 @@ func (t *TCPProcessor) updateSynFlag(conn *network.ConnectionStats, st *connecti if tcp.RST { return } + // if this is the initial SYN, store the connection direction + if tcp.SYN && !tcp.ACK { + st.connDirection = connDirectionFromPktType(pktType) + } // progress the synStates based off this packet if pktType == unix.PACKET_OUTGOING { st.localSynState.update(tcp.SYN, tcp.ACK) @@ -247,6 +254,10 @@ func (t *TCPProcessor) updateRstFlag(conn *network.ConnectionStats, st *connecti if st.tcpState == connStatAttempted { reason = syscall.ECONNREFUSED } + + if conn.TCPFailures == nil { + conn.TCPFailures = make(map[uint16]uint32) + } conn.TCPFailures[uint16(reason)]++ if st.tcpState == connStatEstablished { @@ -278,7 +289,12 @@ func (t *TCPProcessor) Process(conn *network.ConnectionStats, timestampNs uint64 return ProcessResultNone, nil } - st := t.getConn(conn.ConnectionTuple) + tuple := MakeEbpflessTuple(conn.ConnectionTuple) + st, ok := t.getConn(tuple) + if !ok { + // create a fresh state object that will be stored by moveConn later + st = &connectionState{} + } origState := st.tcpState t.updateSynFlag(conn, st, pktType, tcp, payloadLen) @@ -288,7 +304,7 @@ func (t *TCPProcessor) Process(conn *network.ConnectionStats, timestampNs uint64 stateChanged := st.tcpState != origState if stateChanged 
{ - ok := t.moveConn(conn.ConnectionTuple, st) + ok := t.moveConn(tuple, st) // if the map is full then we are unable to move the connection, report that if !ok { return ProcessResultMapFull, nil @@ -306,26 +322,25 @@ func (t *TCPProcessor) Process(conn *network.ConnectionStats, timestampNs uint64 return ProcessResultNone, nil } -func (t *TCPProcessor) getConn(tuple network.ConnectionTuple) *connectionState { +func (t *TCPProcessor) getConn(tuple PCAPTuple) (*connectionState, bool) { if st, ok := t.establishedConns[tuple]; ok { - return st + return st, true } if st, ok := t.pendingConns[tuple]; ok { - return st + return st, true } - // otherwise, create a fresh state object that will be stored by moveConn later - return &connectionState{} + return nil, false } // RemoveConn clears a ConnectionTuple from its internal state. -func (t *TCPProcessor) RemoveConn(tuple network.ConnectionTuple) { +func (t *TCPProcessor) RemoveConn(tuple PCAPTuple) { delete(t.pendingConns, tuple) delete(t.establishedConns, tuple) } // moveConn moves a connection to the correct map based on its tcpState. // If it had to drop the connection because the target map was full, it returns false. -func (t *TCPProcessor) moveConn(tuple network.ConnectionTuple, st *connectionState) bool { +func (t *TCPProcessor) moveConn(tuple PCAPTuple, st *connectionState) bool { t.RemoveConn(tuple) switch st.tcpState { @@ -365,3 +380,28 @@ func (t *TCPProcessor) CleanupExpiredPendingConns(timestampNs uint64) { } } } + +// MakeEbpflessTuple converts a network.ConnectionTuple to a PCAPTuple. +// See the PCAPTuple doc for more information. +func MakeEbpflessTuple(tuple network.ConnectionTuple) PCAPTuple { + ret := PCAPTuple(tuple) + ret.Pid = 0 + ret.Direction = network.UNKNOWN + return ret +} + +// MakeConnStatsTuple converts a PCAPTuple to a network.ConnectionTuple. +func MakeConnStatsTuple(tuple PCAPTuple) network.ConnectionTuple { + // Direction is still 0, this will get set by the ebpfless tracer in finalizeConnectionDirection + return network.ConnectionTuple(tuple) +} + +// GetConnDirection returns the direction of the connection. +// If the SYN packet was not seen (for a pre-existing connection), it returns ConnDirUnknown. 
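MakeEbpflessTuple above normalizes a connection tuple by zeroing the fields that packet capture cannot observe (PID and direction), so every packet of a connection maps to the same key regardless of which process or traffic direction produced it. A standalone sketch of that normalization with a hypothetical tuple type:

package tracersketch

// pcapTuple is a stand-in for the real tuple type the ebpfless tracer keys on.
type pcapTuple struct {
	SrcIP, DstIP     string
	SrcPort, DstPort uint16
	PID              uint32
	Direction        uint8 // 0 == unknown
}

// normalizeTuple clears the fields not visible from a packet capture so that
// lookups from different packets of the same connection hit the same map entry.
func normalizeTuple(t pcapTuple) pcapTuple {
	t.PID = 0
	t.Direction = 0
	return t
}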
+func (t *TCPProcessor) GetConnDirection(tuple PCAPTuple) (network.ConnectionDirection, bool) { + conn, ok := t.getConn(tuple) + if !ok { + return network.UNKNOWN, false + } + return conn.connDirection, true +} diff --git a/pkg/network/tracer/connection/ebpfless/tcp_processor_test.go b/pkg/network/tracer/connection/ebpfless/tcp_processor_test.go index 44def44a2860c..866930ddeb421 100644 --- a/pkg/network/tracer/connection/ebpfless/tcp_processor_test.go +++ b/pkg/network/tracer/connection/ebpfless/tcp_processor_test.go @@ -8,12 +8,13 @@ package ebpfless import ( - "github.com/DataDog/datadog-agent/pkg/network/config" "net" "syscall" "testing" "time" + "github.com/DataDog/datadog-agent/pkg/network/config" + "golang.org/x/sys/unix" "github.com/google/gopacket/layers" @@ -176,6 +177,15 @@ func newTCPTestFixture(t *testing.T) *tcpTestFixture { } } +func (fixture *tcpTestFixture) getConnectionState() *connectionState { + tuple := MakeEbpflessTuple(fixture.conn.ConnectionTuple) + conn, ok := fixture.tcp.getConn(tuple) + if ok { + return conn + } + return &connectionState{} +} + func (fixture *tcpTestFixture) runPkt(pkt testCapture) ProcessResult { if fixture.conn == nil { fixture.conn = makeTCPStates(pkt) @@ -200,9 +210,8 @@ func (fixture *tcpTestFixture) runAgainstState(packets []testCapture, expected [ expectedStrs = append(expectedStrs, labelForState(expected[i])) fixture.runPkt(pkt) - connTuple := fixture.conn.ConnectionTuple - actual := fixture.tcp.getConn(connTuple).tcpState - actualStrs = append(actualStrs, labelForState(actual)) + tcpState := fixture.getConnectionState().tcpState + actualStrs = append(actualStrs, labelForState(tcpState)) } require.Equal(fixture.t, expectedStrs, actualStrs) } @@ -815,3 +824,41 @@ func TestPendingConnExpiry(t *testing.T) { f.tcp.CleanupExpiredPendingConns(now + tenSecNs) require.Empty(t, f.tcp.pendingConns) } + +func TestTCPProcessorConnDirection(t *testing.T) { + pb := newPacketBuilder(lowerSeq, higherSeq) + + t.Run("outgoing", func(t *testing.T) { + f := newTCPTestFixture(t) + capture := []testCapture{ + pb.outgoing(0, 0, 0, SYN), + pb.incoming(0, 0, 1, SYN|ACK), + pb.outgoing(0, 1, 1, ACK), + } + f.runPkts(capture) + + require.Equal(t, network.OUTGOING, f.getConnectionState().connDirection) + }) + t.Run("incoming", func(t *testing.T) { + f := newTCPTestFixture(t) + capture := []testCapture{ + pb.incoming(0, 0, 0, SYN), + pb.outgoing(0, 0, 1, SYN|ACK), + pb.incoming(0, 1, 1, ACK), + } + f.runPkts(capture) + + require.Equal(t, network.INCOMING, f.getConnectionState().connDirection) + }) + t.Run("preexisting", func(t *testing.T) { + f := newTCPTestFixture(t) + capture := []testCapture{ + // just sending data, no SYN + pb.outgoing(1, 10, 10, ACK), + pb.incoming(1, 10, 11, ACK), + } + f.runPkts(capture) + + require.Equal(t, network.UNKNOWN, f.getConnectionState().connDirection) + }) +} diff --git a/pkg/network/tracer/connection/ebpfless/tcp_utils.go b/pkg/network/tracer/connection/ebpfless/tcp_utils.go index 7a30737e734ae..1175c517f1e93 100644 --- a/pkg/network/tracer/connection/ebpfless/tcp_utils.go +++ b/pkg/network/tracer/connection/ebpfless/tcp_utils.go @@ -16,11 +16,28 @@ import ( "github.com/google/gopacket/layers" + "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/telemetry" ) const ebpflessModuleName = "ebpfless_network_tracer" +// PCAPTuple represents a unique key for an ebpfless tracer connection. 
+// It represents a network.ConnectionTuple with only the fields that are available +// via packet capture: PID and Direction are zeroed out. +type PCAPTuple network.ConnectionTuple + +func connDirectionFromPktType(pktType uint8) network.ConnectionDirection { + switch pktType { + case unix.PACKET_HOST: + return network.INCOMING + case unix.PACKET_OUTGOING: + return network.OUTGOING + default: + return network.UNKNOWN + } +} + // ProcessResult represents what the ebpfless tracer should do with ConnectionStats after processing a packet type ProcessResult uint8 diff --git a/pkg/network/tracer/connection/ebpfless_tracer.go b/pkg/network/tracer/connection/ebpfless_tracer.go index 3eb5a03344b2a..17320147fda83 100644 --- a/pkg/network/tracer/connection/ebpfless_tracer.go +++ b/pkg/network/tracer/connection/ebpfless_tracer.go @@ -53,15 +53,16 @@ type ebpfLessTracer struct { config *config.Config - packetSrc *filter.AFPacketSource - exit chan struct{} - scratchConn *network.ConnectionStats + packetSrc *filter.AFPacketSource + // packetSrcBusy is needed because you can't close packetSrc while it's still visiting + packetSrcBusy sync.WaitGroup + exit chan struct{} udp *udpProcessor tcp *ebpfless.TCPProcessor // connection maps - conns map[network.ConnectionTuple]*network.ConnectionStats + conns map[ebpfless.PCAPTuple]*network.ConnectionStats boundPorts *ebpfless.BoundPorts cookieHasher *cookieHasher @@ -78,15 +79,15 @@ func newEbpfLessTracer(cfg *config.Config) (*ebpfLessTracer, error) { } tr := &ebpfLessTracer{ - config: cfg, - packetSrc: packetSrc, - exit: make(chan struct{}), - scratchConn: &network.ConnectionStats{}, - udp: &udpProcessor{}, - tcp: ebpfless.NewTCPProcessor(cfg), - conns: make(map[network.ConnectionTuple]*network.ConnectionStats, cfg.MaxTrackedConnections), - boundPorts: ebpfless.NewBoundPorts(cfg), - cookieHasher: newCookieHasher(), + config: cfg, + packetSrc: packetSrc, + packetSrcBusy: sync.WaitGroup{}, + exit: make(chan struct{}), + udp: &udpProcessor{}, + tcp: ebpfless.NewTCPProcessor(cfg), + conns: make(map[ebpfless.PCAPTuple]*network.ConnectionStats, cfg.MaxTrackedConnections), + boundPorts: ebpfless.NewBoundPorts(cfg), + cookieHasher: newCookieHasher(), } tr.ns, err = netns.Get() @@ -103,7 +104,11 @@ func (t *ebpfLessTracer) Start(closeCallback func(*network.ConnectionStats)) err return fmt.Errorf("could not update bound ports: %w", err) } + t.packetSrcBusy.Add(1) go func() { + defer func() { + t.packetSrcBusy.Done() + }() var eth layers.Ethernet var ip4 layers.IPv4 var ip6 layers.IPv6 @@ -113,7 +118,7 @@ func (t *ebpfLessTracer) Start(closeCallback func(*network.ConnectionStats)) err parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, ð, &ip4, &ip6, &tcp, &udp) parser.IgnoreUnsupported = true for { - err := t.packetSrc.VisitPackets(t.exit, func(b []byte, info filter.PacketInfo, _ time.Time) error { + err := t.packetSrc.VisitPackets(func(b []byte, info filter.PacketInfo, _ time.Time) error { if err := parser.DecodeLayers(b, &decoded); err != nil { return fmt.Errorf("error decoding packet layers: %w", err) } @@ -133,9 +138,16 @@ func (t *ebpfLessTracer) Start(closeCallback func(*network.ConnectionStats)) err }) if err != nil { - log.Errorf("exiting packet loop: %s", err) + log.Errorf("exiting visiting packets: %s", err) return } + + // Properly synchronizes termination process + select { + case <-t.exit: + return + default: + } } }() @@ -151,54 +163,41 @@ func (t *ebpfLessTracer) processConnection( decoded []gopacket.LayerType, closeCallback 
func(*network.ConnectionStats), ) error { - t.scratchConn.Source, t.scratchConn.Dest = util.Address{}, util.Address{} - t.scratchConn.SPort, t.scratchConn.DPort = 0, 0 - t.scratchConn.TCPFailures = make(map[uint16]uint32) - var ip4Present, ip6Present, udpPresent, tcpPresent bool - for _, layerType := range decoded { - switch layerType { - case layers.LayerTypeIPv4: - t.scratchConn.Source = util.AddressFromNetIP(ip4.SrcIP) - t.scratchConn.Dest = util.AddressFromNetIP(ip4.DstIP) - t.scratchConn.Family = network.AFINET - ip4Present = true - case layers.LayerTypeIPv6: - t.scratchConn.Source = util.AddressFromNetIP(ip6.SrcIP) - t.scratchConn.Dest = util.AddressFromNetIP(ip6.DstIP) - t.scratchConn.Family = network.AFINET6 - ip6Present = true - case layers.LayerTypeTCP: - t.scratchConn.SPort = uint16(tcp.SrcPort) - t.scratchConn.DPort = uint16(tcp.DstPort) - t.scratchConn.Type = network.TCP - tcpPresent = true - case layers.LayerTypeUDP: - t.scratchConn.SPort = uint16(udp.SrcPort) - t.scratchConn.DPort = uint16(udp.DstPort) - t.scratchConn.Type = network.UDP - udpPresent = true - } - } + tuple, flags := buildTuple(pktType, ip4, ip6, udp, tcp, decoded) // check if we have all the basic pieces - if !udpPresent && !tcpPresent { + if !flags.udpPresent && !flags.tcpPresent { log.Debugf("ignoring packet since its not udp or tcp") ebpfLessTracerTelemetry.skippedPackets.Inc("not_tcp_udp") return nil } + if !flags.ip4Present && !flags.ip6Present { + return fmt.Errorf("expected to have an IP layer") + } - t.determineConnectionDirection(t.scratchConn, pktType) - flipSourceDest(t.scratchConn, pktType) + // don't trace families/protocols that are disabled by configuration + switch tuple.Type { + case network.UDP: + if (flags.ip4Present && !t.config.CollectUDPv4Conns) || (flags.ip6Present && !t.config.CollectUDPv6Conns) { + return nil + } + case network.TCP: + if (flags.ip4Present && !t.config.CollectTCPv4Conns) || (flags.ip6Present && !t.config.CollectTCPv6Conns) { + return nil + } + } t.m.Lock() defer t.m.Unlock() - conn := t.conns[t.scratchConn.ConnectionTuple] - if conn == nil { - conn = &network.ConnectionStats{} - *conn = *t.scratchConn - t.cookieHasher.Hash(conn) - conn.Duration = time.Duration(time.Now().UnixNano()) + conn, ok := t.conns[tuple] + isNewConn := !ok + if isNewConn { + conn = &network.ConnectionStats{ + // NOTE: this tuple does not have the connection direction set yet. 
+ // That will be set from determineConnectionDirection later + ConnectionTuple: ebpfless.MakeConnStatsTuple(tuple), + } } var ts int64 @@ -208,22 +207,12 @@ func (t *ebpfLessTracer) processConnection( } conn.LastUpdateEpoch = uint64(ts) - if !ip4Present && !ip6Present { - return nil - } - var result ebpfless.ProcessResult switch conn.Type { case network.UDP: - if (ip4Present && !t.config.CollectUDPv4Conns) || (ip6Present && !t.config.CollectUDPv6Conns) { - return nil - } result = ebpfless.ProcessResultStoreConn err = t.udp.process(conn, pktType, udp) case network.TCP: - if (ip4Present && !t.config.CollectTCPv4Conns) || (ip6Present && !t.config.CollectTCPv6Conns) { - return nil - } result, err = t.tcp.Process(conn, uint64(ts), pktType, ip4, ip6, tcp) default: err = fmt.Errorf("unsupported connection type %d", conn.Type) @@ -240,49 +229,124 @@ func (t *ebpfLessTracer) processConnection( switch result { case ebpfless.ProcessResultNone: case ebpfless.ProcessResultStoreConn: - maxTrackedConns := int(t.config.MaxTrackedConnections) - ok := ebpfless.WriteMapWithSizeLimit(t.conns, conn.ConnectionTuple, conn, maxTrackedConns) - if !ok { - // we don't have enough space to add this connection, remove its TCP state tracking + // if we fail to store this connection at any point, remove its TCP state tracking + storeConnOk := false + defer func() { + if storeConnOk { + return + } if conn.Type == network.TCP { - t.tcp.RemoveConn(conn.ConnectionTuple) + t.tcp.RemoveConn(tuple) } ebpfLessTracerTelemetry.droppedConnections.Inc() + }() + + if isNewConn { + conn.Duration = time.Duration(time.Now().UnixNano()) + direction, err := t.determineConnectionDirection(conn, pktType) + if err != nil { + return err + } + if direction == network.UNKNOWN { + return fmt.Errorf("could not determine connection direction") + } + conn.Direction = direction + + // now that the direction is set, hash the connection + t.cookieHasher.Hash(conn) } + maxTrackedConns := int(t.config.MaxTrackedConnections) + storeConnOk = ebpfless.WriteMapWithSizeLimit(t.conns, tuple, conn, maxTrackedConns) case ebpfless.ProcessResultCloseConn: - delete(t.conns, conn.ConnectionTuple) + delete(t.conns, tuple) closeCallback(conn) case ebpfless.ProcessResultMapFull: - delete(t.conns, conn.ConnectionTuple) + delete(t.conns, tuple) ebpfLessTracerTelemetry.droppedConnections.Inc() } return nil } -func flipSourceDest(conn *network.ConnectionStats, pktType uint8) { +type packetFlags struct { + ip4Present, ip6Present, udpPresent, tcpPresent bool +} + +// buildTuple converts the packet capture layer info into an EbpflessTuple with flags that indicate which layers were present. 
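The ProcessResultStoreConn branch above uses a success flag checked in a deferred function, so any early return before the final store rolls back the TCP-processor state for that tuple exactly once. A compact sketch of that idiom with hypothetical validate, store, and rollback hooks:

package tracersketch

// storeWithRollback runs validate and store; if the function exits before the
// store succeeds, the deferred check runs the rollback, mirroring the flag-guarded
// defer used above.
func storeWithRollback(validate func() error, store func() bool, rollback func()) (err error) {
	ok := false
	defer func() {
		if !ok {
			rollback()
		}
	}()

	if err = validate(); err != nil {
		return err // rollback fires because ok is still false
	}
	ok = store()
	return nil // if the store was full, ok stays false and rollback fires
}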
+func buildTuple(pktType uint8, ip4 *layers.IPv4, ip6 *layers.IPv6, udp *layers.UDP, tcp *layers.TCP, decoded []gopacket.LayerType) (ebpfless.PCAPTuple, packetFlags) { + var tuple ebpfless.PCAPTuple + var flags packetFlags + for _, layerType := range decoded { + switch layerType { + case layers.LayerTypeIPv4: + tuple.Source = util.AddressFromNetIP(ip4.SrcIP) + tuple.Dest = util.AddressFromNetIP(ip4.DstIP) + tuple.Family = network.AFINET + flags.ip4Present = true + case layers.LayerTypeIPv6: + tuple.Source = util.AddressFromNetIP(ip6.SrcIP) + tuple.Dest = util.AddressFromNetIP(ip6.DstIP) + tuple.Family = network.AFINET6 + flags.ip6Present = true + case layers.LayerTypeTCP: + tuple.SPort = uint16(tcp.SrcPort) + tuple.DPort = uint16(tcp.DstPort) + tuple.Type = network.TCP + flags.tcpPresent = true + case layers.LayerTypeUDP: + tuple.SPort = uint16(udp.SrcPort) + tuple.DPort = uint16(udp.DstPort) + tuple.Type = network.UDP + flags.udpPresent = true + } + } + if pktType == unix.PACKET_HOST { - conn.Dest, conn.Source = conn.Source, conn.Dest - conn.DPort, conn.SPort = conn.SPort, conn.DPort + tuple.Dest, tuple.Source = tuple.Source, tuple.Dest + tuple.DPort, tuple.SPort = tuple.SPort, tuple.DPort } + return tuple, flags } -func (t *ebpfLessTracer) determineConnectionDirection(conn *network.ConnectionStats, pktType uint8) { - t.m.Lock() - defer t.m.Unlock() +// determineConnectionDirection returns connection direction using information from the TCP processor. +// If the TCP processor doesn't know the direction, it will attempt to guess. +func (t *ebpfLessTracer) determineConnectionDirection(conn *network.ConnectionStats, pktType uint8) (network.ConnectionDirection, error) { + if conn.Type == network.TCP { + tuple := ebpfless.MakeEbpflessTuple(conn.ConnectionTuple) + dir, ok := t.tcp.GetConnDirection(tuple) + if !ok { + return network.UNKNOWN, fmt.Errorf("finalizeConnectionDirection: expected to find TCP connection for tuple: %+v", tuple) + } + switch dir { + case network.INCOMING: + case network.OUTGOING: + return dir, nil + case network.UNKNOWN: + // This happens when the TCP processor missed the SYN packet. + // Fall through and guess the direction. 
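determineConnectionDirection prefers the direction the TCP processor recorded from the initial SYN and, as the code continues below, falls back to bound-port lookups (including the loopback destination case) and finally to the packet type. A simplified sketch of the intended decision chain; the boolean inputs are placeholders for the tracer's real state lookups:

package tracersketch

type direction uint8

const (
	dirUnknown direction = iota
	dirIncoming
	dirOutgoing
)

// guessDirection mirrors the fallback chain: use the SYN-derived direction when
// the TCP processor saw the handshake, otherwise fall back to a bound local port
// (incoming), a bound loopback destination port (outgoing), and finally the
// packet type reported by AF_PACKET.
func guessDirection(synDir direction, localPortBound, destLoopbackBound, packetInbound bool) direction {
	if synDir != dirUnknown {
		return synDir
	}
	if localPortBound {
		return dirIncoming
	}
	if destLoopbackBound {
		return dirOutgoing
	}
	if packetInbound {
		return dirIncoming
	}
	return dirOutgoing
}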
+ } + } ok := t.boundPorts.Find(conn.Type, conn.SPort) if ok { // incoming connection - conn.Direction = network.INCOMING - return + return network.INCOMING, nil + } + // for local connections - the destination could be a bound port + if conn.Dest.Addr.IsLoopback() { + ok := t.boundPorts.Find(conn.Type, conn.DPort) + if ok { + return network.OUTGOING, nil + } } switch pktType { case unix.PACKET_HOST: - conn.Direction = network.INCOMING + return network.INCOMING, nil case unix.PACKET_OUTGOING: - conn.Direction = network.OUTGOING + return network.OUTGOING, nil + default: + return network.UNKNOWN, fmt.Errorf("unknown packet type %d", pktType) } } @@ -293,6 +357,10 @@ func (t *ebpfLessTracer) Stop() { } close(t.exit) + // close the packet capture loop and wait for it to finish + t.packetSrc.Close() + t.packetSrcBusy.Wait() + t.ns.Close() t.boundPorts.Stop() } @@ -342,9 +410,10 @@ func (t *ebpfLessTracer) cleanupPendingConns() error { func (t *ebpfLessTracer) FlushPending() {} func (t *ebpfLessTracer) remove(conn *network.ConnectionStats) error { - delete(t.conns, conn.ConnectionTuple) + tuple := ebpfless.MakeEbpflessTuple(conn.ConnectionTuple) + delete(t.conns, tuple) if conn.Type == network.TCP { - t.tcp.RemoveConn(conn.ConnectionTuple) + t.tcp.RemoveConn(tuple) } return nil } diff --git a/pkg/network/tracer/connection/fentry/manager.go b/pkg/network/tracer/connection/fentry/manager.go index b41820b7ffedf..6936861aca94a 100644 --- a/pkg/network/tracer/connection/fentry/manager.go +++ b/pkg/network/tracer/connection/fentry/manager.go @@ -5,18 +5,17 @@ //go:build linux_bpf -package fentry //nolint:revive // TODO +// Package fentry provides connection tracing for fentry +package fentry import ( manager "github.com/DataDog/ebpf-manager" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" - "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" - "github.com/DataDog/datadog-agent/pkg/network/tracer/connection/util" ) -func initManager(mgr *ddebpf.Manager, connCloseEventHandler ddebpf.EventHandler, cfg *config.Config) { +func initManager(mgr *ddebpf.Manager) { mgr.Maps = []*manager.Map{ {Name: probes.ConnMap}, {Name: probes.TCPStatsMap}, @@ -29,7 +28,6 @@ func initManager(mgr *ddebpf.Manager, connCloseEventHandler ddebpf.EventHandler, {Name: "pending_bind"}, {Name: probes.TelemetryMap}, } - util.SetupClosedConnHandler(connCloseEventHandler, mgr, cfg) for funcName := range programs { p := &manager.Probe{ ProbeIdentificationPair: manager.ProbeIdentificationPair{ diff --git a/pkg/network/tracer/connection/fentry/probes.go b/pkg/network/tracer/connection/fentry/probes.go index 8d8ce7ae73df3..3724d1be0dafe 100644 --- a/pkg/network/tracer/connection/fentry/probes.go +++ b/pkg/network/tracer/connection/fentry/probes.go @@ -46,8 +46,8 @@ const ( udpSendSkb = "kprobe__udp_send_skb" skbFreeDatagramLocked = "skb_free_datagram_locked" - __skbFreeDatagramLocked = "__skb_free_datagram_locked" //nolint:revive // TODO - skbConsumeUdp = "skb_consume_udp" //nolint:revive // TODO + __skbFreeDatagramLocked = "__skb_free_datagram_locked" // nolint:revive + skbConsumeUDP = "skb_consume_udp" udpv6RecvMsg = "udpv6_recvmsg" udpv6RecvMsgReturn = "udpv6_recvmsg_exit" @@ -112,7 +112,7 @@ var programs = map[string]struct{}{ udpv6DestroySockReturn: {}, skbFreeDatagramLocked: {}, __skbFreeDatagramLocked: {}, - skbConsumeUdp: {}, + skbConsumeUDP: {}, tcpRecvMsgPre5190Return: {}, udpRecvMsgPre5190Return: {}, udpv6RecvMsgPre5190Return: {}, @@ -138,7 +138,6 @@ func 
enabledPrograms(c *config.Config) (map[string]struct{}, error) { enableProgram(enabled, tcpSendPageReturn) enableProgram(enabled, selectVersionBasedProbe(kv, tcpRecvMsgReturn, tcpRecvMsgPre5190Return, kv5190)) enableProgram(enabled, tcpClose) - enableProgram(enabled, tcpCloseReturn) enableProgram(enabled, tcpConnect) enableProgram(enabled, tcpFinishConnect) enableProgram(enabled, inetCskAcceptReturn) @@ -153,30 +152,40 @@ func enabledPrograms(c *config.Config) (map[string]struct{}, error) { // if err == nil && len(missing) == 0 { // enableProgram(enabled, sockFDLookupRet) // } + + if c.CustomBatchingEnabled { + enableProgram(enabled, tcpCloseReturn) + } } if c.CollectUDPv4Conns { enableProgram(enabled, udpSendPageReturn) enableProgram(enabled, udpDestroySock) - enableProgram(enabled, udpDestroySockReturn) enableProgram(enabled, inetBind) enableProgram(enabled, inetBindRet) enableProgram(enabled, udpRecvMsg) enableProgram(enabled, selectVersionBasedProbe(kv, udpRecvMsgReturn, udpRecvMsgPre5190Return, kv5190)) enableProgram(enabled, udpSendMsgReturn) enableProgram(enabled, udpSendSkb) + + if c.CustomBatchingEnabled { + enableProgram(enabled, udpDestroySockReturn) + } } if c.CollectUDPv6Conns { enableProgram(enabled, udpSendPageReturn) enableProgram(enabled, udpv6DestroySock) - enableProgram(enabled, udpv6DestroySockReturn) enableProgram(enabled, inet6Bind) enableProgram(enabled, inet6BindRet) enableProgram(enabled, udpv6RecvMsg) enableProgram(enabled, selectVersionBasedProbe(kv, udpv6RecvMsgReturn, udpv6RecvMsgPre5190Return, kv5190)) enableProgram(enabled, udpv6SendMsgReturn) enableProgram(enabled, udpv6SendSkb) + + if c.CustomBatchingEnabled { + enableProgram(enabled, udpv6DestroySockReturn) + } } if c.CollectUDPv4Conns || c.CollectUDPv6Conns { @@ -194,7 +203,7 @@ func enableAdvancedUDP(enabled map[string]struct{}) error { return fmt.Errorf("error verifying kernel function presence: %s", err) } if _, miss := missing["skb_consume_udp"]; !miss { - enableProgram(enabled, skbConsumeUdp) + enableProgram(enabled, skbConsumeUDP) } else if _, miss := missing["__skb_free_datagram_locked"]; !miss { enableProgram(enabled, __skbFreeDatagramLocked) } else if _, miss := missing["skb_free_datagram_locked"]; !miss { diff --git a/pkg/network/tracer/connection/fentry/tracer.go b/pkg/network/tracer/connection/fentry/tracer.go index d44715e8c87e5..42e96ba6f4129 100644 --- a/pkg/network/tracer/connection/fentry/tracer.go +++ b/pkg/network/tracer/connection/fentry/tracer.go @@ -17,83 +17,78 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + "github.com/DataDog/datadog-agent/pkg/ebpf/perf" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/network/config" netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" - "github.com/DataDog/datadog-agent/pkg/network/tracer/connection/util" "github.com/DataDog/datadog-agent/pkg/util/fargate" ) const probeUID = "net" -var ErrorNotSupported = errors.New("fentry tracer is only supported on Fargate") //nolint:revive // TODO +// ErrorNotSupported is the error when entry tracer is not supported on an environment +var ErrorNotSupported = errors.New("fentry tracer is only supported on Fargate") // LoadTracer loads a new tracer -func LoadTracer(config *config.Config, mgrOpts manager.Options, connCloseEventHandler ddebpf.EventHandler) (*manager.Manager, func(), error) { +func LoadTracer(config *config.Config, mgrOpts manager.Options, connCloseEventHandler 
*perf.EventHandler) (*ddebpf.Manager, func(), error) { if !fargate.IsFargateInstance() { return nil, nil, ErrorNotSupported } - m := ddebpf.NewManagerWithDefault(&manager.Manager{}, "network", &ebpftelemetry.ErrorsTelemetryModifier{}) + m := ddebpf.NewManagerWithDefault(&manager.Manager{}, "network", &ebpftelemetry.ErrorsTelemetryModifier{}, connCloseEventHandler) err := ddebpf.LoadCOREAsset(netebpf.ModuleFileName("tracer-fentry", config.BPFDebug), func(ar bytecode.AssetReader, o manager.Options) error { o.RemoveRlimit = mgrOpts.RemoveRlimit o.MapSpecEditors = mgrOpts.MapSpecEditors o.ConstantEditors = mgrOpts.ConstantEditors + return initFentryTracer(ar, o, config, m) + }) - // Use the config to determine what kernel probes should be enabled - enabledProbes, err := enabledPrograms(config) - if err != nil { - return fmt.Errorf("invalid probe configuration: %v", err) - } - - initManager(m, connCloseEventHandler, config) - - file, err := os.Stat("/proc/self/ns/pid") + if err != nil { + return nil, nil, err + } - if err != nil { - return fmt.Errorf("could not load sysprobe pid: %w", err) - } + return m, nil, nil +} - device := file.Sys().(*syscall.Stat_t).Dev - inode := file.Sys().(*syscall.Stat_t).Ino - ringbufferEnabled := config.RingBufferSupportedNPM() - - o.ConstantEditors = append(o.ConstantEditors, manager.ConstantEditor{ - Name: "systemprobe_device", - Value: device, - }) - o.ConstantEditors = append(o.ConstantEditors, manager.ConstantEditor{ - Name: "systemprobe_ino", - Value: inode, - }) - util.AddBoolConst(&o, "ringbuffers_enabled", ringbufferEnabled) - if ringbufferEnabled { - util.EnableRingbuffersViaMapEditor(&mgrOpts) - } +// Use a function so someone doesn't accidentally use mgrOpts from the outer scope in LoadTracer +func initFentryTracer(ar bytecode.AssetReader, o manager.Options, config *config.Config, m *ddebpf.Manager) error { + // Use the config to determine what kernel probes should be enabled + enabledProbes, err := enabledPrograms(config) + if err != nil { + return fmt.Errorf("invalid probe configuration: %v", err) + } - // exclude all non-enabled probes to ensure we don't run into problems with unsupported probe types - for _, p := range m.Probes { - if _, enabled := enabledProbes[p.EBPFFuncName]; !enabled { - o.ExcludedFunctions = append(o.ExcludedFunctions, p.EBPFFuncName) - } - } - for funcName := range enabledProbes { - o.ActivatedProbes = append( - o.ActivatedProbes, - &manager.ProbeSelector{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: funcName, - UID: probeUID, - }, - }) - } + initManager(m) - return m.InitWithOptions(ar, &o) + file, err := os.Stat("/proc/self/ns/pid") + if err != nil { + return fmt.Errorf("could not load sysprobe pid: %w", err) + } + pidStat := file.Sys().(*syscall.Stat_t) + o.ConstantEditors = append(o.ConstantEditors, manager.ConstantEditor{ + Name: "systemprobe_device", + Value: pidStat.Dev, + }, manager.ConstantEditor{ + Name: "systemprobe_ino", + Value: pidStat.Ino, }) - if err != nil { - return nil, nil, err + // exclude all non-enabled probes to ensure we don't run into problems with unsupported probe types + for _, p := range m.Probes { + if _, enabled := enabledProbes[p.EBPFFuncName]; !enabled { + o.ExcludedFunctions = append(o.ExcludedFunctions, p.EBPFFuncName) + } + } + for funcName := range enabledProbes { + o.ActivatedProbes = append( + o.ActivatedProbes, + &manager.ProbeSelector{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: funcName, + UID: probeUID, + }, + }) } - 
return m.Manager, nil, nil + return m.InitWithOptions(ar, &o) } diff --git a/pkg/network/tracer/connection/kprobe/compile.go b/pkg/network/tracer/connection/kprobe/compile.go index a515480ba5194..9a8c2855f755a 100644 --- a/pkg/network/tracer/connection/kprobe/compile.go +++ b/pkg/network/tracer/connection/kprobe/compile.go @@ -5,7 +5,8 @@ //go:build linux_bpf -package kprobe //nolint:revive // TODO +// Package kprobe supports kprobe connecting tracing +package kprobe import ( "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode/runtime" diff --git a/pkg/network/tracer/connection/kprobe/config.go b/pkg/network/tracer/connection/kprobe/config.go index 880a2f0a5e838..2034af8c5ed0c 100644 --- a/pkg/network/tracer/connection/kprobe/config.go +++ b/pkg/network/tracer/connection/kprobe/config.go @@ -8,10 +8,14 @@ package kprobe import ( + "errors" "fmt" + manager "github.com/DataDog/ebpf-manager" + "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/network/config" + netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -58,6 +62,8 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes if c.CollectTCPv4Conns || c.CollectTCPv6Conns { if ClassificationSupported(c) { enableProbe(enabled, probes.ProtocolClassifierEntrySocketFilter) + enableProbe(enabled, probes.ProtocolClassifierTLSClientSocketFilter) + enableProbe(enabled, probes.ProtocolClassifierTLSServerSocketFilter) enableProbe(enabled, probes.ProtocolClassifierQueuesSocketFilter) enableProbe(enabled, probes.ProtocolClassifierDBsSocketFilter) enableProbe(enabled, probes.ProtocolClassifierGRPCSocketFilter) @@ -76,10 +82,15 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes enableProbe(enabled, probes.TCPReadSock) enableProbe(enabled, probes.TCPReadSockReturn) enableProbe(enabled, probes.TCPClose) - enableProbe(enabled, probes.TCPCloseFlushReturn) + if c.CustomBatchingEnabled { + enableProbe(enabled, probes.TCPCloseFlushReturn) + } + enableProbe(enabled, probes.TCPConnect) enableProbe(enabled, probes.TCPDone) - enableProbe(enabled, probes.TCPDoneFlushReturn) + if c.CustomBatchingEnabled { + enableProbe(enabled, probes.TCPDoneFlushReturn) + } enableProbe(enabled, probes.TCPFinishConnect) enableProbe(enabled, probes.InetCskAcceptReturn) enableProbe(enabled, probes.InetCskListenStop) @@ -93,7 +104,9 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes if c.CollectUDPv4Conns { enableProbe(enabled, probes.UDPDestroySock) - enableProbe(enabled, probes.UDPDestroySockReturn) + if c.CustomBatchingEnabled { + enableProbe(enabled, probes.UDPDestroySockReturn) + } enableProbe(enabled, selectVersionBasedProbe(runtimeTracer, kv, probes.IPMakeSkb, probes.IPMakeSkbPre4180, kv4180)) enableProbe(enabled, probes.IPMakeSkbReturn) enableProbe(enabled, probes.InetBind) @@ -116,11 +129,13 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes if c.CollectUDPv6Conns { enableProbe(enabled, probes.UDPv6DestroySock) - enableProbe(enabled, probes.UDPv6DestroySockReturn) + if c.CustomBatchingEnabled { + enableProbe(enabled, probes.UDPv6DestroySockReturn) + } if kv >= kv5180 || runtimeTracer { // prebuilt shouldn't arrive here with 5.18+ and UDPv6 enabled if !coreTracer && !runtimeTracer { - return nil, fmt.Errorf("UDPv6 does not function on prebuilt tracer with 
kernel versions 5.18+") + return nil, errors.New("UDPv6 does not function on prebuilt tracer with kernel versions 5.18+") } enableProbe(enabled, probes.IP6MakeSkb) } else if kv >= kv470 { @@ -156,6 +171,62 @@ func enabledProbes(c *config.Config, runtimeTracer, coreTracer bool) (map[probes return enabled, nil } +func protocolClassificationTailCalls(cfg *config.Config) []manager.TailCallRoute { + tcs := []manager.TailCallRoute{ + { + ProgArrayName: probes.ClassificationProgsMap, + Key: netebpf.ClassificationTLSClient, + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: probes.ProtocolClassifierTLSClientSocketFilter, + UID: probeUID, + }, + }, + { + ProgArrayName: probes.ClassificationProgsMap, + Key: netebpf.ClassificationTLSServer, + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: probes.ProtocolClassifierTLSServerSocketFilter, + UID: probeUID, + }, + }, + { + ProgArrayName: probes.ClassificationProgsMap, + Key: netebpf.ClassificationQueues, + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: probes.ProtocolClassifierQueuesSocketFilter, + UID: probeUID, + }, + }, + { + ProgArrayName: probes.ClassificationProgsMap, + Key: netebpf.ClassificationDBs, + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: probes.ProtocolClassifierDBsSocketFilter, + UID: probeUID, + }, + }, + { + ProgArrayName: probes.ClassificationProgsMap, + Key: netebpf.ClassificationGRPC, + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: probes.ProtocolClassifierGRPCSocketFilter, + UID: probeUID, + }, + }, + } + if cfg.CustomBatchingEnabled { + tcs = append(tcs, manager.TailCallRoute{ + ProgArrayName: probes.TCPCloseProgsMap, + Key: 0, + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: probes.TCPCloseFlushReturn, + UID: probeUID, + }, + }) + } + return tcs +} + func enableAdvancedUDP(enabled map[probes.ProbeFuncName]struct{}) error { missing, err := ebpf.VerifyKernelFuncs("skb_consume_udp", "__skb_free_datagram_locked", "skb_free_datagram_locked") if err != nil { @@ -169,7 +240,7 @@ func enableAdvancedUDP(enabled map[probes.ProbeFuncName]struct{}) error { } else if _, miss := missing["skb_free_datagram_locked"]; !miss { enableProbe(enabled, probes.SKBFreeDatagramLocked) } else { - return fmt.Errorf("missing desired UDP receive kernel functions") + return errors.New("missing desired UDP receive kernel functions") } return nil } diff --git a/pkg/network/tracer/connection/kprobe/manager.go b/pkg/network/tracer/connection/kprobe/manager.go index fb2ee4b7bd065..046c8f951ca32 100644 --- a/pkg/network/tracer/connection/kprobe/manager.go +++ b/pkg/network/tracer/connection/kprobe/manager.go @@ -11,14 +11,15 @@ import ( manager "github.com/DataDog/ebpf-manager" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" - "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" - "github.com/DataDog/datadog-agent/pkg/network/tracer/connection/util" + "github.com/DataDog/datadog-agent/pkg/util/slices" ) var mainProbes = []probes.ProbeFuncName{ probes.NetDevQueue, probes.ProtocolClassifierEntrySocketFilter, + probes.ProtocolClassifierTLSClientSocketFilter, + probes.ProtocolClassifierTLSServerSocketFilter, probes.ProtocolClassifierQueuesSocketFilter, probes.ProtocolClassifierDBsSocketFilter, probes.ProtocolClassifierGRPCSocketFilter, @@ -32,9 +33,7 @@ var mainProbes = []probes.ProbeFuncName{ probes.TCPReadSockReturn, probes.TCPClose, 
probes.TCPDone, - probes.TCPDoneFlushReturn, probes.TCPCloseCleanProtocolsReturn, - probes.TCPCloseFlushReturn, probes.TCPConnect, probes.TCPFinishConnect, probes.IPMakeSkb, @@ -50,9 +49,7 @@ var mainProbes = []probes.ProbeFuncName{ probes.InetCskAcceptReturn, probes.InetCskListenStop, probes.UDPDestroySock, - probes.UDPDestroySockReturn, probes.UDPv6DestroySock, - probes.UDPv6DestroySockReturn, probes.InetBind, probes.Inet6Bind, probes.InetBindRet, @@ -61,7 +58,14 @@ var mainProbes = []probes.ProbeFuncName{ probes.UDPSendPageReturn, } -func initManager(mgr *ddebpf.Manager, connCloseEventHandler ddebpf.EventHandler, runtimeTracer bool, cfg *config.Config) error { +var batchProbes = []probes.ProbeFuncName{ + probes.TCPDoneFlushReturn, + probes.TCPCloseFlushReturn, + probes.UDPDestroySockReturn, + probes.UDPv6DestroySockReturn, +} + +func initManager(mgr *ddebpf.Manager, runtimeTracer bool) error { mgr.Maps = []*manager.Map{ {Name: probes.ConnMap}, {Name: probes.TCPStatsMap}, @@ -82,45 +86,45 @@ func initManager(mgr *ddebpf.Manager, connCloseEventHandler ddebpf.EventHandler, {Name: probes.ClassificationProgsMap}, {Name: probes.TCPCloseProgsMap}, } - util.SetupClosedConnHandler(connCloseEventHandler, mgr, cfg) - for _, funcName := range mainProbes { - p := &manager.Probe{ + var funcNameToProbe = func(funcName probes.ProbeFuncName) *manager.Probe { + return &manager.Probe{ ProbeIdentificationPair: manager.ProbeIdentificationPair{ EBPFFuncName: funcName, UID: probeUID, }, } - mgr.Probes = append(mgr.Probes, p) } - mgr.Probes = append(mgr.Probes, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.SKBFreeDatagramLocked, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UnderscoredSKBFreeDatagramLocked, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.SKBConsumeUDP, UID: probeUID}}, - ) + mgr.Probes = append(mgr.Probes, slices.Map(mainProbes, funcNameToProbe)...) + mgr.Probes = append(mgr.Probes, slices.Map(batchProbes, funcNameToProbe)...) + mgr.Probes = append(mgr.Probes, slices.Map([]probes.ProbeFuncName{ + probes.SKBFreeDatagramLocked, + probes.UnderscoredSKBFreeDatagramLocked, + probes.SKBConsumeUDP, + }, funcNameToProbe)...) if !runtimeTracer { // the runtime compiled tracer has no need for separate probes targeting specific kernel versions, since it can // do that with #ifdefs inline. Thus, the following probes should only be declared as existing in the prebuilt // tracer. 
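The config.go hunks earlier in this diff enable the *FlushReturn and *DestroySockReturn probes only when cfg.CustomBatchingEnabled is set, and the manager.go hunk here moves those probes out of mainProbes into a dedicated batchProbes list. A minimal sketch of that pattern, gating a set of probes on a single config flag; the Config struct, enableProbe helper, and probe name strings below are illustrative stand-ins, not the repository's types:

package main

import "fmt"

// Config stands in for pkg/network/config.Config; only the flag used by the
// sketch is modeled here.
type Config struct {
	CustomBatchingEnabled bool
}

// enableProbe records a probe function name in the set of probes to activate,
// mirroring the helper used by enabledProbes.
func enableProbe(enabled map[string]struct{}, name string) {
	enabled[name] = struct{}{}
}

// enabledProbes returns the probe set for a given config. The flush return
// probes only drive the custom batching path, so they are activated only
// when that path is configured.
func enabledProbes(c *Config) map[string]struct{} {
	enabled := map[string]struct{}{}
	enableProbe(enabled, "kprobe__tcp_close")
	enableProbe(enabled, "kprobe__tcp_done")
	if c.CustomBatchingEnabled {
		enableProbe(enabled, "kretprobe__tcp_close_flush")
		enableProbe(enabled, "kretprobe__tcp_done_flush")
	}
	return enabled
}

func main() {
	for name := range enabledProbes(&Config{CustomBatchingEnabled: false}) {
		fmt.Println(name)
	}
}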
- mgr.Probes = append(mgr.Probes, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.TCPRetransmitPre470, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.IPMakeSkbPre4180, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.IP6MakeSkbPre470, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.IP6MakeSkbPre5180, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPRecvMsgPre5190, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPv6RecvMsgPre5190, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPRecvMsgPre470, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPv6RecvMsgPre470, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPRecvMsgPre410, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPv6RecvMsgPre410, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPRecvMsgReturnPre470, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.UDPv6RecvMsgReturnPre470, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.TCPSendMsgPre410, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.TCPRecvMsgPre410, UID: probeUID}}, - &manager.Probe{ProbeIdentificationPair: manager.ProbeIdentificationPair{EBPFFuncName: probes.TCPRecvMsgPre5190, UID: probeUID}}, - ) + mgr.Probes = append(mgr.Probes, slices.Map([]probes.ProbeFuncName{ + probes.TCPRetransmitPre470, + probes.IPMakeSkbPre4180, + probes.IP6MakeSkbPre470, + probes.IP6MakeSkbPre5180, + probes.UDPRecvMsgPre5190, + probes.UDPv6RecvMsgPre5190, + probes.UDPRecvMsgPre470, + probes.UDPv6RecvMsgPre470, + probes.UDPRecvMsgPre410, + probes.UDPv6RecvMsgPre410, + probes.UDPRecvMsgReturnPre470, + probes.UDPv6RecvMsgReturnPre470, + probes.TCPSendMsgPre410, + probes.TCPRecvMsgPre410, + probes.TCPRecvMsgPre5190, + }, funcNameToProbe)...) 
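initManager now builds manager.Probe values by mapping probe function names through a single funcNameToProbe constructor instead of repeating the struct literal for every probe. The slices.Map helper from pkg/util/slices is not shown in this diff, so the generic Map below is an assumption about its shape; the sketch only demonstrates the mapping pattern:

package main

import "fmt"

// Probe stands in for manager.Probe: just enough structure to show the mapping.
type Probe struct {
	EBPFFuncName string
	UID          string
}

// Map is an assumed, minimal equivalent of the slices.Map helper used above:
// apply f to every element of s and collect the results.
func Map[E, R any](s []E, f func(E) R) []R {
	out := make([]R, 0, len(s))
	for _, e := range s {
		out = append(out, f(e))
	}
	return out
}

func main() {
	const probeUID = "net"
	mainProbes := []string{"kprobe__tcp_sendmsg", "kprobe__tcp_close", "kprobe__udp_sendmsg"}

	// One constructor shared by every probe list, as in initManager.
	funcNameToProbe := func(funcName string) *Probe {
		return &Probe{EBPFFuncName: funcName, UID: probeUID}
	}

	for _, p := range Map(mainProbes, funcNameToProbe) {
		fmt.Printf("%s (uid=%s)\n", p.EBPFFuncName, p.UID)
	}
}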
} return nil diff --git a/pkg/network/tracer/connection/kprobe/tracer.go b/pkg/network/tracer/connection/kprobe/tracer.go index 61ee6a8808771..09bee0db65855 100644 --- a/pkg/network/tracer/connection/kprobe/tracer.go +++ b/pkg/network/tracer/connection/kprobe/tracer.go @@ -16,6 +16,7 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + "github.com/DataDog/datadog-agent/pkg/ebpf/perf" "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/network/config" @@ -30,12 +31,16 @@ import ( const probeUID = "net" -type TracerType int //nolint:revive // TODO +// TracerType is the type of tracer +type TracerType int const ( - TracerTypePrebuilt TracerType = iota //nolint:revive // TODO - TracerTypeRuntimeCompiled //nolint:revive // TODO - TracerTypeCORE //nolint:revive // TODO + // TracerTypePrebuilt is the prebuilt tracer type + TracerTypePrebuilt TracerType = iota + // TracerTypeRuntimeCompiled is the runtime compiled tracer type + TracerTypeRuntimeCompiled + // TracerTypeCORE is the CORE tracer type + TracerTypeCORE ) var ( @@ -44,41 +49,6 @@ var ( // - 2492d3b867043f6880708d095a7a5d65debcfc32 classificationMinimumKernel = kernel.VersionCode(4, 11, 0) - protocolClassificationTailCalls = []manager.TailCallRoute{ - { - ProgArrayName: probes.ClassificationProgsMap, - Key: netebpf.ClassificationQueues, - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: probes.ProtocolClassifierQueuesSocketFilter, - UID: probeUID, - }, - }, - { - ProgArrayName: probes.ClassificationProgsMap, - Key: netebpf.ClassificationDBs, - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: probes.ProtocolClassifierDBsSocketFilter, - UID: probeUID, - }, - }, - { - ProgArrayName: probes.ClassificationProgsMap, - Key: netebpf.ClassificationGRPC, - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: probes.ProtocolClassifierGRPCSocketFilter, - UID: probeUID, - }, - }, - { - ProgArrayName: probes.TCPCloseProgsMap, - Key: 0, - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - EBPFFuncName: probes.TCPCloseFlushReturn, - UID: probeUID, - }, - }, - } - // these primarily exist for mocking out in tests coreTracerLoader = loadCORETracer rcTracerLoader = loadRuntimeCompiledTracer @@ -87,11 +57,13 @@ var ( tracerOffsetGuesserRunner = offsetguess.TracerOffsets.Offsets errCORETracerNotSupported = errors.New("CO-RE tracer not supported on this platform") + + rhel9KernelVersion = kernel.VersionCode(5, 14, 0) ) // ClassificationSupported returns true if the current kernel version supports the classification feature. 
-// The kernel has to be newer than 4.7.0 since we are using bpf_skb_load_bytes (4.5.0+) method to read from the socket -// filter, and a tracepoint (4.7.0+) +// The kernel has to be newer than 4.11.0 since we are using bpf_skb_load_bytes (4.5.0+) method which was added to +// socket filters in 4.11.0, and a tracepoint (4.7.0+) func ClassificationSupported(config *config.Config) bool { if !config.ProtocolClassificationEnabled { return false @@ -105,11 +77,27 @@ func ClassificationSupported(config *config.Config) bool { return false } - return currentKernelVersion >= classificationMinimumKernel + if currentKernelVersion < classificationMinimumKernel { + return false + } + + // TODO: fix protocol classification is not supported on RHEL 9+ + family, err := kernel.Family() + if err != nil { + log.Warnf("could not determine OS family: %s", err) + return false + } + + if family == "rhel" && currentKernelVersion >= rhel9KernelVersion { + log.Warn("protocol classification is currently not supported on RHEL 9+") + return false + } + + return true } // LoadTracer loads the co-re/prebuilt/runtime compiled network tracer, depending on config -func LoadTracer(cfg *config.Config, mgrOpts manager.Options, connCloseEventHandler ddebpf.EventHandler, failedConnsHandler ddebpf.EventHandler) (*manager.Manager, func(), TracerType, error) { //nolint:revive // TODO +func LoadTracer(cfg *config.Config, mgrOpts manager.Options, connCloseEventHandler *perf.EventHandler) (*ddebpf.Manager, func(), TracerType, error) { kprobeAttachMethod := manager.AttachKprobeWithPerfEventOpen if cfg.AttachKprobesWithKprobeEventsABI { kprobeAttachMethod = manager.AttachKprobeWithKprobeEvents @@ -123,7 +111,7 @@ func LoadTracer(cfg *config.Config, mgrOpts manager.Options, connCloseEventHandl return nil, nil, TracerTypeCORE, fmt.Errorf("error determining if CO-RE tracer is supported: %w", err) } - var m *manager.Manager + var m *ddebpf.Manager var closeFn func() if err == nil { m, closeFn, err = coreTracerLoader(cfg, mgrOpts, connCloseEventHandler) @@ -174,18 +162,11 @@ func LoadTracer(cfg *config.Config, mgrOpts manager.Options, connCloseEventHandl return m, closeFn, TracerTypePrebuilt, err } -func loadTracerFromAsset(buf bytecode.AssetReader, runtimeTracer, coreTracer bool, config *config.Config, mgrOpts manager.Options, connCloseEventHandler ddebpf.EventHandler) (*manager.Manager, func(), error) { - m := ddebpf.NewManagerWithDefault(&manager.Manager{}, "network", &ebpftelemetry.ErrorsTelemetryModifier{}) - if err := initManager(m, connCloseEventHandler, runtimeTracer, config); err != nil { +func loadTracerFromAsset(buf bytecode.AssetReader, runtimeTracer, coreTracer bool, config *config.Config, mgrOpts manager.Options, connCloseEventHandler *perf.EventHandler) (*ddebpf.Manager, func(), error) { + m := ddebpf.NewManagerWithDefault(&manager.Manager{}, "network", &ebpftelemetry.ErrorsTelemetryModifier{}, connCloseEventHandler) + if err := initManager(m, runtimeTracer); err != nil { return nil, nil, fmt.Errorf("could not initialize manager: %w", err) } - switch connCloseEventHandler.(type) { - case *ddebpf.RingBufferHandler: - util.EnableRingbuffersViaMapEditor(&mgrOpts) - util.AddBoolConst(&mgrOpts, "ringbuffers_enabled", true) - } - - var undefinedProbes []manager.ProbeIdentificationPair var closeProtocolClassifierSocketFilterFn func() classificationSupported := ClassificationSupported(config) @@ -193,8 +174,9 @@ func loadTracerFromAsset(buf bytecode.AssetReader, runtimeTracer, coreTracer boo var tailCallsIdentifiersSet 
map[manager.ProbeIdentificationPair]struct{} if classificationSupported { - tailCallsIdentifiersSet = make(map[manager.ProbeIdentificationPair]struct{}, len(protocolClassificationTailCalls)) - for _, tailCall := range protocolClassificationTailCalls { + pcTailCalls := protocolClassificationTailCalls(config) + tailCallsIdentifiersSet = make(map[manager.ProbeIdentificationPair]struct{}, len(pcTailCalls)) + for _, tailCall := range pcTailCalls { tailCallsIdentifiersSet[tailCall.ProbeIdentificationPair] = struct{}{} } socketFilterProbe, _ := m.GetProbe(manager.ProbeIdentificationPair{ @@ -202,7 +184,7 @@ func loadTracerFromAsset(buf bytecode.AssetReader, runtimeTracer, coreTracer boo UID: probeUID, }) if socketFilterProbe == nil { - return nil, nil, fmt.Errorf("error retrieving protocol classifier socket filter") + return nil, nil, errors.New("error retrieving protocol classifier socket filter") } var err error @@ -211,9 +193,7 @@ func loadTracerFromAsset(buf bytecode.AssetReader, runtimeTracer, coreTracer boo return nil, nil, fmt.Errorf("error enabling protocol classifier: %w", err) } - //nolint:ineffassign,staticcheck // TODO(NET) Fix ineffassign linter // TODO(NET) Fix staticcheck linter - undefinedProbes = append(undefinedProbes, protocolClassificationTailCalls[0].ProbeIdentificationPair) - mgrOpts.TailCallRouter = append(mgrOpts.TailCallRouter, protocolClassificationTailCalls...) + mgrOpts.TailCallRouter = append(mgrOpts.TailCallRouter, pcTailCalls...) } else { // Kernels < 4.7.0 do not know about the per-cpu array map used // in classification, preventing the program to load even though @@ -267,11 +247,11 @@ func loadTracerFromAsset(buf bytecode.AssetReader, runtimeTracer, coreTracer boo return nil, nil, fmt.Errorf("failed to init ebpf manager: %w", err) } - return m.Manager, closeProtocolClassifierSocketFilterFn, nil + return m, closeProtocolClassifierSocketFilterFn, nil } -func loadCORETracer(config *config.Config, mgrOpts manager.Options, connCloseEventHandler ddebpf.EventHandler) (*manager.Manager, func(), error) { - var m *manager.Manager +func loadCORETracer(config *config.Config, mgrOpts manager.Options, connCloseEventHandler *perf.EventHandler) (*ddebpf.Manager, func(), error) { + var m *ddebpf.Manager var closeFn func() var err error err = ddebpf.LoadCOREAsset(netebpf.ModuleFileName("tracer", config.BPFDebug), func(ar bytecode.AssetReader, o manager.Options) error { @@ -288,7 +268,7 @@ func loadCORETracer(config *config.Config, mgrOpts manager.Options, connCloseEve return m, closeFn, err } -func loadRuntimeCompiledTracer(config *config.Config, mgrOpts manager.Options, connCloseEventHandler ddebpf.EventHandler) (*manager.Manager, func(), error) { +func loadRuntimeCompiledTracer(config *config.Config, mgrOpts manager.Options, connCloseEventHandler *perf.EventHandler) (*ddebpf.Manager, func(), error) { buf, err := getRuntimeCompiledTracer(config) if err != nil { return nil, nil, err @@ -298,7 +278,7 @@ func loadRuntimeCompiledTracer(config *config.Config, mgrOpts manager.Options, c return tracerLoaderFromAsset(buf, true, false, config, mgrOpts, connCloseEventHandler) } -func loadPrebuiltTracer(config *config.Config, mgrOpts manager.Options, connCloseEventHandler ddebpf.EventHandler) (*manager.Manager, func(), error) { +func loadPrebuiltTracer(config *config.Config, mgrOpts manager.Options, connCloseEventHandler *perf.EventHandler) (*ddebpf.Manager, func(), error) { buf, err := netebpf.ReadBPFModule(config.BPFDir, config.BPFDebug) if err != nil { return nil, nil, fmt.Errorf("could 
not read bpf module: %w", err) diff --git a/pkg/network/tracer/connection/kprobe/tracer_test.go b/pkg/network/tracer/connection/kprobe/tracer_test.go index 4c4a155ce12b9..972e4a9c48eef 100644 --- a/pkg/network/tracer/connection/kprobe/tracer_test.go +++ b/pkg/network/tracer/connection/kprobe/tracer_test.go @@ -10,13 +10,14 @@ package kprobe import ( "testing" + manager "github.com/DataDog/ebpf-manager" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - manager "github.com/DataDog/ebpf-manager" - ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/bytecode" + "github.com/DataDog/datadog-agent/pkg/ebpf/perf" + "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -169,14 +170,14 @@ func testTracerFallbackCOREAndRCErr(t *testing.T) { runFallbackTests(t, "CORE and RC error", true, true, tests) } -func loaderFunc(closeFn func(), err error) func(_ *config.Config, _ manager.Options, _ ddebpf.EventHandler) (*manager.Manager, func(), error) { - return func(_ *config.Config, _ manager.Options, _ ddebpf.EventHandler) (*manager.Manager, func(), error) { +func loaderFunc(closeFn func(), err error) func(_ *config.Config, _ manager.Options, _ *perf.EventHandler) (*ddebpf.Manager, func(), error) { + return func(_ *config.Config, _ manager.Options, _ *perf.EventHandler) (*ddebpf.Manager, func(), error) { return nil, closeFn, err } } -func prebuiltLoaderFunc(closeFn func(), err error) func(_ *config.Config, _ manager.Options, _ ddebpf.EventHandler) (*manager.Manager, func(), error) { - return func(_ *config.Config, _ manager.Options, _ ddebpf.EventHandler) (*manager.Manager, func(), error) { +func prebuiltLoaderFunc(closeFn func(), err error) func(_ *config.Config, _ manager.Options, _ *perf.EventHandler) (*ddebpf.Manager, func(), error) { + return func(_ *config.Config, _ manager.Options, _ *perf.EventHandler) (*ddebpf.Manager, func(), error) { return nil, closeFn, err } } @@ -216,7 +217,7 @@ func runFallbackTests(t *testing.T, desc string, coreErr, rcErr bool, tests []st cfg.AllowPrebuiltFallback = te.allowPrebuiltFallback prevOffsetGuessingRun := offsetGuessingRun - _, closeFn, tracerType, err := LoadTracer(cfg, manager.Options{}, nil, nil) + _, closeFn, tracerType, err := LoadTracer(cfg, manager.Options{}, nil) if te.err == nil { assert.NoError(t, err, "%+v", te) } else { @@ -251,12 +252,12 @@ func TestCORETracerSupported(t *testing.T) { }) coreCalled := false - coreTracerLoader = func(*config.Config, manager.Options, ddebpf.EventHandler) (*manager.Manager, func(), error) { + coreTracerLoader = func(*config.Config, manager.Options, *perf.EventHandler) (*ddebpf.Manager, func(), error) { coreCalled = true return nil, nil, nil } prebuiltCalled := false - prebuiltTracerLoader = func(*config.Config, manager.Options, ddebpf.EventHandler) (*manager.Manager, func(), error) { + prebuiltTracerLoader = func(*config.Config, manager.Options, *perf.EventHandler) (*ddebpf.Manager, func(), error) { prebuiltCalled = true return nil, nil, nil } @@ -270,7 +271,7 @@ func TestCORETracerSupported(t *testing.T) { cfg := config.New() cfg.EnableCORE = true cfg.AllowRuntimeCompiledFallback = false - _, _, _, err = LoadTracer(cfg, manager.Options{}, nil, nil) + _, _, _, err = LoadTracer(cfg, manager.Options{}, nil) assert.False(t, prebuiltCalled) if kv < kernel.VersionCode(4, 4, 128) && platform != "centos" && platform != "redhat" { assert.False(t, 
coreCalled) @@ -283,7 +284,7 @@ func TestCORETracerSupported(t *testing.T) { coreCalled = false prebuiltCalled = false cfg.AllowRuntimeCompiledFallback = true - _, _, _, err = LoadTracer(cfg, manager.Options{}, nil, nil) + _, _, _, err = LoadTracer(cfg, manager.Options{}, nil) assert.NoError(t, err) if kv < kernel.VersionCode(4, 4, 128) && platform != "centos" && platform != "redhat" { assert.False(t, coreCalled) @@ -296,7 +297,7 @@ func TestCORETracerSupported(t *testing.T) { func TestDefaultKprobeMaxActiveSet(t *testing.T) { prevLoader := tracerLoaderFromAsset - tracerLoaderFromAsset = func(_ bytecode.AssetReader, _, _ bool, _ *config.Config, mgrOpts manager.Options, _ ddebpf.EventHandler) (*manager.Manager, func(), error) { + tracerLoaderFromAsset = func(_ bytecode.AssetReader, _, _ bool, _ *config.Config, mgrOpts manager.Options, _ *perf.EventHandler) (*ddebpf.Manager, func(), error) { assert.Equal(t, mgrOpts.DefaultKProbeMaxActive, 128) return nil, nil, nil } @@ -305,24 +306,33 @@ func TestDefaultKprobeMaxActiveSet(t *testing.T) { t.Run("CO-RE", func(t *testing.T) { cfg := config.New() cfg.EnableCORE = true + cfg.EnableRuntimeCompiler = false cfg.AllowRuntimeCompiledFallback = false - _, _, _, err := LoadTracer(cfg, manager.Options{DefaultKProbeMaxActive: 128}, nil, nil) + cfg.AllowPrebuiltFallback = false + _, _, _, err := LoadTracer(cfg, manager.Options{DefaultKProbeMaxActive: 128}, nil) require.NoError(t, err) }) t.Run("prebuilt", func(t *testing.T) { + if prebuilt.IsDeprecated() { + t.Skip("prebuilt not supported on this platform") + } cfg := config.New() cfg.EnableCORE = false + cfg.EnableRuntimeCompiler = false cfg.AllowRuntimeCompiledFallback = false - _, _, _, err := LoadTracer(cfg, manager.Options{DefaultKProbeMaxActive: 128}, nil, nil) + cfg.AllowPrebuiltFallback = false + _, _, _, err := LoadTracer(cfg, manager.Options{DefaultKProbeMaxActive: 128}, nil) require.NoError(t, err) }) t.Run("runtime_compiled", func(t *testing.T) { cfg := config.New() cfg.EnableCORE = false - cfg.AllowRuntimeCompiledFallback = true - _, _, _, err := LoadTracer(cfg, manager.Options{DefaultKProbeMaxActive: 128}, nil, nil) + cfg.AllowPrebuiltFallback = false + cfg.AllowRuntimeCompiledFallback = false + cfg.EnableRuntimeCompiler = true + _, _, _, err := LoadTracer(cfg, manager.Options{DefaultKProbeMaxActive: 128}, nil) require.NoError(t, err) }) } diff --git a/pkg/network/tracer/connection/perf_batching.go b/pkg/network/tracer/connection/perf_batching.go index 9c20044cde3da..93754574a10f2 100644 --- a/pkg/network/tracer/connection/perf_batching.go +++ b/pkg/network/tracer/connection/perf_batching.go @@ -8,6 +8,7 @@ package connection import ( + "errors" "fmt" "time" @@ -17,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network" netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" + ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) // perfBatchManager is responsible for two things: @@ -27,16 +29,18 @@ import ( // The motivation is to impose an upper limit on how long a TCP close connection // event remains stored in the eBPF map before being processed by the NetworkAgent. 
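The reworked batch manager below stops copying closed connections into a shared ConnectionBuffer; instead it takes *network.ConnectionStats objects from a ddsync.PoolGetter and hands each one to a callback, and the consumer later returns them through a PoolReleaser. The typed pool sketched here is an assumption about what such a wrapper over sync.Pool might look like; only the Get/Put shape is taken from the diff:

package main

import (
	"fmt"
	"sync"
)

// PoolGetter hands out *T values and PoolReleaser takes them back; the names
// mirror the interfaces referenced in the diff, the bodies are assumptions.
type PoolGetter[T any] interface{ Get() *T }
type PoolReleaser[T any] interface{ Put(*T) }

// TypedPool is a generic wrapper around sync.Pool.
type TypedPool[T any] struct{ p sync.Pool }

func NewDefaultTypedPool[T any]() *TypedPool[T] {
	return &TypedPool[T]{p: sync.Pool{New: func() any { return new(T) }}}
}

func (t *TypedPool[T]) Get() *T { return t.p.Get().(*T) }

// Put zeroes the value before returning it to the pool so stale connection
// data never leaks into the next Get.
func (t *TypedPool[T]) Put(v *T) {
	*v = *new(T)
	t.p.Put(v)
}

// ConnectionStats stands in for network.ConnectionStats.
type ConnectionStats struct{ SentBytes, RecvBytes uint64 }

func main() {
	var pool interface {
		PoolGetter[ConnectionStats]
		PoolReleaser[ConnectionStats]
	} = NewDefaultTypedPool[ConnectionStats]()

	c := pool.Get()
	c.SentBytes = 42
	fmt.Println("sent:", c.SentBytes)
	pool.Put(c)
}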
type perfBatchManager struct { - batchMap *maps.GenericMap[uint32, netebpf.Batch] - extractor *batchExtractor - ch *cookieHasher + batchMap *maps.GenericMap[uint32, netebpf.Batch] + extractor *batchExtractor + ch *cookieHasher + connGetter ddsync.PoolGetter[network.ConnectionStats] + callback func(stats *network.ConnectionStats) } // newPerfBatchManager returns a new `PerfBatchManager` and initializes the // eBPF map that holds the tcp_close batch objects. -func newPerfBatchManager(batchMap *maps.GenericMap[uint32, netebpf.Batch], extractor *batchExtractor) (*perfBatchManager, error) { +func newPerfBatchManager(batchMap *maps.GenericMap[uint32, netebpf.Batch], extractor *batchExtractor, getter ddsync.PoolGetter[network.ConnectionStats], callback func(stats *network.ConnectionStats)) (*perfBatchManager, error) { if batchMap == nil { - return nil, fmt.Errorf("batchMap is nil") + return nil, errors.New("batchMap is nil") } for cpu := uint32(0); cpu < uint32(extractor.NumCPUs()); cpu++ { @@ -51,25 +55,18 @@ func newPerfBatchManager(batchMap *maps.GenericMap[uint32, netebpf.Batch], extra } return &perfBatchManager{ - batchMap: batchMap, - extractor: extractor, - ch: newCookieHasher(), + batchMap: batchMap, + extractor: extractor, + ch: newCookieHasher(), + connGetter: getter, + callback: callback, }, nil } -// ExtractBatchInto extracts from the given batch all connections that haven't been processed yet. -func (p *perfBatchManager) ExtractBatchInto(buffer *network.ConnectionBuffer, b *netebpf.Batch) { - for rc := p.extractor.NextConnection(b); rc != nil; rc = p.extractor.NextConnection(b) { - conn := buffer.Next() - populateConnStats(conn, &rc.Tup, &rc.Conn_stats, p.ch) - updateTCPStats(conn, &rc.Tcp_stats) - } -} - -// GetPendingConns return all connections that are in batches that are not yet full. +// Flush returns all connections that are in batches that are not yet full. // It tracks which connections have been processed by this call, by batch id. // This prevents double-processing of connections between GetPendingConns and Extract.
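Flush (below) replaces ExtractBatchInto/GetPendingConns: it pushes each pending connection through the callback and then emits a nil sentinel so the consumer knows the flush has completed. A reduced, self-contained sketch of that handshake, with the eBPF batch map replaced by a plain slice; everything here is illustrative except the callback-plus-nil-sentinel convention itself:

package main

import "fmt"

// ConnectionStats stands in for network.ConnectionStats.
type ConnectionStats struct{ Pid uint32 }

// flushPending mimics perfBatchManager.Flush: emit every pending connection
// through the callback, then signal completion with a nil sentinel.
func flushPending(pending []*ConnectionStats, callback func(*ConnectionStats)) {
	for _, c := range pending {
		callback(c)
	}
	callback(nil) // "done with all pending connections"
}

func main() {
	flushDone := make(chan struct{})
	var got []*ConnectionStats

	// The consumer side (compare tcpCloseConsumer.Callback further down):
	// nil means the flush finished, anything else is a closed connection.
	callback := func(c *ConnectionStats) {
		if c == nil {
			close(flushDone)
			return
		}
		got = append(got, c)
	}

	go flushPending([]*ConnectionStats{{Pid: 1}, {Pid: 2}}, callback)
	<-flushDone
	fmt.Println("flushed", len(got), "connections")
}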
-func (p *perfBatchManager) GetPendingConns(buffer *network.ConnectionBuffer) { +func (p *perfBatchManager) Flush() { b := new(netebpf.Batch) for cpu := uint32(0); cpu < uint32(p.extractor.NumCPUs()); cpu++ { err := p.batchMap.Lookup(&cpu, b) @@ -78,20 +75,23 @@ func (p *perfBatchManager) GetPendingConns(buffer *network.ConnectionBuffer) { } for rc := p.extractor.NextConnection(b); rc != nil; rc = p.extractor.NextConnection(b) { - c := buffer.Next() - populateConnStats(c, &rc.Tup, &rc.Conn_stats, p.ch) - updateTCPStats(c, &rc.Tcp_stats) + c := p.connGetter.Get() + c.FromConn(rc) + p.ch.Hash(c) + p.callback(c) } } + // indicate we are done with all pending connection + p.callback(nil) p.extractor.CleanupExpiredState(time.Now()) } -func newConnBatchManager(mgr *manager.Manager, extractor *batchExtractor) (*perfBatchManager, error) { +func newConnBatchManager(mgr *manager.Manager, extractor *batchExtractor, connGetter ddsync.PoolGetter[network.ConnectionStats], closedCallback func(stats *network.ConnectionStats)) (*perfBatchManager, error) { connCloseMap, err := maps.GetMap[uint32, netebpf.Batch](mgr, probes.ConnCloseBatchMap) if err != nil { return nil, fmt.Errorf("unable to get map %s: %s", probes.ConnCloseBatchMap, err) } - batchMgr, err := newPerfBatchManager(connCloseMap, extractor) + batchMgr, err := newPerfBatchManager(connCloseMap, extractor, connGetter, closedCallback) if err != nil { return nil, err } diff --git a/pkg/network/tracer/connection/perf_batching_test.go b/pkg/network/tracer/connection/perf_batching_test.go index c7d22aaff83aa..82095728412c4 100644 --- a/pkg/network/tracer/connection/perf_batching_test.go +++ b/pkg/network/tracer/connection/perf_batching_test.go @@ -19,6 +19,7 @@ import ( ebpfmaps "github.com/DataDog/datadog-agent/pkg/ebpf/maps" "github.com/DataDog/datadog-agent/pkg/network" netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" + ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) const ( @@ -26,7 +27,15 @@ const ( ) func TestGetPendingConns(t *testing.T) { - manager := newTestBatchManager(t) + var pendingConns []*network.ConnectionStats + flushDone := make(chan struct{}) + manager := newTestBatchManager(t, func(conn *network.ConnectionStats) { + if conn == nil { + flushDone <- struct{}{} + return + } + pendingConns = append(pendingConns, conn) + }) batch := new(netebpf.Batch) batch.Id = 0 @@ -41,9 +50,8 @@ func TestGetPendingConns(t *testing.T) { } updateBatch() - buffer := network.NewConnectionBuffer(256, 256) - manager.GetPendingConns(buffer) - pendingConns := buffer.Connections() + go manager.Flush() + <-flushDone assert.GreaterOrEqual(t, len(pendingConns), 2) for _, pid := range []uint32{pidMax + 1, pidMax + 2} { found := false @@ -64,9 +72,9 @@ func TestGetPendingConns(t *testing.T) { updateBatch() // We should now get only the connection that hasn't been processed before - buffer.Reset() - manager.GetPendingConns(buffer) - pendingConns = buffer.Connections() + go manager.Flush() + pendingConns = pendingConns[:0] + <-flushDone assert.GreaterOrEqual(t, len(pendingConns), 1) var found bool for _, p := range pendingConns { @@ -80,7 +88,12 @@ func TestGetPendingConns(t *testing.T) { } func TestPerfBatchStateCleanup(t *testing.T) { - manager := newTestBatchManager(t) + flushDone := make(chan struct{}) + manager := newTestBatchManager(t, func(stats *network.ConnectionStats) { + if stats == nil { + flushDone <- struct{}{} + } + }) manager.extractor.expiredStateInterval = 100 * time.Millisecond batch := new(netebpf.Batch) @@ -93,14 +106,15 @@ 
func TestPerfBatchStateCleanup(t *testing.T) { err := manager.batchMap.Put(&cpu, batch) require.NoError(t, err) - buffer := network.NewConnectionBuffer(256, 256) - manager.GetPendingConns(buffer) + go manager.Flush() + <-flushDone _, ok := manager.extractor.stateByCPU[cpu].processed[batch.Id] require.True(t, ok) assert.Equal(t, uint16(2), manager.extractor.stateByCPU[cpu].processed[batch.Id].offset) manager.extractor.CleanupExpiredState(time.Now().Add(manager.extractor.expiredStateInterval)) - manager.GetPendingConns(buffer) + go manager.Flush() + <-flushDone // state should not have been cleaned up, since no more connections have happened _, ok = manager.extractor.stateByCPU[cpu].processed[batch.Id] @@ -108,7 +122,7 @@ func TestPerfBatchStateCleanup(t *testing.T) { assert.Equal(t, uint16(2), manager.extractor.stateByCPU[cpu].processed[batch.Id].offset) } -func newTestBatchManager(t *testing.T) *perfBatchManager { +func newTestBatchManager(t *testing.T, callback func(*network.ConnectionStats)) *perfBatchManager { require.NoError(t, rlimit.RemoveMemlock()) m, err := ebpf.NewMap(&ebpf.MapSpec{ Type: ebpf.Hash, @@ -122,7 +136,8 @@ func newTestBatchManager(t *testing.T) *perfBatchManager { gm, err := ebpfmaps.Map[uint32, netebpf.Batch](m) require.NoError(t, err) extractor := newBatchExtractor(numTestCPUs) - mgr, err := newPerfBatchManager(gm, extractor) + connPool := ddsync.NewDefaultTypedPool[network.ConnectionStats]() + mgr, err := newPerfBatchManager(gm, extractor, connPool, callback) require.NoError(t, err) return mgr } diff --git a/pkg/network/tracer/connection/tcp_close_consumer.go b/pkg/network/tracer/connection/tcp_close_consumer.go index 3c9ff45dba9f0..5e1d45fae5afa 100644 --- a/pkg/network/tracer/connection/tcp_close_consumer.go +++ b/pkg/network/tracer/connection/tcp_close_consumer.go @@ -9,46 +9,48 @@ package connection import ( "sync" - "time" - "unsafe" + "sync/atomic" - ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" + "github.com/DataDog/datadog-agent/pkg/ebpf/perf" "github.com/DataDog/datadog-agent/pkg/network" - netebpf "github.com/DataDog/datadog-agent/pkg/network/ebpf" "github.com/DataDog/datadog-agent/pkg/status/health" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" + ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) const closeConsumerModuleName = "network_tracer__ebpf" // Telemetry var closeConsumerTelemetry = struct { - perfReceived telemetry.Counter - perfLost telemetry.Counter + perfReceived telemetry.Counter + flushReceived telemetry.Counter }{ telemetry.NewCounter(closeConsumerModuleName, "closed_conn_polling_received", []string{}, "Counter measuring the number of closed connections received"), - telemetry.NewCounter(closeConsumerModuleName, "closed_conn_polling_lost", []string{}, "Counter measuring the number of closed connection batches lost (were transmitted from ebpf but never received)"), + telemetry.NewCounter(closeConsumerModuleName, "closed_conn_flush_received", []string{}, "Counter measuring the number of closed connections received during flush"), } type tcpCloseConsumer struct { - eventHandler ddebpf.EventHandler - batchManager *perfBatchManager - requests chan chan struct{} - buffer *network.ConnectionBuffer - once sync.Once - closed chan struct{} - ch *cookieHasher + requests chan chan struct{} + once sync.Once + closed chan struct{} + + flusher perf.Flusher + callback func(*network.ConnectionStats) + releaser ddsync.PoolReleaser[network.ConnectionStats] + flushChannel chan chan struct{} + 
flushing *atomic.Bool } -func newTCPCloseConsumer(eventHandler ddebpf.EventHandler, batchManager *perfBatchManager) *tcpCloseConsumer { +func newTCPCloseConsumer(flusher perf.Flusher, releaser ddsync.PoolReleaser[network.ConnectionStats]) *tcpCloseConsumer { return &tcpCloseConsumer{ - eventHandler: eventHandler, - batchManager: batchManager, requests: make(chan chan struct{}), - buffer: network.NewConnectionBuffer(netebpf.BatchSize, netebpf.BatchSize), closed: make(chan struct{}), - ch: newCookieHasher(), + flusher: flusher, + releaser: releaser, + callback: func(*network.ConnectionStats) {}, + flushChannel: make(chan chan struct{}, 1), + flushing: &atomic.Bool{}, } } @@ -75,101 +77,52 @@ func (c *tcpCloseConsumer) Stop() { if c == nil { return } - c.eventHandler.Stop() c.once.Do(func() { close(c.closed) }) } -func (c *tcpCloseConsumer) extractConn(data []byte) { - ct := (*netebpf.Conn)(unsafe.Pointer(&data[0])) - conn := c.buffer.Next() - populateConnStats(conn, &ct.Tup, &ct.Conn_stats, c.ch) - updateTCPStats(conn, &ct.Tcp_stats) +func (c *tcpCloseConsumer) Callback(conn *network.ConnectionStats) { + // sentinel record post-flush + if conn == nil { + request := <-c.flushChannel + close(request) + c.flushing.Store(false) + return + } + + closeConsumerTelemetry.perfReceived.Inc() + if c.flushing.Load() { + closeConsumerTelemetry.flushReceived.Inc() + } + c.callback(conn) + c.releaser.Put(conn) } func (c *tcpCloseConsumer) Start(callback func(*network.ConnectionStats)) { if c == nil { return } - health := health.RegisterLiveness("network-tracer") - - var ( - then = time.Now() - closedCount uint64 - lostSamplesCount uint64 - ) + c.callback = callback + liveHealth := health.RegisterLiveness("network-tracer") go func() { defer func() { - err := health.Deregister() + err := liveHealth.Deregister() if err != nil { log.Warnf("error de-registering health check: %s", err) } }() - dataChannel := c.eventHandler.DataChannel() - lostChannel := c.eventHandler.LostChannel() for { select { - case <-c.closed: return - case <-health.C: - case batchData, ok := <-dataChannel: - if !ok { - return - } - - l := len(batchData.Data) - switch { - case l >= netebpf.SizeofBatch: - batch := netebpf.ToBatch(batchData.Data) - c.batchManager.ExtractBatchInto(c.buffer, batch) - case l >= netebpf.SizeofConn: - c.extractConn(batchData.Data) - default: - log.Errorf("unknown type received from perf buffer, skipping. 
data size=%d, expecting %d or %d", len(batchData.Data), netebpf.SizeofConn, netebpf.SizeofBatch) - continue - } - - closeConsumerTelemetry.perfReceived.Add(float64(c.buffer.Len())) - closedCount += uint64(c.buffer.Len()) - conns := c.buffer.Connections() - for i := range conns { - callback(&conns[i]) - } - c.buffer.Reset() - batchData.Done() - // lost events only occur when using perf buffers - case lc, ok := <-lostChannel: - if !ok { - return - } - closeConsumerTelemetry.perfLost.Add(float64(lc)) - lostSamplesCount += lc + case <-liveHealth.C: case request := <-c.requests: - oneTimeBuffer := network.NewConnectionBuffer(32, 32) - c.batchManager.GetPendingConns(oneTimeBuffer) - conns := oneTimeBuffer.Connections() - for i := range conns { - callback(&conns[i]) - } - close(request) - - closedCount += uint64(oneTimeBuffer.Len()) - now := time.Now() - elapsed := now.Sub(then) - then = now - log.Debugf( - "tcp close summary: closed_count=%d elapsed=%s closed_rate=%.2f/s lost_samples_count=%d", - closedCount, - elapsed, - float64(closedCount)/elapsed.Seconds(), - lostSamplesCount, - ) - closedCount = 0 - lostSamplesCount = 0 + c.flushing.Store(true) + c.flushChannel <- request + c.flusher.Flush() } } }() diff --git a/pkg/network/tracer/connection/tcp_close_consumer_test.go b/pkg/network/tracer/connection/tcp_close_consumer_test.go index 96db4d9bfc04c..6bdbf4024962e 100644 --- a/pkg/network/tracer/connection/tcp_close_consumer_test.go +++ b/pkg/network/tracer/connection/tcp_close_consumer_test.go @@ -11,15 +11,10 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/DataDog/datadog-agent/pkg/ebpf" ) func TestTcpCloseConsumerStopRace(t *testing.T) { - pf := ebpf.NewPerfHandler(10) - require.NotNil(t, pf) - - c := newTCPCloseConsumer(pf, nil) + c := newTCPCloseConsumer(nil, nil) require.NotNil(t, c) c.Stop() diff --git a/pkg/network/tracer/connection/tracer.go b/pkg/network/tracer/connection/tracer.go index 7b73e332ba1a8..3720758649705 100644 --- a/pkg/network/tracer/connection/tracer.go +++ b/pkg/network/tracer/connection/tracer.go @@ -22,11 +22,16 @@ import ( type TracerType int const ( - TracerTypeKProbePrebuilt TracerType = iota //nolint:revive // TODO - TracerTypeKProbeRuntimeCompiled //nolint:revive // TODO - TracerTypeKProbeCORE //nolint:revive // TODO - TracerTypeFentry //nolint:revive // TODO - TracerTypeEbpfless //nolint:revive // TODO + // TracerTypeKProbePrebuilt is the TracerType for prebuilt kprobe tracer + TracerTypeKProbePrebuilt TracerType = iota + // TracerTypeKProbeRuntimeCompiled is the TracerType for the runtime compiled kprobe tracer + TracerTypeKProbeRuntimeCompiled + // TracerTypeKProbeCORE is the TracerType for the CORE kprobe tracer + TracerTypeKProbeCORE + // TracerTypeFentry is the TracerType for the fentry tracer + TracerTypeFentry + // TracerTypeEbpfless is the TracerType for the EBPF-less tracer + TracerTypeEbpfless ) const ( diff --git a/pkg/network/tracer/connection/util/conn_tracer.go b/pkg/network/tracer/connection/util/conn_tracer.go index 1fc7e12934058..811c656e7a43a 100644 --- a/pkg/network/tracer/connection/util/conn_tracer.go +++ b/pkg/network/tracer/connection/util/conn_tracer.go @@ -14,16 +14,10 @@ import ( manager "github.com/DataDog/ebpf-manager" cebpf "github.com/cilium/ebpf" - "github.com/cilium/ebpf/asm" - "github.com/DataDog/datadog-agent/pkg/ebpf" - ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/network" - "github.com/DataDog/datadog-agent/pkg/network/config" netebpf 
"github.com/DataDog/datadog-agent/pkg/network/ebpf" - "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" "github.com/DataDog/datadog-agent/pkg/process/util" - "github.com/DataDog/datadog-agent/pkg/util/log" ) // toPowerOf2 converts a number to its nearest power of 2 @@ -32,9 +26,9 @@ func toPowerOf2(x int) int { return int(math.Pow(2, math.Round(log2))) } -// computeDefaultClosedConnRingBufferSize is the default buffer size of the ring buffer for closed connection events. +// ComputeDefaultClosedConnRingBufferSize is the default buffer size of the ring buffer for closed connection events. // Must be a power of 2 and a multiple of the page size -func computeDefaultClosedConnRingBufferSize() int { +func ComputeDefaultClosedConnRingBufferSize() int { numCPUs, err := cebpf.PossibleCPU() if err != nil { numCPUs = 1 @@ -42,68 +36,12 @@ func computeDefaultClosedConnRingBufferSize() int { return 8 * toPowerOf2(numCPUs) * os.Getpagesize() } -// computeDefaultClosedConnPerfBufferSize is the default buffer size of the perf buffer for closed connection events. +// ComputeDefaultClosedConnPerfBufferSize is the default buffer size of the perf buffer for closed connection events. // Must be a multiple of the page size -func computeDefaultClosedConnPerfBufferSize() int { +func ComputeDefaultClosedConnPerfBufferSize() int { return 8 * os.Getpagesize() } -// EnableRingbuffersViaMapEditor sets up the ring buffer for closed connection events via a map editor -func EnableRingbuffersViaMapEditor(mgrOpts *manager.Options) { - mgrOpts.MapSpecEditors[probes.ConnCloseEventMap] = manager.MapSpecEditor{ - Type: cebpf.RingBuf, - MaxEntries: uint32(computeDefaultClosedConnRingBufferSize()), - KeySize: 0, - ValueSize: 0, - EditorFlag: manager.EditType | manager.EditMaxEntries | manager.EditKeyValue, - } -} - -// SetupHandler sets up the closed connection event handler -func SetupHandler(eventHandler ebpf.EventHandler, mgr *ebpf.Manager, cfg *config.Config, perfSize int, mapName probes.BPFMapName) { - switch handler := eventHandler.(type) { - case *ebpf.RingBufferHandler: - log.Infof("Setting up connection handler for map %v with ring buffer", mapName) - rb := &manager.RingBuffer{ - Map: manager.Map{Name: mapName}, - RingBufferOptions: manager.RingBufferOptions{ - RecordGetter: handler.RecordGetter, - RecordHandler: handler.RecordHandler, - TelemetryEnabled: cfg.InternalTelemetryEnabled, - }, - } - mgr.RingBuffers = append(mgr.RingBuffers, rb) - ebpftelemetry.ReportRingBufferTelemetry(rb) - case *ebpf.PerfHandler: - log.Infof("Setting up connection handler for map %v with perf buffer", mapName) - pm := &manager.PerfMap{ - Map: manager.Map{Name: mapName}, - PerfMapOptions: manager.PerfMapOptions{ - PerfRingBufferSize: perfSize, - Watermark: 1, - RecordHandler: handler.RecordHandler, - LostHandler: handler.LostHandler, - RecordGetter: handler.RecordGetter, - TelemetryEnabled: cfg.InternalTelemetryEnabled, - }, - } - mgr.PerfMaps = append(mgr.PerfMaps, pm) - ebpftelemetry.ReportPerfMapTelemetry(pm) - helperCallRemover := ebpf.NewHelperCallRemover(asm.FnRingbufOutput) - err := helperCallRemover.BeforeInit(mgr.Manager, mgr.Name, nil) - if err != nil { - log.Error("Failed to remove helper calls from eBPF programs: ", err) - } - default: - log.Errorf("Failed to set up connection handler for map %v: unknown event handler type", mapName) - } -} - -// SetupClosedConnHandler sets up the closed connection event handler -func SetupClosedConnHandler(connCloseEventHandler ebpf.EventHandler, mgr *ebpf.Manager, cfg 
*config.Config) { - SetupHandler(connCloseEventHandler, mgr, cfg, computeDefaultClosedConnPerfBufferSize(), probes.ConnCloseEventMap) -} - // AddBoolConst modifies the options to include a constant editor for a boolean value func AddBoolConst(options *manager.Options, name string, flag bool) { val := uint64(1) diff --git a/pkg/network/tracer/ebpf_conntracker_test.go b/pkg/network/tracer/ebpf_conntracker_test.go index e36e60de2000b..3ceb4095a3ee9 100644 --- a/pkg/network/tracer/ebpf_conntracker_test.go +++ b/pkg/network/tracer/ebpf_conntracker_test.go @@ -15,6 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/ebpf/probes" "github.com/DataDog/datadog-agent/pkg/network/tracer/offsetguess" @@ -33,7 +34,7 @@ func ebpfCOREConntrackerSupportedOnKernelT(t *testing.T) bool { } func skipPrebuiltEbpfConntrackerTestOnUnsupportedKernel(t *testing.T) { - if !ebpfPrebuiltConntrackerSupportedOnKernelT(t) { + if !ebpfPrebuiltConntrackerSupportedOnKernelT(t) || prebuilt.IsDeprecated() { t.Skip("Skipping prebuilt ebpf conntracker related test on unsupported kernel") } } diff --git a/pkg/network/tracer/offsetguess/conntrack.go b/pkg/network/tracer/offsetguess/conntrack.go index 379340c44d89f..60752a56ae901 100644 --- a/pkg/network/tracer/offsetguess/conntrack.go +++ b/pkg/network/tracer/offsetguess/conntrack.go @@ -5,7 +5,8 @@ //go:build linux_bpf -package offsetguess //nolint:revive // TODO +// Package offsetguess provides offsetguesses for tracer +package offsetguess import ( "fmt" @@ -44,7 +45,8 @@ type conntrackOffsetGuesser struct { udpv6Enabled uint64 } -func NewConntrackOffsetGuesser(cfg *config.Config) (OffsetGuesser, error) { //nolint:revive // TODO +// NewConntrackOffsetGuesser creates a new OffsetGuesser +func NewConntrackOffsetGuesser(cfg *config.Config) (OffsetGuesser, error) { tcpv6Enabled, udpv6Enabled := getIpv6Configuration(cfg) tcpv6EnabledConst, udpv6EnabledConst := boolToUint64(tcpv6Enabled), boolToUint64(udpv6Enabled) return &conntrackOffsetGuesser{ diff --git a/pkg/network/tracer/offsetguess/offsetguess.go b/pkg/network/tracer/offsetguess/offsetguess.go index 8889b0e837fd6..164d629fb540d 100644 --- a/pkg/network/tracer/offsetguess/offsetguess.go +++ b/pkg/network/tracer/offsetguess/offsetguess.go @@ -80,7 +80,8 @@ var whatString = map[GuessWhat]string{ GuessCtNet: "conntrack network namespace", } -type OffsetGuesser interface { //nolint:revive // TODO +// OffsetGuesser provides offset guesses +type OffsetGuesser interface { Manager() *manager.Manager Probes(c *config.Config) (map[string]struct{}, error) Guess(c *config.Config) ([]manager.ConstantEditor, error) @@ -109,9 +110,6 @@ type fieldValues struct { daddrFl6 [4]uint32 sportFl6 uint16 dportFl6 uint16 - - //nolint:unused // TODO(NET) Fix unused linter - ctStatus uint32 } func idPair(name probes.ProbeFuncName) manager.ProbeIdentificationPair { @@ -186,7 +184,8 @@ func setupOffsetGuesser(guesser OffsetGuesser, config *config.Config, buf byteco return nil } -func RunOffsetGuessing(cfg *config.Config, buf bytecode.AssetReader, newGuesser func() (OffsetGuesser, error)) (editors []manager.ConstantEditor, err error) { //nolint:revive // TODO +// RunOffsetGuessing will run offset guessing +func RunOffsetGuessing(cfg *config.Config, buf bytecode.AssetReader, newGuesser func() (OffsetGuesser, error)) (editors []manager.ConstantEditor, err error) { 
// Offset guessing has been flaky for some customers, so if it fails we'll retry it up to 5 times start := time.Now() for i := 0; i < 5; i++ { diff --git a/pkg/network/tracer/offsetguess/tracer.go b/pkg/network/tracer/offsetguess/tracer.go index 20b4c4f4b3c45..1f2fa420074df 100644 --- a/pkg/network/tracer/offsetguess/tracer.go +++ b/pkg/network/tracer/offsetguess/tracer.go @@ -77,7 +77,8 @@ type tracerOffsetGuesser struct { guessUDPv6 bool } -func NewTracerOffsetGuesser() (OffsetGuesser, error) { //nolint:revive // TODO +// NewTracerOffsetGuesser creates a new OffsetGuesser +func NewTracerOffsetGuesser() (OffsetGuesser, error) { return &tracerOffsetGuesser{ m: &manager.Manager{ Maps: []*manager.Map{ @@ -161,7 +162,7 @@ func expectedValues(conn net.Conn) (*fieldValues, error) { return nil, err } - tcpInfo, err := TcpGetInfo(conn) + tcpInfo, err := TCPGetInfo(conn) if err != nil { return nil, err } @@ -286,7 +287,8 @@ func uint32ArrayFromIPv6(ip net.IP) (addr [4]uint32, err error) { // IPv6LinkLocalPrefix is only exposed for testing purposes var IPv6LinkLocalPrefix = "fe80::" -func GetIPv6LinkLocalAddress() ([]*net.UDPAddr, error) { //nolint:revive // TODO +// GetIPv6LinkLocalAddress returns the link local addresses +func GetIPv6LinkLocalAddress() ([]*net.UDPAddr, error) { ints, err := net.Interfaces() if err != nil { return nil, err @@ -1017,7 +1019,7 @@ func (e *tracerEventGenerator) Generate(status GuessWhat, expected *fieldValues) } // This triggers the KProbe handler attached to `tcp_getsockopt` - _, err := TcpGetInfo(e.conn) + _, err := TCPGetInfo(e.conn) return err } @@ -1080,12 +1082,12 @@ func acceptHandler(l net.Listener) { } } -// TcpGetInfo obtains information from a TCP socket via GETSOCKOPT(2) system call. +// TCPGetInfo obtains information from a TCP socket via GETSOCKOPT(2) system call. // The motivation for using this is twofold: 1) it is a way of triggering the kprobe // responsible for the V4 offset guessing in kernel-space and 2) using it we can obtain // in user-space TCP socket information such as RTT and use it for setting the expected // values in the `fieldValues` struct. 
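TcpGetInfo is renamed to TCPGetInfo below to satisfy the revive linter; per the comment above, its job is to issue getsockopt(TCP_INFO) on the connection. The function in this sketch is not the repository's implementation, just one way the same information can be read from a *net.TCPConn using golang.org/x/sys/unix:

//go:build linux

package main

import (
	"fmt"
	"net"

	"golang.org/x/sys/unix"
)

// tcpGetInfo is an illustrative equivalent of TCPGetInfo: it reads TCP_INFO
// for the connection's file descriptor via getsockopt(2).
func tcpGetInfo(conn net.Conn) (*unix.TCPInfo, error) {
	tcpConn, ok := conn.(*net.TCPConn)
	if !ok {
		return nil, fmt.Errorf("not a TCPConn")
	}
	raw, err := tcpConn.SyscallConn()
	if err != nil {
		return nil, err
	}
	var info *unix.TCPInfo
	var sockErr error
	// Control only lends us the fd for the duration of the closure.
	err = raw.Control(func(fd uintptr) {
		info, sockErr = unix.GetsockoptTCPInfo(int(fd), unix.IPPROTO_TCP, unix.TCP_INFO)
	})
	if err != nil {
		return nil, err
	}
	return info, sockErr
}

func main() {
	// Assumes something is listening locally; adjust the address as needed.
	c, err := net.Dial("tcp", "127.0.0.1:80")
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer c.Close()
	if info, err := tcpGetInfo(c); err == nil {
		fmt.Println("smoothed RTT (us):", info.Rtt)
	} else {
		fmt.Println("getsockopt:", err)
	}
}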
-func TcpGetInfo(conn net.Conn) (*unix.TCPInfo, error) { //nolint:revive // TODO +func TCPGetInfo(conn net.Conn) (*unix.TCPInfo, error) { tcpConn, ok := conn.(*net.TCPConn) if !ok { return nil, fmt.Errorf("not a TCPConn") @@ -1149,7 +1151,8 @@ func newUDPServer(addr string) (string, func(), error) { return ln.LocalAddr().String(), doneFn, nil } -var TracerOffsets tracerOffsets //nolint:revive // TODO +// TracerOffsets is the global tracer offsets +var TracerOffsets tracerOffsets type tracerOffsets struct { offsets []manager.ConstantEditor diff --git a/pkg/network/tracer/offsetguess_test.go b/pkg/network/tracer/offsetguess_test.go index a73523d48207b..b024e0e2ccfca 100644 --- a/pkg/network/tracer/offsetguess_test.go +++ b/pkg/network/tracer/offsetguess_test.go @@ -300,7 +300,7 @@ func testOffsetGuess(t *testing.T) { } var offset uint64 - var name offsetT = o //nolint:revive // TODO + var name = o require.NoError(t, mp.Lookup(&name, &offset)) assert.Equal(t, offset, consts[o], "unexpected offset for %s", o) t.Logf("offset %s expected: %d guessed: %d", o, offset, consts[o]) diff --git a/pkg/network/tracer/tracer.go b/pkg/network/tracer/tracer.go index cc10d988a4f48..c26a78b3ac9f8 100644 --- a/pkg/network/tracer/tracer.go +++ b/pkg/network/tracer/tracer.go @@ -462,7 +462,7 @@ func (t *Tracer) GetActiveConnections(clientID string) (*network.Connections, er conns.ConnTelemetry = t.state.GetTelemetryDelta(clientID, t.getConnTelemetry(len(active))) conns.CompilationTelemetryByAsset = t.getRuntimeCompilationTelemetry() conns.KernelHeaderFetchResult = int32(kernel.HeaderProvider.GetResult()) - conns.CORETelemetryByAsset = ebpftelemetry.GetCORETelemetryByAsset() + conns.CORETelemetryByAsset = ddebpf.GetCORETelemetryByAsset() conns.PrebuiltAssets = netebpf.GetModulesInUse() t.lastCheck.Store(time.Now().Unix()) @@ -896,7 +896,7 @@ const connProtoCleaningInterval = 65 * time.Second // slight jitter to avoid all // setupConnectionProtocolMapCleaner sets up a map cleaner for the connectionProtocolMap. // It will run every connProtoCleaningInterval and delete entries older than connProtoTTL. 
func setupConnectionProtocolMapCleaner(connectionProtocolMap *ebpf.Map, name string) (*ddebpf.MapCleaner[netebpf.ConnTuple, netebpf.ProtocolStackWrapper], error) { - mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, netebpf.ProtocolStackWrapper](connectionProtocolMap, 1024, name, "npm_tracer") + mapCleaner, err := ddebpf.NewMapCleaner[netebpf.ConnTuple, netebpf.ProtocolStackWrapper](connectionProtocolMap, 1, name, "npm_tracer") if err != nil { return nil, err } diff --git a/pkg/network/tracer/tracer_dump_conntrack.go b/pkg/network/tracer/tracer_dump_conntrack.go index d1d4f0680ee07..8bd078bbd1bff 100644 --- a/pkg/network/tracer/tracer_dump_conntrack.go +++ b/pkg/network/tracer/tracer_dump_conntrack.go @@ -58,7 +58,7 @@ func (table *DebugConntrackTable) WriteTo(w io.Writer, maxEntries int) error { // in this case the table itself is incomplete due to closing the netlink socket part-way if table.IsTruncated { - _, err = fmt.Fprintln(w, "netlink table truncated due to response timeout, some entries may be missing") //nolint:ineffassign, staticcheck // TODO + _, _ = fmt.Fprintln(w, "netlink table truncated due to response timeout, some entries may be missing") } // used to stop writing once we reach maxEntries diff --git a/pkg/network/tracer/tracer_linux_test.go b/pkg/network/tracer/tracer_linux_test.go index 46f33ac7fa405..bcd9ad785c7ff 100644 --- a/pkg/network/tracer/tracer_linux_test.go +++ b/pkg/network/tracer/tracer_linux_test.go @@ -11,6 +11,7 @@ import ( "bufio" "bytes" "context" + "crypto/tls" "errors" "fmt" "io" @@ -50,8 +51,12 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/config/sysctl" "github.com/DataDog/datadog-agent/pkg/network/events" netlinktestutil "github.com/DataDog/datadog-agent/pkg/network/netlink/testutil" + "github.com/DataDog/datadog-agent/pkg/network/protocols" + usmtestutil "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" + ddtls "github.com/DataDog/datadog-agent/pkg/network/protocols/tls" "github.com/DataDog/datadog-agent/pkg/network/testutil" "github.com/DataDog/datadog-agent/pkg/network/tracer/connection" + "github.com/DataDog/datadog-agent/pkg/network/tracer/connection/kprobe" "github.com/DataDog/datadog-agent/pkg/network/tracer/offsetguess" tracertestutil "github.com/DataDog/datadog-agent/pkg/network/tracer/testutil" "github.com/DataDog/datadog-agent/pkg/network/tracer/testutil/testdns" @@ -303,7 +308,7 @@ func (s *TracerSuite) TestTCPRTT() { require.NoError(t, err) // Obtain information from a TCP socket via GETSOCKOPT(2) system call. 
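The hunk below keeps the connProtoCleaningInterval / connProtoTTL scheme but changes the second argument to ddebpf.NewMapCleaner from 1024 to 1 (presumably a batch size). ddebpf.MapCleaner itself is not part of this diff, so the sketch only illustrates the general idea, a ticker that evicts entries older than a TTL, over an ordinary Go map rather than an eBPF map:

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCleaner periodically evicts entries whose timestamp is older than a TTL,
// in the spirit of the map cleaner configured above.
type ttlCleaner struct {
	mu      sync.Mutex
	entries map[string]time.Time
}

func (c *ttlCleaner) start(interval, ttl time.Duration, stop <-chan struct{}) {
	ticker := time.NewTicker(interval)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case now := <-ticker.C:
				c.mu.Lock()
				for k, seen := range c.entries {
					if now.Sub(seen) > ttl {
						delete(c.entries, k)
					}
				}
				c.mu.Unlock()
			}
		}
	}()
}

func main() {
	c := &ttlCleaner{entries: map[string]time.Time{"conn-1": time.Now()}}
	stop := make(chan struct{})
	defer close(stop)
	c.start(50*time.Millisecond, 100*time.Millisecond, stop)

	time.Sleep(300 * time.Millisecond)
	c.mu.Lock()
	fmt.Println("entries left:", len(c.entries)) // the expired entry has been evicted
	c.mu.Unlock()
}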
- tcpInfo, err := offsetguess.TcpGetInfo(c) + tcpInfo, err := offsetguess.TCPGetInfo(c) require.NoError(t, err) require.EventuallyWithT(t, func(ct *assert.CollectT) { @@ -381,7 +386,7 @@ func (s *TracerSuite) TestTCPMiscount() { assert.False(t, uint64(len(x)) == conn.Monotonic.SentBytes) } - assert.NotZero(t, connection.EbpfTracerTelemetry.LastTcpSentMiscounts.Load()) + assert.NotZero(t, connection.EbpfTracerTelemetry.LastTCPSentMiscounts.Load()) } func (s *TracerSuite) TestConnectionExpirationRegression() { @@ -513,10 +518,14 @@ func (s *TracerSuite) TestConntrackExpiration() { // connections when the first lookup fails func (s *TracerSuite) TestConntrackDelays() { t := s.T() + cfg := testConfig() + // fargate does not have CAP_NET_ADMIN + skipOnEbpflessNotSupported(t, cfg) + netlinktestutil.SetupDNAT(t) wg := sync.WaitGroup{} - tr := setupTracer(t, testConfig()) + tr := setupTracer(t, cfg) // This will ensure that the first lookup for every connection fails, while the following ones succeed tr.conntracker = tracertestutil.NewDelayedConntracker(tr.conntracker, 1) @@ -556,10 +565,14 @@ func (s *TracerSuite) TestConntrackDelays() { func (s *TracerSuite) TestTranslationBindingRegression() { t := s.T() + cfg := testConfig() + // fargate does not have CAP_NET_ADMIN + skipOnEbpflessNotSupported(t, cfg) + netlinktestutil.SetupDNAT(t) wg := sync.WaitGroup{} - tr := setupTracer(t, testConfig()) + tr := setupTracer(t, cfg) // Setup TCP server server := tracertestutil.NewTCPServerOnAddress(fmt.Sprintf("1.1.1.1:%d", 0), func(c net.Conn) { @@ -1187,7 +1200,7 @@ func (s *TracerSuite) TestSelfConnect() { // sets up two udp sockets talking to each other locally. // returns (listener, dialer) -func setupUdpSockets(t *testing.T, udpnet, ip string) (*net.UDPConn, *net.UDPConn) { //nolint:revive // TODO +func setupUDPSockets(t *testing.T, udpnet, ip string) (*net.UDPConn, *net.UDPConn) { serverAddr := fmt.Sprintf("%s:%d", ip, 0) laddr, err := net.ResolveUDPAddr(udpnet, serverAddr) @@ -1233,7 +1246,7 @@ func testUDPPeekCount(t *testing.T, udpnet, ip string) { config := testConfig() tr := setupTracer(t, config) - ln, c := setupUdpSockets(t, udpnet, ip) + ln, c := setupUDPSockets(t, udpnet, ip) msg := []byte("asdf") _, err := c.Write(msg) @@ -1322,7 +1335,7 @@ func testUDPPacketSumming(t *testing.T, udpnet, ip string) { config := testConfig() tr := setupTracer(t, config) - ln, c := setupUdpSockets(t, udpnet, ip) + ln, c := setupUDPSockets(t, udpnet, ip) msg := []byte("asdf") // send UDP packets of increasing length @@ -1527,7 +1540,7 @@ func testUDPReusePort(t *testing.T, udpnet string, ip string) { // Iterate through active connections until we find connection created above, and confirm send + recv counts t.Logf("port: %d", assignedPort) - assert.EventuallyWithT(t, func(ct *assert.CollectT) { //nolint:revive // TODO + assert.EventuallyWithT(t, func(ct *assert.CollectT) { // use t instead of ct because getConnections uses require (not assert), and we get a better error message that way connections := getConnections(ct, tr) @@ -2029,13 +2042,12 @@ func (s *TracerSuite) TestPreexistingConnectionDirection() { } m := outgoing.Monotonic - assert.Equal(collect, clientMessageSize, int(m.SentBytes)) - // ebpfless RecvBytes is based off acknowledgements, so it can miss the first - // packet in a pre-existing connection + // skip byte counts in ebpfless: for ebpfless pre-existing connections, + // byte counts will miss the first couple packets while in connStatAttempted. 
if !tr.config.EnableEbpfless { + assert.Equal(collect, clientMessageSize, int(m.SentBytes)) assert.Equal(collect, serverMessageSize, int(m.RecvBytes)) - } - if !tr.config.EnableEbpfless { + assert.Equal(collect, os.Getpid(), int(outgoing.Pid)) } assert.Equal(collect, addrPort(server.Address()), int(outgoing.DPort)) @@ -2043,13 +2055,12 @@ func (s *TracerSuite) TestPreexistingConnectionDirection() { assert.Equal(collect, network.OUTGOING, outgoing.Direction) m = incoming.Monotonic - // ebpfless RecvBytes is based off acknowledgements, so it can miss the first - // packet in a pre-existing connection + // skip byte counts in ebpfless: for ebpfless pre-existing connections, + // byte counts will miss the first couple packets while in connStatAttempted. if !tr.config.EnableEbpfless { assert.Equal(collect, clientMessageSize, int(m.RecvBytes)) - } - assert.Equal(collect, serverMessageSize, int(m.SentBytes)) - if !tr.config.EnableEbpfless { + assert.Equal(collect, serverMessageSize, int(m.SentBytes)) + assert.Equal(collect, os.Getpid(), int(incoming.Pid)) } assert.Equal(collect, addrPort(server.Address()), int(incoming.SPort)) @@ -2590,6 +2601,196 @@ func setupDropTrafficRule(tb testing.TB) (ns string) { return } +func (s *TracerSuite) TestTLSClassification() { + t := s.T() + cfg := testConfig() + + if !kprobe.ClassificationSupported(cfg) { + t.Skip("protocol classification not supported") + } + + tr := setupTracer(t, cfg) + + type tlsTest struct { + name string + postTracerSetup func(t *testing.T) (port uint16, scenario uint16) + validation func(t *testing.T, tr *Tracer, port uint16, scenario uint16) + } + + tests := make([]tlsTest, 0) + for _, scenario := range []uint16{tls.VersionTLS10, tls.VersionTLS11, tls.VersionTLS12, tls.VersionTLS13} { + scenario := scenario + tests = append(tests, tlsTest{ + name: strings.Replace(tls.VersionName(scenario), " ", "-", 1), + postTracerSetup: func(t *testing.T) (uint16, uint16) { + srv := usmtestutil.NewTLSServerWithSpecificVersion("localhost:0", func(conn net.Conn) { + defer conn.Close() + _, err := io.Copy(conn, conn) + if err != nil { + fmt.Printf("Failed to echo data: %v\n", err) + return + } + }, scenario) + done := make(chan struct{}) + require.NoError(t, srv.Run(done)) + t.Cleanup(func() { close(done) }) + + // Retrieve the actual port assigned to the server + addr := srv.Address() + _, portStr, err := net.SplitHostPort(addr) + require.NoError(t, err) + portInt, err := strconv.Atoi(portStr) + require.NoError(t, err) + port := uint16(portInt) + + tlsConfig := &tls.Config{ + MinVersion: scenario, + MaxVersion: scenario, + InsecureSkipVerify: true, + SessionTicketsDisabled: true, + ClientSessionCache: nil, + } + conn, err := net.Dial("tcp", addr) + require.NoError(t, err) + defer conn.Close() + + tlsConn := tls.Client(conn, tlsConfig) + require.NoError(t, tlsConn.Handshake()) + + return port, scenario + }, + validation: func(t *testing.T, tr *Tracer, port uint16, scenario uint16) { + require.EventuallyWithT(t, func(ct *assert.CollectT) { + require.True(ct, validateTLSTags(ct, tr, port, scenario), "TLS tags not set") + }, 3*time.Second, 100*time.Millisecond, "couldn't find TLS connection matching: dst port %v", port) + }, + }) + } + tests = append(tests, tlsTest{ + name: "Invalid-TLS-Handshake", + postTracerSetup: func(t *testing.T) (uint16, uint16) { + // server that accepts connections but does not perform TLS handshake + listener, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err) + t.Cleanup(func() { listener.Close() }) + + go func() { 
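+ // Accept connections and read whatever the client sends, without ever replying or performing a TLS handshake.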
+ for { + conn, err := listener.Accept() + if err != nil { + return + } + go func(c net.Conn) { + defer c.Close() + buf := make([]byte, 1024) + _, _ = c.Read(buf) + // Do nothing with the data + }(conn) + } + }() + + // Retrieve the actual port from the listener address + addr := listener.Addr().String() + _, portStr, err := net.SplitHostPort(addr) + require.NoError(t, err) + portInt, err := strconv.Atoi(portStr) + require.NoError(t, err) + port := uint16(portInt) + + // Client connects to the server + conn, err := net.Dial("tcp", addr) + require.NoError(t, err) + defer conn.Close() + + // Send invalid TLS handshake data + _, err = conn.Write([]byte("invalid TLS data")) + require.NoError(t, err) + + // Since this is invalid TLS, scenario can be set to something irrelevant, e.g., TLS.VersionTLS12 + // or just 0 since the validation doesn't rely on the scenario for this test. + return port, tls.VersionTLS12 + }, + validation: func(t *testing.T, tr *Tracer, port uint16, _ uint16) { + // Verify that no TLS tags are set for this connection + require.EventuallyWithT(t, func(ct *assert.CollectT) { + payload := getConnections(ct, tr) + for _, c := range payload.Conns { + if c.DPort == port && c.ProtocolStack.Contains(protocols.TLS) { + t.Log("Unexpected TLS protocol detected for invalid handshake") + require.Fail(ct, "unexpected TLS tags") + } + } + }, 3*time.Second, 100*time.Millisecond) + }, + }) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if ebpftest.GetBuildMode() == ebpftest.Fentry { + t.Skip("protocol classification not supported for fentry tracer") + } + t.Cleanup(func() { + tr.RemoveClient(clientID) + _ = tr.Pause() + }) + tr.RemoveClient(clientID) + require.NoError(t, tr.RegisterClient(clientID)) + require.NoError(t, tr.Resume(), "enable probes - before post tracer") + port, scenario := tt.postTracerSetup(t) + require.NoError(t, tr.Pause(), "disable probes - after post tracer") + tt.validation(t, tr, port, scenario) + }) + } +} + +func validateTLSTags(t *assert.CollectT, tr *Tracer, port uint16, scenario uint16) bool { + payload := getConnections(t, tr) + for _, c := range payload.Conns { + if c.DPort == port && c.ProtocolStack.Contains(protocols.TLS) && !c.TLSTags.IsEmpty() { + tlsTags := c.TLSTags.GetDynamicTags() + + // Check that the cipher suite ID tag is present + cipherSuiteTagFound := false + for key := range tlsTags { + if strings.HasPrefix(key, ddtls.TagTLSCipherSuiteID) { + cipherSuiteTagFound = true + break + } + } + if !cipherSuiteTagFound { + return false + } + + // Check that the negotiated version tag is present + negotiatedVersionTag := ddtls.VersionTags[scenario] + if _, ok := tlsTags[negotiatedVersionTag]; !ok { + return false + } + + // Check that the client offered version tag is present + clientVersionTag := ddtls.ClientVersionTags[scenario] + if _, ok := tlsTags[clientVersionTag]; !ok { + return false + } + + if scenario == tls.VersionTLS13 { + expectedClientVersions := []string{ + ddtls.ClientVersionTags[tls.VersionTLS12], + ddtls.ClientVersionTags[tls.VersionTLS13], + } + for _, tag := range expectedClientVersions { + if _, ok := tlsTags[tag]; !ok { + return false + } + } + } + + return true + } + } + return false +} + func skipOnEbpflessNotSupported(t *testing.T, cfg *config.Config) { if cfg.EnableEbpfless { t.Skip("not supported on ebpf-less") diff --git a/pkg/network/tracer/tracer_test.go b/pkg/network/tracer/tracer_test.go index d50f4f299690c..bc45fe5cb12f3 100644 --- a/pkg/network/tracer/tracer_test.go +++ 
b/pkg/network/tracer/tracer_test.go @@ -392,28 +392,35 @@ func (s *TracerSuite) TestTCPConnsReported() { // Connect to server c, err := net.DialTimeout("tcp", server.Address(), 50*time.Millisecond) require.NoError(t, err) - defer c.Close() <-processedChan + c.Close() var forward *network.ConnectionStats var reverse *network.ConnectionStats - var okForward, okReverse bool // for ebpfless, it takes time for the packet capture to arrive, so poll require.EventuallyWithT(t, func(collect *assert.CollectT) { // Test connections := getConnections(collect, tr) - // Server-side - forward, okForward = findConnection(c.RemoteAddr(), c.LocalAddr(), connections) - require.True(collect, okForward) - // Client-side - reverse, okReverse = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) - require.True(collect, okReverse) - }, 3*time.Second, 100*time.Millisecond, "connection not found") - assert.Equal(t, network.INCOMING, forward.Direction) - assert.Equal(t, network.OUTGOING, reverse.Direction) - assert.Equal(t, network.StatCounters{TCPEstablished: 1, TCPClosed: 1}, forward.Monotonic) - assert.Equal(t, network.StatCounters{TCPEstablished: 1, TCPClosed: 0}, reverse.Monotonic) + if forward == nil { + // Server-side + forward, _ = findConnection(c.RemoteAddr(), c.LocalAddr(), connections) + } + if reverse == nil { + // Client-side + reverse, _ = findConnection(c.LocalAddr(), c.RemoteAddr(), connections) + } + + require.NotNil(collect, forward) + require.NotNil(collect, reverse) + + require.Equal(collect, network.INCOMING, forward.Direction) + require.Equal(collect, network.OUTGOING, reverse.Direction) + require.Equal(collect, uint16(1), forward.Monotonic.TCPEstablished) + require.Equal(collect, uint16(1), forward.Monotonic.TCPClosed) + require.Equal(collect, uint16(1), reverse.Monotonic.TCPEstablished) + require.Equal(collect, uint16(1), reverse.Monotonic.TCPClosed) + }, 3*time.Second, 100*time.Millisecond, "connection not found") } @@ -616,22 +623,25 @@ func (s *TracerSuite) TestShouldSkipExcludedConnection() { _, err = cn.Write([]byte("test")) assert.NoError(t, err) - // Make sure we're not picking up 127.0.0.1:80 - cxs := getConnections(t, tr) - for _, c := range cxs.Conns { - assert.False(t, c.Source.String() == "127.0.0.1" && c.SPort == 80, "connection %s should be excluded", c) - assert.False(t, c.Dest.String() == "127.0.0.1" && c.DPort == 80 && c.Type == network.TCP, "connection %s should be excluded", c) - } - - // ensure one of the connections is UDP to 127.0.0.1:80 - assert.Condition(t, func() bool { + require.EventuallyWithT(t, func(collect *assert.CollectT) { + // Make sure we're not picking up 127.0.0.1:80 + cxs := getConnections(collect, tr) for _, c := range cxs.Conns { - if c.Dest.String() == "127.0.0.1" && c.DPort == 80 && c.Type == network.UDP { - return true - } + assert.False(collect, c.Source.String() == "127.0.0.1" && c.SPort == 80, "connection %s should be excluded", c) + assert.False(collect, c.Dest.String() == "127.0.0.1" && c.DPort == 80 && c.Type == network.TCP, "connection %s should be excluded", c) } - return false - }, "Unable to find UDP connection to 127.0.0.1:80") + + // ensure one of the connections is UDP to 127.0.0.1:80 + assert.Condition(collect, func() bool { + for _, c := range cxs.Conns { + if c.Dest.String() == "127.0.0.1" && c.DPort == 80 && c.Type == network.UDP { + return true + } + } + return false + }, "Unable to find UDP connection to 127.0.0.1:80") + + }, 2*time.Second, 100*time.Millisecond) } func (s *TracerSuite) 
TestShouldExcludeEmptyStatsConnection() { @@ -1073,7 +1083,6 @@ func (s *TracerSuite) TestDNSStats() { func (s *TracerSuite) TestTCPEstablished() { t := s.T() - // Ensure closed connections are flushed as soon as possible cfg := testConfig() tr := setupTracer(t, cfg) diff --git a/pkg/network/usm/ebpf_gotls.go b/pkg/network/usm/ebpf_gotls.go index 2c1e31300612a..76877ca2df2ec 100644 --- a/pkg/network/usm/ebpf_gotls.go +++ b/pkg/network/usm/ebpf_gotls.go @@ -387,7 +387,7 @@ func registerCBCreator(mgr *manager.Manager, offsetsDataMap *ebpf.Map, probeIDs if errors.Is(err, safeelf.ErrNoSymbols) { binNoSymbolsMetric.Add(1) } - return fmt.Errorf("error extracting inspectoin data from %s: %w", filePath.HostPath, err) + return fmt.Errorf("error extracting inspection data from %s: %w", filePath.HostPath, err) } if err := addInspectionResultToMap(offsetsDataMap, filePath.ID, inspectionResult); err != nil { diff --git a/pkg/network/usm/ebpf_ssl_test.go b/pkg/network/usm/ebpf_ssl_test.go index 2e3ef3043d00d..e4b9576669cbf 100644 --- a/pkg/network/usm/ebpf_ssl_test.go +++ b/pkg/network/usm/ebpf_ssl_test.go @@ -38,7 +38,7 @@ func testArch(t *testing.T, arch string) { libmmap := filepath.Join(curDir, "testdata", "site-packages", "ddtrace") lib := filepath.Join(libmmap, fmt.Sprintf("libssl.so.%s", arch)) - monitor := setupUSMTLSMonitor(t, cfg) + monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) require.NotNil(t, monitor) cmd, err := fileopener.OpenFromAnotherProcess(t, lib) diff --git a/pkg/network/usm/kafka_monitor_test.go b/pkg/network/usm/kafka_monitor_test.go index c4f3e30c08ad4..b81f3622aa9a0 100644 --- a/pkg/network/usm/kafka_monitor_test.go +++ b/pkg/network/usm/kafka_monitor_test.go @@ -34,7 +34,6 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" - "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/protocols" @@ -45,6 +44,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/tracer/testutil/proxy" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" "github.com/DataDog/datadog-agent/pkg/network/usm/consts" + usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -138,11 +138,7 @@ func TestKafkaProtocolParsing(t *testing.T) { serverHost := "127.0.0.1" require.NoError(t, kafka.RunServer(t, serverHost, kafkaPort)) - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} - if !prebuilt.IsDeprecated() { - modes = append(modes, ebpftest.Prebuilt) - } - ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { suite.Run(t, new(KafkaProtocolParsingSuite)) }) } @@ -559,7 +555,7 @@ func (s *KafkaProtocolParsingSuite) testKafkaProtocolParsing(t *testing.T, tls b client.Client.Close() } }) - monitor := newKafkaMonitor(t, cfg) + monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) if tls && cfg.EnableGoTLSSupport { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) } @@ -1157,7 +1153,7 @@ func testKafkaFetchRaw(t *testing.T, tls bool, apiVersion int) { can.runServer() proxyPid := can.runProxy() - monitor := newKafkaMonitor(t, 
getDefaultTestConfiguration(tls)) + monitor := setupUSMTLSMonitor(t, getDefaultTestConfiguration(tls), useExistingConsumer) if tls { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyPid, utils.ManualTracingFallbackEnabled) } @@ -1385,7 +1381,7 @@ func testKafkaProduceRaw(t *testing.T, tls bool, apiVersion int) { can.runServer() proxyPid := can.runProxy() - monitor := newKafkaMonitor(t, getDefaultTestConfiguration(tls)) + monitor := setupUSMTLSMonitor(t, getDefaultTestConfiguration(tls), useExistingConsumer) if tls { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyPid, utils.ManualTracingFallbackEnabled) } @@ -1514,7 +1510,7 @@ func TestKafkaInFlightMapCleaner(t *testing.T) { cfg := getDefaultTestConfiguration(false) cfg.HTTPMapCleanerInterval = 5 * time.Second cfg.HTTPIdleConnectionTTL = time.Second - monitor := newKafkaMonitor(t, cfg) + monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) ebpfNow, err := ddebpf.NowNanoseconds() require.NoError(t, err) inFlightMap, _, err := monitor.ebpfProgram.GetMap("kafka_in_flight") @@ -1690,29 +1686,11 @@ func validateProduceFetchCountWithErrorCodes(t *assert.CollectT, kafkaStats map[ } } -func newKafkaMonitor(t *testing.T, cfg *config.Config) *Monitor { - monitor, err := NewMonitor(cfg, nil) - skipIfNotSupported(t, err) - require.NoError(t, err) - t.Cleanup(func() { - monitor.Stop() - }) - t.Cleanup(utils.ResetDebugger) - - err = monitor.Start() - require.NoError(t, err) - return monitor -} - // This test will help us identify if there is any verifier problems while loading the Kafka binary in the CI environment func TestLoadKafkaBinary(t *testing.T) { skipTestIfKernelNotSupported(t) - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} - if !prebuilt.IsDeprecated() { - modes = append(modes, ebpftest.Prebuilt) - } - ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { t.Run("debug", func(t *testing.T) { loadKafkaBinary(t, true) }) @@ -1730,5 +1708,5 @@ func loadKafkaBinary(t *testing.T, debug bool) { cfg.MaxTrackedConnections = 1000 cfg.BPFDebug = debug - newKafkaMonitor(t, cfg) + setupUSMTLSMonitor(t, cfg, useExistingConsumer) } diff --git a/pkg/network/usm/monitor_test.go b/pkg/network/usm/monitor_test.go index f899eb6c295b9..2be5e85965e03 100644 --- a/pkg/network/usm/monitor_test.go +++ b/pkg/network/usm/monitor_test.go @@ -30,14 +30,13 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" - "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" - "github.com/DataDog/datadog-agent/pkg/network/config" + networkConfig "github.com/DataDog/datadog-agent/pkg/network/config" netlink "github.com/DataDog/datadog-agent/pkg/network/netlink/testutil" "github.com/DataDog/datadog-agent/pkg/network/protocols" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" "github.com/DataDog/datadog-agent/pkg/network/protocols/http/testutil" - libtelemetry "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" + usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -102,11 +101,7 @@ func TestHTTP(t *testing.T) { if kv < usmconfig.MinimumKernelVersion { t.Skipf("USM is not supported on 
%v", kv) } - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} - if !prebuilt.IsDeprecated() { - modes = append(modes, ebpftest.Prebuilt) - } - ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { suite.Run(t, new(HTTPTestSuite)) }) } @@ -121,7 +116,7 @@ func (s *HTTPTestSuite) TestHTTPStats() { }) t.Cleanup(srvDoneFn) - monitor := newHTTPMonitorWithCfg(t, utils.NewUSMEmptyConfig()) + monitor := setupUSMTLSMonitor(t, getHTTPCfg(), useExistingConsumer) resp, err := nethttp.Get(fmt.Sprintf("http://%s/%d/test", serverAddr, nethttp.StatusNoContent)) require.NoError(t, err) @@ -153,7 +148,7 @@ func (s *HTTPTestSuite) TestHTTPMonitorLoadWithIncompleteBuffers() { slowServerAddr := "localhost:8080" fastServerAddr := "localhost:8081" - monitor := newHTTPMonitorWithCfg(t, utils.NewUSMEmptyConfig()) + monitor := setupUSMTLSMonitor(t, getHTTPCfg(), useExistingConsumer) slowSrvDoneFn := testutil.HTTPServer(t, slowServerAddr, testutil.Options{ SlowResponse: time.Millisecond * 500, // Half a second. WriteTimeout: time.Millisecond * 200, @@ -228,7 +223,7 @@ func (s *HTTPTestSuite) TestHTTPMonitorIntegrationWithResponseBody() { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - monitor := newHTTPMonitorWithCfg(t, utils.NewUSMEmptyConfig()) + monitor := setupUSMTLSMonitor(t, getHTTPCfg(), useExistingConsumer) srvDoneFn := testutil.HTTPServer(t, serverAddr, testutil.Options{ EnableKeepAlive: true, }) @@ -285,9 +280,10 @@ func (s *HTTPTestSuite) TestHTTPMonitorIntegrationSlowResponse() { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cfg := utils.NewUSMEmptyConfig() + cfg.EnableHTTPMonitoring = true cfg.HTTPMapCleanerInterval = time.Duration(tt.mapCleanerIntervalSeconds) * time.Second cfg.HTTPIdleConnectionTTL = time.Duration(tt.httpIdleConnectionTTLSeconds) * time.Second - monitor := newHTTPMonitorWithCfg(t, cfg) + monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) slowResponseTimeout := time.Duration(tt.slowResponseTime) * time.Second serverTimeout := slowResponseTimeout + time.Second @@ -351,7 +347,7 @@ func (s *HTTPTestSuite) TestSanity() { t.Run(tt.name, func(t *testing.T) { for _, keepAliveEnabled := range []bool{true, false} { t.Run(testNameHelper("with keep alive", "without keep alive", keepAliveEnabled), func(t *testing.T) { - monitor := newHTTPMonitorWithCfg(t, utils.NewUSMEmptyConfig()) + monitor := setupUSMTLSMonitor(t, getHTTPCfg(), useExistingConsumer) srvDoneFn := testutil.HTTPServer(t, tt.serverAddress, testutil.Options{EnableKeepAlive: keepAliveEnabled}) t.Cleanup(srvDoneFn) @@ -377,7 +373,7 @@ func (s *HTTPTestSuite) TestSanity() { func (s *HTTPTestSuite) TestRSTPacketRegression() { t := s.T() - monitor := newHTTPMonitorWithCfg(t, utils.NewUSMEmptyConfig()) + monitor := setupUSMTLSMonitor(t, getHTTPCfg(), useExistingConsumer) serverAddr := "127.0.0.1:8080" srvDoneFn := testutil.HTTPServer(t, serverAddr, testutil.Options{ @@ -412,7 +408,7 @@ func (s *HTTPTestSuite) TestRSTPacketRegression() { func (s *HTTPTestSuite) TestKeepAliveWithIncompleteResponseRegression() { t := s.T() - monitor := newHTTPMonitorWithCfg(t, utils.NewUSMEmptyConfig()) + monitor := setupUSMTLSMonitor(t, getHTTPCfg(), useExistingConsumer) const req = "GET /200/foobar HTTP/1.1\n" const rsp = "HTTP/1.1 200 OK\n" @@ -639,21 +635,10 @@ func countRequestOccurrences(allStats map[http.Key]*http.RequestStats, req *neth return occurrences } -func newHTTPMonitorWithCfg(t 
*testing.T, cfg *config.Config) *Monitor { +func getHTTPCfg() *networkConfig.Config { + cfg := utils.NewUSMEmptyConfig() cfg.EnableHTTPMonitoring = true - - monitor, err := NewMonitor(cfg, nil) - skipIfNotSupported(t, err) - require.NoError(t, err) - t.Cleanup(func() { - monitor.Stop() - libtelemetry.Clear() - }) - - // at this stage the test can be legitimately skipped due to missing BTF information - // in the context of CO-RE - require.NoError(t, monitor.Start()) - return monitor + return cfg } func skipIfNotSupported(t *testing.T, err error) { diff --git a/pkg/network/usm/monitor_tls_test.go b/pkg/network/usm/monitor_tls_test.go index 44ca4244b07a1..06700430167a7 100644 --- a/pkg/network/usm/monitor_tls_test.go +++ b/pkg/network/usm/monitor_tls_test.go @@ -21,6 +21,7 @@ import ( "path/filepath" "regexp" "strings" + "sync" "testing" "time" @@ -30,7 +31,7 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" - "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" + "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers" consumerstestutil "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" @@ -54,11 +55,7 @@ type tlsSuite struct { } func TestTLSSuite(t *testing.T) { - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} - if !prebuilt.IsDeprecated() { - modes = append(modes, ebpftest.Prebuilt) - } - ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { if !usmconfig.TLSSupported(utils.NewUSMEmptyConfig()) { t.Skip("TLS not supported for this setup") } @@ -192,7 +189,7 @@ func (s *tlsSuite) TestHTTPSViaLibraryIntegration() { } func testHTTPSLibrary(t *testing.T, cfg *config.Config, fetchCmd, prefetchLibs []string) { - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) // not ideal but, short process are hard to catch utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, "shared_libraries", prefetchLib(t, prefetchLibs...).Process.Pid, utils.ManualTracingFallbackDisabled) @@ -289,7 +286,7 @@ func (s *tlsSuite) TestOpenSSLVersions() { cfg := utils.NewUSMEmptyConfig() cfg.EnableNativeTLSMonitoring = true cfg.EnableHTTPMonitoring = true - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) addressOfHTTPPythonServer := "127.0.0.1:8001" cmd := testutil.HTTPPythonServer(t, addressOfHTTPPythonServer, testutil.Options{ @@ -361,7 +358,7 @@ func (s *tlsSuite) TestOpenSSLVersionsSlowStart() { missedRequests = append(missedRequests, requestFn()) } - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, reInitEventConsumer) // Giving the tracer time to install the hooks utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, "shared_libraries", cmd.Process.Pid, utils.ManualTracingFallbackEnabled) @@ -568,7 +565,7 @@ func TestOldConnectionRegression(t *testing.T) { cfg.EnableHTTPMonitoring = true cfg.EnableGoTLSSupport = true cfg.GoTLSExcludeSelf = false - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) // Ensure this test program is being traced utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, os.Getpid(), utils.ManualTracingFallbackEnabled) @@ -639,7 +636,7 @@ func TestLimitListenerRegression(t 
*testing.T) { // don't accidentally report a false positive based on client (`curl`) // data as opposed to the GoTLS server with `netutils.LimitListener` cfg.EnableNativeTLSMonitoring = false - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) // Ensure this test program is being traced utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, os.Getpid(), utils.ManualTracingFallbackEnabled) @@ -690,7 +687,7 @@ func testHTTPGoTLSCaptureNewProcess(t *testing.T, cfg *config.Config, isHTTP2 bo cfg.EnableHTTPMonitoring = true } - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) // This maps will keep track of whether the tracer saw this request already or not reqs := make(requestsMap) @@ -729,7 +726,7 @@ func testHTTPGoTLSCaptureAlreadyRunning(t *testing.T, cfg *config.Config, isHTTP // spin-up goTLS client but don't issue requests yet command, issueRequestsFn := gotlstestutil.NewGoTLSClient(t, serverAddr, expectedOccurrences, isHTTP2) - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, reInitEventConsumer) // This maps will keep track of whether the tracer saw this request already or not reqs := make(requestsMap) @@ -762,7 +759,7 @@ func testHTTPSGoTLSCaptureNewProcessContainer(t *testing.T, cfg *config.Config) cfg.EnableGoTLSSupport = true cfg.EnableHTTPMonitoring = true - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) require.NoError(t, gotlstestutil.RunServer(t, serverPort)) reqs := make(requestsMap) @@ -796,7 +793,7 @@ func testHTTPSGoTLSCaptureAlreadyRunningContainer(t *testing.T, cfg *config.Conf cfg.EnableGoTLSSupport = true cfg.EnableHTTPMonitoring = true - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, reInitEventConsumer) reqs := make(requestsMap) for i := 0; i < expectedOccurrences; i++ { @@ -864,12 +861,49 @@ func (m requestsMap) String() string { return result.String() } -func setupUSMTLSMonitor(t *testing.T, cfg *config.Config) *Monitor { +var ( + // eventConsumerInstance is used to store the event consumer singleton + eventConsumerInstance *consumers.ProcessConsumer + // eventConsumerMutex is used to protect the event consumer singleton + eventConsumerMutex sync.Mutex +) + +// initializeEventConsumerSingleton is used to initialize the event consumer singleton +func initializeEventConsumerSingleton(t *testing.T) *consumers.ProcessConsumer { + eventConsumerMutex.Lock() + defer eventConsumerMutex.Unlock() + + if eventConsumerInstance == nil { + eventConsumerInstance = consumerstestutil.NewTestProcessConsumer(t) + } + return eventConsumerInstance +} + +// reinitializeEventConsumer is used to reinitialize the event consumer instance +func reinitializeEventConsumer(t *testing.T) { + eventConsumerMutex.Lock() + defer eventConsumerMutex.Unlock() + + eventConsumerInstance = consumerstestutil.NewTestProcessConsumer(t) +} + +const ( + // reInitEventConsumer is used to indicate that we should re-initialize the event consumer instance + reInitEventConsumer = true + // useExistingConsumer is used to indicate that we should use the existing consumer instance + useExistingConsumer = false +) + +func setupUSMTLSMonitor(t *testing.T, cfg *config.Config, reinit bool) *Monitor { usmMonitor, err := NewMonitor(cfg, nil) require.NoError(t, err) require.NoError(t, usmMonitor.Start()) if cfg.EnableUSMEventStream && usmconfig.NeedProcessMonitor(cfg) { -
monitor.InitializeEventConsumer(consumerstestutil.NewTestProcessConsumer(t)) + if reinit { + reinitializeEventConsumer(t) + } else { + monitor.InitializeEventConsumer(initializeEventConsumerSingleton(t)) + } } t.Cleanup(usmMonitor.Stop) t.Cleanup(utils.ResetDebugger) @@ -908,7 +942,7 @@ func (s *tlsSuite) TestNodeJSTLS() { cfg.EnableHTTPMonitoring = true cfg.EnableNodeJSMonitoring = true - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, "nodejs", int(nodeJSPID), utils.ManualTracingFallbackEnabled) // This maps will keep track of whether the tracer saw this request already or not diff --git a/pkg/network/usm/postgres_monitor_test.go b/pkg/network/usm/postgres_monitor_test.go index 9198448f99468..9a5359eac4118 100644 --- a/pkg/network/usm/postgres_monitor_test.go +++ b/pkg/network/usm/postgres_monitor_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/suite" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" - "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/protocols" @@ -34,6 +33,7 @@ import ( protocolsUtils "github.com/DataDog/datadog-agent/pkg/network/protocols/testutil" gotlstestutil "github.com/DataDog/datadog-agent/pkg/network/protocols/tls/gotls/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/consts" + usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" ) @@ -107,11 +107,7 @@ type postgresProtocolParsingSuite struct { func TestPostgresMonitoring(t *testing.T) { skipTestIfKernelNotSupported(t) - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} - if !prebuilt.IsDeprecated() { - modes = append(modes, ebpftest.Prebuilt) - } - ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { suite.Run(t, new(postgresProtocolParsingSuite)) }) } @@ -122,7 +118,7 @@ func (s *postgresProtocolParsingSuite) TestLoadPostgresBinary() { t.Run(name, func(t *testing.T) { cfg := getPostgresDefaultTestConfiguration(protocolsUtils.TLSDisabled) cfg.BPFDebug = debug - setupUSMTLSMonitor(t, cfg) + setupUSMTLSMonitor(t, cfg, useExistingConsumer) }) } } @@ -191,7 +187,7 @@ func testDecoding(t *testing.T, isTLS bool) { return count * 2 } - monitor := setupUSMTLSMonitor(t, getPostgresDefaultTestConfiguration(isTLS)) + monitor := setupUSMTLSMonitor(t, getPostgresDefaultTestConfiguration(isTLS), useExistingConsumer) if isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, os.Getpid(), utils.ManualTracingFallbackEnabled) } @@ -720,7 +716,7 @@ func (s *postgresProtocolParsingSuite) TestCleanupEBPFEntriesOnTermination() { t := s.T() // Creating the monitor - monitor := setupUSMTLSMonitor(t, getPostgresDefaultTestConfiguration(protocolsUtils.TLSDisabled)) + monitor := setupUSMTLSMonitor(t, getPostgresDefaultTestConfiguration(protocolsUtils.TLSDisabled), useExistingConsumer) wg := sync.WaitGroup{} @@ -924,7 +920,7 @@ func testKernelMessagesCount(t *testing.T, isTLS bool) { require.NoError(t, postgres.RunServer(t, serverHost, postgresPort, isTLS)) waitForPostgresServer(t, serverAddress, isTLS) - monitor := setupUSMTLSMonitor(t, getPostgresDefaultTestConfiguration(isTLS)) + monitor := setupUSMTLSMonitor(t, 
getPostgresDefaultTestConfiguration(isTLS), useExistingConsumer) if isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, os.Getpid(), utils.ManualTracingFallbackEnabled) } diff --git a/pkg/network/usm/sharedlibraries/ebpf.go b/pkg/network/usm/sharedlibraries/ebpf.go index e583b6886c0f9..6f6cf41d2654b 100644 --- a/pkg/network/usm/sharedlibraries/ebpf.go +++ b/pkg/network/usm/sharedlibraries/ebpf.go @@ -15,6 +15,11 @@ import ( "strings" "sync" + "github.com/cilium/ebpf" + "github.com/cilium/ebpf/asm" + "github.com/cilium/ebpf/features" + "github.com/cilium/ebpf/link" + manager "github.com/DataDog/ebpf-manager" ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" @@ -105,6 +110,11 @@ type EbpfProgram struct { // otherwise used to check if the program needs to be stopped and re-started // when adding new libsets isInitialized bool + + // enabledProbes is a list of the probes that are enabled for the current system. + enabledProbes []manager.ProbeIdentificationPair + // disabledProbes is a list of the probes that are disabled for the current system. + disabledProbes []manager.ProbeIdentificationPair } // IsSupported returns true if the shared libraries monitoring is supported on the current system. @@ -196,14 +206,13 @@ func (e *EbpfProgram) setupManagerAndPerfHandlers() { handler.perfHandler = perfHandler } - probeIDs := getSysOpenHooksIdentifiers() - for _, identifier := range probeIDs { - mgr.Probes = append(mgr.Probes, - &manager.Probe{ - ProbeIdentificationPair: identifier, - KProbeMaxActive: maxActive, - }, - ) + e.initializeProbes() + for _, identifier := range e.enabledProbes { + probe := &manager.Probe{ + ProbeIdentificationPair: identifier, + KProbeMaxActive: maxActive, + } + mgr.Probes = append(mgr.Probes, probe) } e.Manager = ddebpf.NewManager(mgr, "shared-libraries", &ebpftelemetry.ErrorsTelemetryModifier{}) @@ -303,6 +312,7 @@ func (e *EbpfProgram) InitWithLibsets(libsets ...Libset) error { return fmt.Errorf("cannot start manager: %w", err) } + ddebpf.AddNameMappings(e.Manager.Manager, "shared-libraries") e.isInitialized = true return nil } @@ -462,8 +472,10 @@ func (e *EbpfProgram) Stop() { func (e *EbpfProgram) stopImpl() { if e.Manager != nil { - _ = e.Manager.Stop(manager.CleanAll) - ebpftelemetry.UnregisterTelemetry(e.Manager.Manager) + err := e.Manager.Stop(manager.CleanAll) + if err != nil { + log.Errorf("error stopping manager: %s", err) + } } for _, handler := range e.libsets { @@ -480,13 +492,16 @@ func (e *EbpfProgram) stopImpl() { func (e *EbpfProgram) init(buf bytecode.AssetReader, options manager.Options) error { options.RemoveRlimit = true - for _, probe := range e.Probes { + for _, probe := range e.enabledProbes { options.ActivatedProbes = append(options.ActivatedProbes, &manager.ProbeSelector{ - ProbeIdentificationPair: probe.ProbeIdentificationPair, + ProbeIdentificationPair: probe, }, ) } + for _, probe := range e.disabledProbes { + options.ExcludedFunctions = append(options.ExcludedFunctions, probe.EBPFFuncName) + } var enabledMsgs []string for libset := range LibsetToLibSuffixes { @@ -537,25 +552,60 @@ func (e *EbpfProgram) initPrebuilt() error { func sysOpenAt2Supported() bool { missing, err := ddebpf.VerifyKernelFuncs("do_sys_openat2") - if err == nil && len(missing) == 0 { - return true + return err == nil && len(missing) == 0 +} + +// fexitSupported checks if fexit type of probe is supported on the current host. 
+// It does this by creating a dummy program that attaches to the given function name, and returns true if it succeeds. +// Method was adapted from the CWS code. +func fexitSupported(funcName string) bool { + if features.HaveProgramType(ebpf.Tracing) != nil { + return false } - kversion, err := kernel.HostVersion() + spec := &ebpf.ProgramSpec{ + Type: ebpf.Tracing, + AttachType: ebpf.AttachTraceFExit, + AttachTo: funcName, + Instructions: asm.Instructions{ + asm.LoadImm(asm.R0, 0, asm.DWord), + asm.Return(), + }, + } + prog, err := ebpf.NewProgramWithOptions(spec, ebpf.ProgramOptions{ + LogDisabled: true, + }) + if err != nil { + return false + } + defer prog.Close() + l, err := link.AttachTracing(link.TracingOptions{ + Program: prog, + }) if err != nil { - log.Error("could not determine the current kernel version. fallback to do_sys_open") return false } + defer l.Close() - return kversion >= kernel.VersionCode(5, 6, 0) + return true } -// getSysOpenHooksIdentifiers returns the enter and exit tracepoints for supported open* -// system calls. -func getSysOpenHooksIdentifiers() []manager.ProbeIdentificationPair { +// initializeProbes initializes the probes that are enabled for the current system +func (e *EbpfProgram) initializeProbes() { + openat2Supported := sysOpenAt2Supported() + isFexitSupported := fexitSupported("do_sys_openat2") + + // Tracing represents fentry/fexit probes. + tracingProbes := []manager.ProbeIdentificationPair{ + { + EBPFFuncName: fmt.Sprintf("do_sys_%s_exit", openat2SysCall), + UID: probeUID, + }, + } + openatProbes := []string{openatSysCall} - if sysOpenAt2Supported() { + if openat2Supported { openatProbes = append(openatProbes, openat2SysCall) } // amd64 has open(2), arm64 doesn't @@ -563,17 +613,24 @@ openatProbes = append(openatProbes, openSysCall) } - res := make([]manager.ProbeIdentificationPair, 0, len(traceTypes)*len(openatProbes)) + // tp stands for tracepoints, which is the older probe format.
+ tpProbes := make([]manager.ProbeIdentificationPair, 0, len(traceTypes)*len(openatProbes)) for _, probe := range openatProbes { for _, traceType := range traceTypes { - res = append(res, manager.ProbeIdentificationPair{ + tpProbes = append(tpProbes, manager.ProbeIdentificationPair{ EBPFFuncName: fmt.Sprintf("tracepoint__syscalls__sys_%s_%s", traceType, probe), UID: probeUID, }) } } - return res + if isFexitSupported && openat2Supported { + e.enabledProbes = tracingProbes + e.disabledProbes = tpProbes + } else { + e.enabledProbes = tpProbes + e.disabledProbes = tracingProbes + } } func getAssetName(module string, debug bool) string { diff --git a/pkg/network/usm/sharedlibraries/ebpf_test.go b/pkg/network/usm/sharedlibraries/ebpf_test.go index 418088f49acb0..63a8e892b8b99 100644 --- a/pkg/network/usm/sharedlibraries/ebpf_test.go +++ b/pkg/network/usm/sharedlibraries/ebpf_test.go @@ -20,6 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" fileopener "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries/testutil" + usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" ) type EbpfProgramSuite struct { @@ -27,7 +28,7 @@ type EbpfProgramSuite struct { } func TestEbpfProgram(t *testing.T) { - ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { if !IsSupported(ebpf.NewConfig()) { t.Skip("shared-libraries monitoring is not supported on this configuration") } diff --git a/pkg/network/usm/sharedlibraries/watcher.go b/pkg/network/usm/sharedlibraries/watcher.go index f23673fc6c24e..f13548414a017 100644 --- a/pkg/network/usm/sharedlibraries/watcher.go +++ b/pkg/network/usm/sharedlibraries/watcher.go @@ -26,9 +26,10 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" ) -const ( +var ( // The interval of the periodic scan for terminated processes. Increasing the interval, might cause larger spikes in cpu - // and lowering it might cause constant cpu usage. + // and lowering it might cause constant cpu usage. This is a var instead of a const only because the test code changes + // this value to speed up test execution. 
scanTerminatedProcessesInterval = 30 * time.Second ) @@ -55,6 +56,7 @@ type Rule struct { // Watcher provides a way to tie callback functions to the lifecycle of shared libraries type Watcher struct { + syncMutex sync.RWMutex wg sync.WaitGroup done chan struct{} procRoot string @@ -64,6 +66,7 @@ type Watcher struct { ebpfProgram *EbpfProgram libset Libset thisPID int + scannedPIDs map[uint32]int // telemetry libHits *telemetry.Counter @@ -90,6 +93,7 @@ func NewWatcher(cfg *config.Config, libset Libset, rules ...Rule) (*Watcher, err processMonitor: monitor.GetProcessMonitor(), ebpfProgram: ebpfProgram, registry: utils.NewFileRegistry(consts.USMModuleName, "shared_libraries"), + scannedPIDs: make(map[uint32]int), libHits: telemetry.NewCounter("usm.so_watcher.hits", telemetry.OptPrometheus), libMatches: telemetry.NewCounter("usm.so_watcher.matches", telemetry.OptPrometheus), @@ -274,11 +278,7 @@ func (w *Watcher) Start() { case <-w.done: return case <-processSync.C: - processSet := w.registry.GetRegisteredProcesses() - deletedPids := findDeletedProcesses(processSet) - for deletedPid := range deletedPids { - _ = w.registry.Unregister(deletedPid) - } + w.sync() } } }() @@ -291,28 +291,60 @@ func (w *Watcher) Start() { utils.AddAttacher(consts.USMModuleName, "native", w) } -// findDeletedProcesses returns the terminated PIDs from the given map. -func findDeletedProcesses[V any](pids map[uint32]V) map[uint32]struct{} { - existingPids := make(map[uint32]struct{}, len(pids)) +// sync unregisters from any terminated processes which we missed the exit +// callback for, and also attempts to register with running processes to ensure +// that we don't miss any process. +func (w *Watcher) sync() { + // The mutex only guards against concurrent access from the test code, which + // reads the scannedPIDs map. + w.syncMutex.Lock() + defer w.syncMutex.Unlock() + + deletionCandidates := w.registry.GetRegisteredProcesses() + alivePIDs := make(map[uint32]struct{}) - procIter := func(pid int) error { - if _, exists := pids[uint32(pid)]; exists { - existingPids[uint32(pid)] = struct{}{} + _ = kernel.WithAllProcs(kernel.ProcFSRoot(), func(origPid int) error { + if origPid == w.thisPID { // don't scan ourselves + return nil } + + pid := uint32(origPid) + alivePIDs[pid] = struct{}{} + + if _, ok := deletionCandidates[pid]; ok { + // We have previously hooked into this process and it remains + // active, so we remove it from the deletionCandidates list, and + // move on to the next PID + delete(deletionCandidates, pid) + return nil + } + + scanned := w.scannedPIDs[pid] + + // Try to scan twice. This is because we may happen to scan the process + // just after it has been exec'd and before it has opened its shared + // libraries. Scanning twice, one sync interval apart, reduces the risk of + // missing shared libraries in that window.
+ if scanned < 2 { + w.scannedPIDs[pid]++ + err := w.AttachPID(pid) + if err == nil { + log.Debugf("watcher attached to %v via periodic scan", pid) + w.scannedPIDs[pid] = 2 + } + } + return nil - } - // Scanning already running processes - if err := kernel.WithAllProcs(kernel.ProcFSRoot(), procIter); err != nil { - return nil - } + }) - res := make(map[uint32]struct{}, len(pids)-len(existingPids)) - for pid := range pids { - if _, exists := existingPids[pid]; exists { - continue + // Clean up dead processes from the list of scanned PIDs + for pid := range w.scannedPIDs { + if _, alive := alivePIDs[pid]; !alive { + delete(w.scannedPIDs, pid) } - res[pid] = struct{}{} } - return res + for pid := range deletionCandidates { + _ = w.registry.Unregister(pid) + } } diff --git a/pkg/network/usm/sharedlibraries/watcher_test.go b/pkg/network/usm/sharedlibraries/watcher_test.go index 8c9864a91af8f..05b914ef5ec46 100644 --- a/pkg/network/usm/sharedlibraries/watcher_test.go +++ b/pkg/network/usm/sharedlibraries/watcher_test.go @@ -17,18 +17,21 @@ import ( "regexp" "strings" "sync" + "syscall" "testing" "time" + "unsafe" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" + "golang.org/x/sys/unix" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" - "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" fileopener "github.com/DataDog/datadog-agent/pkg/network/usm/sharedlibraries/testutil" + usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" "github.com/DataDog/datadog-agent/pkg/process/monitor" "github.com/DataDog/datadog-agent/pkg/util/kernel" @@ -53,12 +56,7 @@ func TestSharedLibrary(t *testing.T) { t.Skip("shared library tracing not supported for this platform") } - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} - if !prebuilt.IsDeprecated() { - modes = append(modes, ebpftest.Prebuilt) - } - - ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { t.Run("netlink", func(t *testing.T) { launchProcessMonitor(t, false) suite.Run(t, new(SharedLibrarySuite)) @@ -104,6 +102,89 @@ func (s *SharedLibrarySuite) TestSharedLibraryDetection() { }, time.Second*10, 100*time.Millisecond) } +// open abstracts open, openat, and openat2 +func open(dirfd int, pathname string, how *unix.OpenHow, syscallType string) (int, error) { + switch syscallType { + case "open": + return unix.Open(pathname, int(how.Flags), uint32(how.Mode)) + case "openat": + return unix.Openat(dirfd, pathname, int(how.Flags), uint32(how.Mode)) + case "openat2": + return unix.Openat2(dirfd, pathname, how) + default: + return -1, fmt.Errorf("unsupported syscall type: %s", syscallType) + } +} + +// Test that shared library files opened for writing only are ignored. 
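+// A write-only open of a matching library must not trigger the register callback, while a read-only open of a similar library must; the read acts as a positive control for the test.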
+func (s *SharedLibrarySuite) TestSharedLibraryIgnoreWrite() { + t := s.T() + + tests := []struct { + syscallType string + skipFunc func(t *testing.T) + }{ + { + syscallType: "open", + }, + { + syscallType: "openat", + }, + { + syscallType: "openat2", + skipFunc: func(t *testing.T) { + if !sysOpenAt2Supported() { + t.Skip("openat2 not supported") + } + }, + }, + } + for _, tt := range tests { + t.Run(tt.syscallType, func(t *testing.T) { + if tt.skipFunc != nil { + tt.skipFunc(t) + } + // Since we want to verify that the write-only open is _not_ detected, also verify the + // read-only open, to ensure that the test itself isn't broken and silently failing to + // detect the write due to some bug in the test. + readPath, readPathID := createTempTestFile(t, "read-foo-libssl.so") + writePath, writePathID := createTempTestFile(t, "write-foo-libssl.so") + + registerRecorder := new(utils.CallbackRecorder) + unregisterRecorder := new(utils.CallbackRecorder) + + watcher, err := NewWatcher(utils.NewUSMEmptyConfig(), LibsetCrypto, + Rule{ + Re: regexp.MustCompile(`foo-libssl.so`), + RegisterCB: registerRecorder.Callback(), + UnregisterCB: unregisterRecorder.Callback(), + }, + ) + require.NoError(t, err) + watcher.Start() + t.Cleanup(watcher.Stop) + // Overriding PID, to allow the watcher to watch the test process + watcher.thisPID = 0 + + how := unix.OpenHow{Mode: 0644} + + require.EventuallyWithT(t, func(c *assert.CollectT) { + how.Flags = syscall.O_CREAT | syscall.O_RDONLY + fd, err := open(unix.AT_FDCWD, readPath, &how, tt.syscallType) + require.NoError(c, err) + require.NoError(c, syscall.Close(fd)) + require.GreaterOrEqual(c, registerRecorder.CallsForPathID(readPathID), 1) + + how.Flags = syscall.O_CREAT | syscall.O_WRONLY + fd, err = open(unix.AT_FDCWD, writePath, &how, tt.syscallType) + require.NoError(c, err) + require.NoError(c, syscall.Close(fd)) + require.Equal(c, 0, registerRecorder.CallsForPathID(writePathID)) + }, time.Second*5, 100*time.Millisecond) + }) + } +} + func (s *SharedLibrarySuite) TestLongPath() { t := s.T() @@ -151,6 +232,105 @@ func (s *SharedLibrarySuite) TestLongPath() { }, time.Second*10, 100*time.Millisecond) } +// Tests that the periodic scan is able to detect processes which are missed by +// the eBPF-based watcher. +func (s *SharedLibrarySuite) TestSharedLibraryDetectionPeriodic() { + t := s.T() + + // Construct a large path to exceed the limits of the eBPF-based watcher + // (LIB_PATH_MAX_SIZE). 255 is the max filename size of ext4. The path + // size will also include the directories leading up to this filename so the + // total size will be more.
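+ // A path this long cannot be reported through the eBPF probes, so the eBPF-based detection misses it and only the periodic scan can pick the library up.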
+ var b strings.Builder + final := "foo-libssl.so" + for i := 0; i < 255-len(final); i++ { + b.WriteByte('x') + } + b.WriteString(final) + filename := b.String() + + // Reduce interval to speed up test + orig := scanTerminatedProcessesInterval + t.Cleanup(func() { scanTerminatedProcessesInterval = orig }) + scanTerminatedProcessesInterval = 10 * time.Millisecond + + fooPath1, fooPathID1 := createTempTestFile(t, filename) + errPath, errorPathID := createTempTestFile(t, strings.Replace(filename, "xfoo", "yfoo", 1)) + + registerRecorder := new(utils.CallbackRecorder) + unregisterRecorder := new(utils.CallbackRecorder) + + registerCallback := registerRecorder.Callback() + + watcher, err := NewWatcher(utils.NewUSMEmptyConfig(), LibsetCrypto, + Rule{ + Re: regexp.MustCompile(`foo-libssl.so`), + RegisterCB: func(fp utils.FilePath) error { + registerCallback(fp) + if fp.ID == errorPathID { + return utils.ErrEnvironment + } + return nil + }, + UnregisterCB: unregisterRecorder.Callback(), + }, + ) + require.NoError(t, err) + watcher.Start() + t.Cleanup(watcher.Stop) + + // create files + command1, err := fileopener.OpenFromAnotherProcess(t, fooPath1) + pid := command1.Process.Pid + require.NoError(t, err) + + command2, err := fileopener.OpenFromAnotherProcess(t, errPath) + pid2 := command2.Process.Pid + require.NoError(t, err) + + require.EventuallyWithT(t, func(c *assert.CollectT) { + assert.Equal(c, 1, registerRecorder.CallsForPathID(fooPathID1)) + + // We expect at least one registration attempt to the error path, but + // there could be up to two since w.sync() can scan the maps file twice. + // We can't _guarantee_ there will be two registration attempts in this + // test though because the first attempt could have happened before the + // process opened the shared library (and we don't want to move the + // watcher start to after the process start since that would test the + // initial scan and not the periodic). + errorCalls := registerRecorder.CallsForPathID(errorPathID) + assert.GreaterOrEqual(c, errorCalls, 1) + assert.LessOrEqual(c, errorCalls, 2) + }, time.Second*10, 100*time.Millisecond, "") + + require.EventuallyWithT(t, func(c *assert.CollectT) { + watcher.syncMutex.Lock() + defer watcher.syncMutex.Unlock() + + assert.Contains(c, watcher.scannedPIDs, uint32(pid)) + assert.Contains(c, watcher.scannedPIDs, uint32(pid2)) + }, time.Second*10, 100*time.Millisecond) + + require.NoError(t, command1.Process.Kill()) + require.NoError(t, command2.Process.Kill()) + + command1.Process.Wait() + command2.Process.Wait() + + require.EventuallyWithT(t, func(c *assert.CollectT) { + assert.Equal(c, 1, unregisterRecorder.CallsForPathID(fooPathID1)) + }, time.Second*10, 100*time.Millisecond) + + // Check that clean up of dead processes works. 
+ require.EventuallyWithT(t, func(c *assert.CollectT) { + watcher.syncMutex.Lock() + defer watcher.syncMutex.Unlock() + + assert.NotContains(c, watcher.scannedPIDs, uint32(pid)) + assert.NotContains(c, watcher.scannedPIDs, uint32(pid2)) + }, time.Second*10, 100*time.Millisecond) +} + func (s *SharedLibrarySuite) TestSharedLibraryDetectionWithPIDAndRootNamespace() { t := s.T() _, err := os.Stat("/usr/bin/busybox") @@ -194,9 +374,10 @@ func (s *SharedLibrarySuite) TestSharedLibraryDetectionWithPIDAndRootNamespace() t.Cleanup(watcher.Stop) time.Sleep(10 * time.Millisecond) - // simulate a slow (1 second) : open, write, close of the file + // simulate a slow (1 second) : open, read, close of the file in a new pid and mount namespaces - o, err := exec.Command("unshare", "--fork", "--pid", "-R", root, "/ash", "-c", fmt.Sprintf("sleep 1 > %s", libpath)).CombinedOutput() + o, err := exec.Command("unshare", "--fork", "--pid", "-R", root, "/ash", "-c", + fmt.Sprintf("touch foo && mv foo %s && sleep 1 < %s", libpath, libpath)).CombinedOutput() if err != nil { t.Log(err, string(o)) } @@ -389,6 +570,123 @@ func (s *SharedLibrarySuite) TestSoWatcherProcessAlreadyHoldingReferences() { assert.Len(t, watcher.registry.GetRegisteredProcesses(), 0) } +func zeroPages(data []byte) { + for i := range data { + data[i] = 0 + } +} + +// This test ensures that the shared library watcher correctly identifies and processes the first file path in memory, +// even when a second path is present, particularly in scenarios where the first path crosses a memory page boundary. +// The goal is to verify that the presence of the second path does not inadvertently cause the watcher to send the +// first path to user mode. Before each iteration, the memory-mapped pages are zeroed to ensure consistent and isolated +// test conditions. +func (s *SharedLibrarySuite) TestValidPathExistsInTheMemory() { + t := s.T() + + pageSize := os.Getpagesize() + + // We want to allocate two contiguous pages and ensure that the address + // after the two pages is inaccessible. So allocate 3 pages and change the + // protection of the last one with mprotect(2). If we only map two pages the + // kernel may merge this mapping with another existing mapping after it. + data, err := syscall.Mmap(-1, 0, 3*pageSize, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_ANON|syscall.MAP_PRIVATE) + require.NoError(t, err) + t.Cleanup(func() { _ = syscall.Munmap(data) }) + + err = syscall.Mprotect(data[2*pageSize:], 0) + require.NoError(t, err) + // Truncate the size so that the range loop on it in zeroPages() does not + // access the memory we've disabled access to. + data = data[:2*pageSize] + + dummyPath, dummyPathID := createTempTestFile(t, "dummy.text") + soPath, soPathID := createTempTestFile(t, "foo-libssl.so") + + tests := []struct { + name string + writePaths func(data []byte, textFilePath, soPath string) int + }{ + { + // Paths are written consecutively in memory, without crossing a page boundary. + name: "sanity", + writePaths: func(data []byte, textFilePath, soPath string) int { + copy(data, textFilePath) + data[len(textFilePath)] = 0 // Null-terminate the first path + copy(data[len(textFilePath)+1:], soPath) + + return 0 + }, + }, + { + // Paths are written consecutively in memory, at the end of a page.
+ name: "end of a page", + writePaths: func(data []byte, textFilePath, soPath string) int { + offset := 2*pageSize - len(textFilePath) - 1 - len(soPath) - 1 + copy(data[offset:], textFilePath) + data[offset+len(textFilePath)] = 0 // Null-terminate the first path + copy(data[offset+len(textFilePath)+1:], soPath) + data[offset+len(textFilePath)+1+len(soPath)] = 0 // Null-terminate the second path + + return offset + }, + }, + { + // The first path is written at the end of the first page, and the second path is written at the beginning + // of the second page. + name: "cross pages", + writePaths: func(data []byte, textFilePath, soPath string) int { + // Ensure the first path ends near the end of the first page, crossing into the second page + offset := pageSize - len(textFilePath) - 1 + copy(data[offset:], textFilePath) + data[offset+len(textFilePath)] = 0 // Null-terminate the first path + copy(data[pageSize:], soPath) + + return offset + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + zeroPages(data) + + // Write the two paths into the mapped memory according to the scenario + offset := tt.writePaths(data, dummyPath, soPath) + + registerRecorder := new(utils.CallbackRecorder) + unregisterRecorder := new(utils.CallbackRecorder) + + watcher, err := NewWatcher(utils.NewUSMEmptyConfig(), LibsetCrypto, + Rule{ + Re: regexp.MustCompile(`foo-libssl.so`), + RegisterCB: registerRecorder.Callback(), + UnregisterCB: unregisterRecorder.Callback(), + }, + ) + require.NoError(t, err) + watcher.Start() + t.Cleanup(watcher.Stop) + // Overriding PID, to allow the watcher to watch the test process + watcher.thisPID = 0 + + pathPtr := uintptr(unsafe.Pointer(&data[offset])) + dirfd := int(unix.AT_FDCWD) + fd, _, errno := syscall.Syscall6(syscall.SYS_OPENAT, uintptr(dirfd), pathPtr, uintptr(os.O_RDONLY), 0644, 0, 0) + require.Zero(t, errno) + t.Cleanup(func() { _ = syscall.Close(int(fd)) }) + // Since we want to verify that the paths _haven't_ been detected, we need to check multiple times + // to avoid race conditions.
+ for i := 0; i < 10; i++ { + time.Sleep(100 * time.Millisecond) + assert.Zero(t, watcher.libHits.Get()) + assert.Zero(t, watcher.libMatches.Get()) + assert.Zero(t, registerRecorder.CallsForPathID(dummyPathID)) + assert.Zero(t, registerRecorder.CallsForPathID(soPathID)) + } + }) + } +} + func createTempTestFile(t *testing.T, name string) (string, utils.PathIdentifier) { fullPath := filepath.Join(t.TempDir(), name) diff --git a/pkg/network/usm/tests/tracer_usm_linux_test.go b/pkg/network/usm/tests/tracer_usm_linux_test.go index 5cbbef2763777..34bef2359cf0c 100644 --- a/pkg/network/usm/tests/tracer_usm_linux_test.go +++ b/pkg/network/usm/tests/tracer_usm_linux_test.go @@ -62,6 +62,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/network/usm" usmconfig "github.com/DataDog/datadog-agent/pkg/network/usm/config" "github.com/DataDog/datadog-agent/pkg/network/usm/consts" + usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/testutil/grpc" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" "github.com/DataDog/datadog-agent/pkg/process/util" @@ -141,7 +142,7 @@ type USMSuite struct { } func TestUSMSuite(t *testing.T) { - ebpftest.TestBuildModes(t, []ebpftest.BuildMode{ebpftest.Prebuilt, ebpftest.RuntimeCompiled, ebpftest.CORE}, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { suite.Run(t, new(USMSuite)) }) } diff --git a/pkg/network/usm/testutil/buildmode.go b/pkg/network/usm/testutil/buildmode.go new file mode 100644 index 0000000000000..8f61b0a943aee --- /dev/null +++ b/pkg/network/usm/testutil/buildmode.go @@ -0,0 +1,25 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
+ +//go:build linux_bpf && test + +package testutil + +import ( + "os" + + "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" + "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" +) + +// SupportedBuildModes returns the build modes supported on the current host +func SupportedBuildModes() []ebpftest.BuildMode { + modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} + if !prebuilt.IsDeprecated() || os.Getenv("TEST_PREBUILT_OVERRIDE") == "true" { + modes = append(modes, ebpftest.Prebuilt) + } + + return modes +} diff --git a/pkg/network/usm/usm_grpc_monitor_test.go b/pkg/network/usm/usm_grpc_monitor_test.go index bfc6833bf897d..ec97f1c866e27 100644 --- a/pkg/network/usm/usm_grpc_monitor_test.go +++ b/pkg/network/usm/usm_grpc_monitor_test.go @@ -20,13 +20,13 @@ import ( "google.golang.org/grpc/metadata" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" - "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/protocols" "github.com/DataDog/datadog-agent/pkg/network/protocols/http" "github.com/DataDog/datadog-agent/pkg/network/protocols/http2" gotlsutils "github.com/DataDog/datadog-agent/pkg/network/protocols/tls/gotls/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/consts" + usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/testutil/grpc" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" "github.com/DataDog/datadog-agent/pkg/util/kernel" @@ -58,12 +58,7 @@ func TestGRPCScenarios(t *testing.T) { t.Skipf("HTTP2 monitoring can not run on kernel before %v", http2.MinimumKernelVersion) } - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} - if !prebuilt.IsDeprecated() { - modes = append(modes, ebpftest.Prebuilt) - } - - ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { for _, tc := range []struct { name string isTLS bool @@ -120,7 +115,7 @@ func (s *usmGRPCSuite) TestSimpleGRPCScenarios() { t.Cleanup(cancel) defaultCtx := context.Background() - usmMonitor := setupUSMTLSMonitor(t, s.getConfig()) + usmMonitor := setupUSMTLSMonitor(t, s.getConfig(), useExistingConsumer) if s.isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, srv.Process.Pid, utils.ManualTracingFallbackEnabled) } @@ -448,7 +443,7 @@ func (s *usmGRPCSuite) TestLargeBodiesGRPCScenarios() { t.Cleanup(cancel) defaultCtx := context.Background() - usmMonitor := setupUSMTLSMonitor(t, s.getConfig()) + usmMonitor := setupUSMTLSMonitor(t, s.getConfig(), useExistingConsumer) if s.isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, srv.Process.Pid, utils.ManualTracingFallbackEnabled) } diff --git a/pkg/network/usm/usm_http2_monitor_test.go b/pkg/network/usm/usm_http2_monitor_test.go index ffeb78a706f69..dea8d20edc345 100644 --- a/pkg/network/usm/usm_http2_monitor_test.go +++ b/pkg/network/usm/usm_http2_monitor_test.go @@ -35,7 +35,6 @@ import ( ddebpf "github.com/DataDog/datadog-agent/pkg/ebpf" "github.com/DataDog/datadog-agent/pkg/ebpf/ebpftest" - "github.com/DataDog/datadog-agent/pkg/ebpf/prebuilt" "github.com/DataDog/datadog-agent/pkg/network" "github.com/DataDog/datadog-agent/pkg/network/config" "github.com/DataDog/datadog-agent/pkg/network/protocols" @@ -45,6 +44,7 @@ import ( gotlsutils 
"github.com/DataDog/datadog-agent/pkg/network/protocols/tls/gotls/testutil" "github.com/DataDog/datadog-agent/pkg/network/tracer/testutil/proxy" "github.com/DataDog/datadog-agent/pkg/network/usm/consts" + usmtestutil "github.com/DataDog/datadog-agent/pkg/network/usm/testutil" "github.com/DataDog/datadog-agent/pkg/network/usm/utils" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -95,12 +95,7 @@ func skipIfKernelNotSupported(t *testing.T) { func TestHTTP2Scenarios(t *testing.T) { skipIfKernelNotSupported(t) - modes := []ebpftest.BuildMode{ebpftest.RuntimeCompiled, ebpftest.CORE} - if !prebuilt.IsDeprecated() { - modes = append(modes, ebpftest.Prebuilt) - } - - ebpftest.TestBuildModes(t, modes, "", func(t *testing.T) { + ebpftest.TestBuildModes(t, usmtestutil.SupportedBuildModes(), "", func(t *testing.T) { for _, tc := range []struct { name string isTLS bool @@ -132,7 +127,7 @@ func (s *usmHTTP2Suite) TestLoadHTTP2Binary() { for _, debug := range map[string]bool{"enabled": true, "disabled": false} { t.Run(fmt.Sprintf("debug %v", debug), func(t *testing.T) { cfg.BPFDebug = debug - setupUSMTLSMonitor(t, cfg) + setupUSMTLSMonitor(t, cfg, useExistingConsumer) }) } } @@ -150,7 +145,7 @@ func (s *usmHTTP2Suite) TestHTTP2DynamicTableCleanup() { t.Cleanup(cancel) require.NoError(t, proxy.WaitForConnectionReady(unixPath)) - monitor := setupUSMTLSMonitor(t, cfg) + monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) if s.isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) } @@ -212,7 +207,7 @@ func (s *usmHTTP2Suite) TestSimpleHTTP2() { t.Cleanup(cancel) require.NoError(t, proxy.WaitForConnectionReady(unixPath)) - monitor := setupUSMTLSMonitor(t, cfg) + monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) if s.isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) } @@ -400,7 +395,7 @@ func (s *usmHTTP2Suite) TestHTTP2KernelTelemetry() { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - monitor := setupUSMTLSMonitor(t, cfg) + monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) if s.isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) } @@ -455,7 +450,7 @@ func (s *usmHTTP2Suite) TestHTTP2ManyDifferentPaths() { t.Cleanup(cancel) require.NoError(t, proxy.WaitForConnectionReady(unixPath)) - monitor := setupUSMTLSMonitor(t, cfg) + monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) if s.isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) } @@ -512,7 +507,7 @@ func (s *usmHTTP2Suite) TestRawTraffic() { t := s.T() cfg := s.getCfg() - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) // Start local server and register its cleanup. 
t.Cleanup(startH2CServer(t, authority, s.isTLS)) @@ -1319,7 +1314,7 @@ func (s *usmHTTP2Suite) TestDynamicTable() { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) if s.isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) } @@ -1405,7 +1400,7 @@ func (s *usmHTTP2Suite) TestIncompleteFrameTable() { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) if s.isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) } @@ -1475,7 +1470,7 @@ func (s *usmHTTP2Suite) TestRawHuffmanEncoding() { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - usmMonitor := setupUSMTLSMonitor(t, cfg) + usmMonitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) if s.isTLS { utils.WaitForProgramsToBeTraced(t, consts.USMModuleName, GoTLSAttacherName, proxyProcess.Process.Pid, utils.ManualTracingFallbackEnabled) } @@ -1513,7 +1508,7 @@ func TestHTTP2InFlightMapCleaner(t *testing.T) { cfg.EnableHTTP2Monitoring = true cfg.HTTP2DynamicTableMapCleanerInterval = 5 * time.Second cfg.HTTPIdleConnectionTTL = time.Second - monitor := setupUSMTLSMonitor(t, cfg) + monitor := setupUSMTLSMonitor(t, cfg, useExistingConsumer) ebpfNow, err := ddebpf.NowNanoseconds() require.NoError(t, err) http2InFLightMap, _, err := monitor.ebpfProgram.GetMap(usmhttp2.InFlightMap) diff --git a/pkg/network/usm/utils/debugger.go b/pkg/network/usm/utils/debugger.go index 8094b2e02c6c3..9f2ec2632b9be 100644 --- a/pkg/network/usm/utils/debugger.go +++ b/pkg/network/usm/utils/debugger.go @@ -45,6 +45,7 @@ type BlockedProcess struct { type PathIdentifierWithSamplePath struct { PathIdentifier SamplePath string + Reason string } // GetTracedProgramsEndpoint returns a callback for the given module name, that @@ -213,11 +214,12 @@ func (d *tlsDebugger) GetBlockedPathIDsWithSamplePath(moduleName, programType st blockedIDsWithSampleFile := make([]PathIdentifierWithSamplePath, 0, len(registry.blocklistByID.Keys())) for _, pathIdentifier := range registry.blocklistByID.Keys() { - samplePath, ok := registry.blocklistByID.Get(pathIdentifier) + entry, ok := registry.blocklistByID.Get(pathIdentifier) if ok { blockedIDsWithSampleFile = append(blockedIDsWithSampleFile, PathIdentifierWithSamplePath{ PathIdentifier: pathIdentifier, - SamplePath: samplePath}) + SamplePath: entry.Path, + Reason: entry.Reason}) } } diff --git a/pkg/network/usm/utils/file_registry.go b/pkg/network/usm/utils/file_registry.go index 851d816618e86..2b20410088188 100644 --- a/pkg/network/usm/utils/file_registry.go +++ b/pkg/network/usm/utils/file_registry.go @@ -16,9 +16,11 @@ import ( "github.com/hashicorp/golang-lru/v2/simplelru" "go.uber.org/atomic" + "github.com/DataDog/datadog-agent/pkg/network/go/binversion" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/safeelf" ) // FileRegistry is responsible for tracking open files and executing callbacks @@ -48,11 +50,17 @@ type FileRegistry struct { byPID map[uint32]pathIdentifierSet // if we can't execute a callback for a given file we don't try more than 
once - blocklistByID *simplelru.LRU[PathIdentifier, string] + blocklistByID *simplelru.LRU[PathIdentifier, BlockListEntry] telemetry registryTelemetry } +// BlockListEntry represents an entry in the block list +type BlockListEntry struct { + Path string + Reason string +} + // FilePath represents the location of a file from the *root* namespace view type FilePath struct { HostPath string @@ -90,7 +98,7 @@ var ErrEnvironment = errors.New("Environment error, path will not be blocked") // NewFileRegistry creates a new `FileRegistry` instance func NewFileRegistry(moduleName, programName string) *FileRegistry { - blocklistByID, err := simplelru.NewLRU[PathIdentifier, string](2000, nil) + blocklistByID, err := simplelru.NewLRU[PathIdentifier, BlockListEntry](2000, nil) if err != nil { log.Warnf("running without block cache list, creation error: %s", err) blocklistByID = nil @@ -121,6 +129,22 @@ var ( ErrPathIsAlreadyRegistered = errors.New("path is already registered") ) +// getBlockReason creates a string specifying the reason for the block based on +// the error received. To reduce memory usage of this debugging feature, for +// very common errors we store a summary instead of the full error string, +// leaving the latter for more interesting errors. +func getBlockReason(err error) string { + if errors.Is(err, binversion.ErrNotGoExe) { + return "not-go" + } + + if errors.Is(err, safeelf.ErrNoSymbols) { + return "no-symbols" + } + + return err.Error() +} + // Register inserts or updates a new file registration within to the `FileRegistry`; // // If no current registration exists for the given `PathIdentifier`, we execute @@ -193,7 +217,7 @@ func (r *FileRegistry) Register(namespacedPath string, pid uint32, activationCB, if r.blocklistByID != nil { // add `pathID` to blocklist so we don't attempt to re-register files // that are problematic for some reason - r.blocklistByID.Add(pathID, path.HostPath) + r.blocklistByID.Add(pathID, BlockListEntry{Path: path.HostPath, Reason: getBlockReason(err)}) } r.telemetry.fileHookFailed.Add(1) return err diff --git a/pkg/network/usm/utils/file_registry_test.go b/pkg/network/usm/utils/file_registry_test.go index 67b7c0984f313..292e2fe58e7a6 100644 --- a/pkg/network/usm/utils/file_registry_test.go +++ b/pkg/network/usm/utils/file_registry_test.go @@ -244,6 +244,9 @@ func TestFailedRegistration(t *testing.T) { assert.Equal(t, 1, registerRecorder.CallsForPathID(pathID)) assert.Contains(t, debugger.GetBlockedPathIDs(testModuleName, ""), pathID) + info := debugger.GetBlockedPathIDsWithSamplePath(testModuleName, "") + require.Len(t, info, 1) + assert.Equal(t, registerRecorder.ReturnError.Error(), info[0].Reason) debugger.ClearBlocked(testModuleName) assert.Empty(t, debugger.GetBlockedPathIDs(testModuleName, "")) } diff --git a/pkg/networkdevice/metadata/payload.go b/pkg/networkdevice/metadata/payload.go index 5f67824728cbe..af7d43f9f6a90 100644 --- a/pkg/networkdevice/metadata/payload.go +++ b/pkg/networkdevice/metadata/payload.go @@ -43,6 +43,7 @@ type NetworkDevicesMetadata struct { NetflowExporters []NetflowExporter `json:"netflow_exporters,omitempty"` Diagnoses []DiagnosisMetadata `json:"diagnoses,omitempty"` DeviceOIDs []DeviceOID `json:"device_oids,omitempty"` + DeviceScanStatus *ScanStatusMetadata `json:"scan_status,omitempty"` CollectTimestamp int64 `json:"collect_timestamp"` } @@ -81,6 +82,24 @@ type DeviceOID struct { Value string `json:"value"` } +// ScanStatus type for the different possible scan statuses +type ScanStatus string + +const ( + // 
ScanStatusInProgress represents a scan in progress + ScanStatusInProgress ScanStatus = "in progress" + // ScanStatusCompleted represents a completed scan + ScanStatusCompleted ScanStatus = "completed" + // ScanStatusError represents a scan error + ScanStatusError ScanStatus = "error" +) + +// ScanStatusMetadata contains scan status metadata +type ScanStatusMetadata struct { + DeviceID string `json:"device_id"` + ScanStatus ScanStatus `json:"scan_status"` +} + // InterfaceMetadata contains interface metadata type InterfaceMetadata struct { DeviceID string `json:"device_id"` diff --git a/pkg/networkdevice/profile/profiledefinition/metadata.go b/pkg/networkdevice/profile/profiledefinition/metadata.go index fc4b49726bc1c..30212a0dd579f 100644 --- a/pkg/networkdevice/profile/profiledefinition/metadata.go +++ b/pkg/networkdevice/profile/profiledefinition/metadata.go @@ -5,16 +5,33 @@ package profiledefinition +import "github.com/invopop/jsonschema" + // MetadataDeviceResource is the device resource name const MetadataDeviceResource = "device" // MetadataConfig holds configs per resource type -type MetadataConfig map[string]MetadataResourceConfig +type MetadataConfig ListMap[MetadataResourceConfig] + +// JSONSchema defines the JSON schema for MetadataConfig +func (mc MetadataConfig) JSONSchema() *jsonschema.Schema { + return ListMap[MetadataResourceConfig](mc).JSONSchema() +} + +// MarshalJSON marshals the metadata config +func (mc MetadataConfig) MarshalJSON() ([]byte, error) { + return ListMap[MetadataResourceConfig](mc).MarshalJSON() +} + +// UnmarshalJSON unmarshals the metadata config +func (mc *MetadataConfig) UnmarshalJSON(data []byte) error { + return (*ListMap[MetadataResourceConfig])(mc).UnmarshalJSON(data) +} // MetadataResourceConfig holds configs for a metadata resource type MetadataResourceConfig struct { - Fields map[string]MetadataField `yaml:"fields" json:"fields"` - IDTags MetricTagConfigList `yaml:"id_tags,omitempty" json:"id_tags,omitempty"` + Fields ListMap[MetadataField] `yaml:"fields" json:"fields"` + IDTags MetricTagConfigList `yaml:"id_tags,omitempty" json:"id_tags,omitempty"` } // MetadataField holds configs for a metadata field diff --git a/pkg/networkdevice/profile/profiledefinition/metrics.go b/pkg/networkdevice/profile/profiledefinition/metrics.go index 0e25879f75c66..c29857144d582 100644 --- a/pkg/networkdevice/profile/profiledefinition/metrics.go +++ b/pkg/networkdevice/profile/profiledefinition/metrics.go @@ -82,12 +82,15 @@ type MetricTagConfig struct { // Table config Index uint `yaml:"index,omitempty" json:"index,omitempty"` - // DEPRECATED: Column field is deprecated in favour Symbol field + // DEPRECATED: Use .Symbol instead Column SymbolConfig `yaml:"column,omitempty" json:"-"` - // Symbol config - OID string `yaml:"OID,omitempty" json:"-" jsonschema:"-"` // DEPRECATED replaced by Symbol field - // Using Symbol field below as string is deprecated + // DEPRECATED: use .Symbol instead + OID string `yaml:"OID,omitempty" json:"-" jsonschema:"-"` + // Symbol records the OID to be parsed. Note that .Symbol.Name is ignored: + // set .Tag to specify the tag name. 
If a serialized Symbol is a string + // instead of an object, it will be treated like {name: }; this use + // pattern is deprecated Symbol SymbolConfigCompat `yaml:"symbol,omitempty" json:"symbol,omitempty"` IndexTransform []MetricIndexTransform `yaml:"index_transform,omitempty" json:"index_transform,omitempty"` @@ -129,8 +132,9 @@ type MetricsConfig struct { // Symbol configs Symbol SymbolConfig `yaml:"symbol,omitempty" json:"symbol,omitempty"` - // Legacy Symbol configs syntax - OID string `yaml:"OID,omitempty" json:"OID,omitempty" jsonschema:"-"` + // DEPRECATED: Use .Symbol instead + OID string `yaml:"OID,omitempty" json:"OID,omitempty" jsonschema:"-"` + // DEPRECATED: Use .Symbol instead Name string `yaml:"name,omitempty" json:"name,omitempty" jsonschema:"-"` // Table configs @@ -140,11 +144,11 @@ type MetricsConfig struct { StaticTags []string `yaml:"static_tags,omitempty" json:"-"` MetricTags MetricTagConfigList `yaml:"metric_tags,omitempty" json:"metric_tags,omitempty"` - ForcedType ProfileMetricType `yaml:"forced_type,omitempty" json:"forced_type,omitempty" jsonschema:"-"` // deprecated in favour of metric_type + // DEPRECATED: use MetricType instead. + ForcedType ProfileMetricType `yaml:"forced_type,omitempty" json:"forced_type,omitempty" jsonschema:"-"` MetricType ProfileMetricType `yaml:"metric_type,omitempty" json:"metric_type,omitempty"` - // `options` is not exposed as json at the moment since we need to evaluate if we want to expose it via UI - Options MetricsConfigOption `yaml:"options,omitempty" json:"-"` + Options MetricsConfigOption `yaml:"options,omitempty" json:"options,omitempty"` } // GetSymbolTags returns symbol tags diff --git a/pkg/networkdevice/profile/profiledefinition/profile_definition.go b/pkg/networkdevice/profile/profiledefinition/profile_definition.go index 1f0f2cf837e6a..065c0a83b4d46 100644 --- a/pkg/networkdevice/profile/profiledefinition/profile_definition.go +++ b/pkg/networkdevice/profile/profiledefinition/profile_definition.go @@ -22,20 +22,32 @@ type ProfileDefinition struct { Description string `yaml:"description,omitempty" json:"description,omitempty"` SysObjectIDs StringArray `yaml:"sysobjectid,omitempty" json:"sysobjectid,omitempty"` Extends []string `yaml:"extends,omitempty" json:"extends,omitempty"` - Metadata MetadataConfig `yaml:"metadata,omitempty" json:"metadata,omitempty" jsonschema:"-"` + Metadata MetadataConfig `yaml:"metadata,omitempty" json:"metadata,omitempty"` MetricTags []MetricTagConfig `yaml:"metric_tags,omitempty" json:"metric_tags,omitempty"` StaticTags []string `yaml:"static_tags,omitempty" json:"static_tags,omitempty"` Metrics []MetricsConfig `yaml:"metrics,omitempty" json:"metrics,omitempty"` - // Used previously to pass device vendor field (has been replaced by Metadata). - // Used in RC for passing device vendor field. - Device DeviceMeta `yaml:"device,omitempty" json:"device,omitempty" jsonschema:"device,omitempty"` // DEPRECATED + // DEPRECATED: Use metadata directly + Device DeviceMeta `yaml:"device,omitempty" json:"device,omitempty" jsonschema:"device,omitempty"` // Version is the profile version. // It is currently used only with downloaded/RC profiles. 
Version uint64 `yaml:"version,omitempty" json:"version"` } +// GetVendor returns the static vendor for this profile, if one is set +func (p *ProfileDefinition) GetVendor() string { + device, ok := p.Metadata["device"] + if !ok { + return "" + } + vendor, ok := device.Fields["vendor"] + if !ok { + return "" + } + return vendor.Value +} + // DeviceProfileRcConfig represent the profile stored in remote config. type DeviceProfileRcConfig struct { Profile ProfileDefinition `json:"profile_definition"` @@ -47,3 +59,11 @@ func NewProfileDefinition() *ProfileDefinition { p.Metadata = make(MetadataConfig) return p } + +// SplitOIDs returns two slices (scalars, columns) of all scalar and column OIDs requested by this profile. +func (p *ProfileDefinition) SplitOIDs(includeMetadata bool) ([]string, []string) { + if includeMetadata { + return splitOIDs(p.Metrics, p.MetricTags, p.Metadata) + } + return splitOIDs(p.Metrics, p.MetricTags, nil) +} diff --git a/pkg/networkdevice/profile/profiledefinition/schema/profile_rc_schema.json b/pkg/networkdevice/profile/profiledefinition/schema/profile_rc_schema.json index e7c99b88f29d7..cbee775db45b2 100644 --- a/pkg/networkdevice/profile/profiledefinition/schema/profile_rc_schema.json +++ b/pkg/networkdevice/profile/profiledefinition/schema/profile_rc_schema.json @@ -43,6 +43,196 @@ }, "type": "array" }, + "MetadataConfig": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "value": { + "properties": { + "fields": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "value": { + "properties": { + "symbol": { + "properties": { + "OID": { + "type": "string" + }, + "name": { + "type": "string" + }, + "extract_value": { + "type": "string" + }, + "scale_factor": { + "type": "number" + }, + "format": { + "type": "string" + }, + "constant_value_one": { + "type": "boolean" + }, + "metric_type": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "symbols": { + "items": { + "properties": { + "OID": { + "type": "string" + }, + "name": { + "type": "string" + }, + "extract_value": { + "type": "string" + }, + "scale_factor": { + "type": "number" + }, + "format": { + "type": "string" + }, + "constant_value_one": { + "type": "boolean" + }, + "metric_type": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "type": "array" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "key", + "value" + ] + }, + "type": "array" + }, + "id_tags": { + "items": { + "properties": { + "tag": { + "type": "string" + }, + "index": { + "type": "integer" + }, + "symbol": { + "properties": { + "OID": { + "type": "string" + }, + "name": { + "type": "string" + }, + "extract_value": { + "type": "string" + }, + "scale_factor": { + "type": "number" + }, + "format": { + "type": "string" + }, + "constant_value_one": { + "type": "boolean" + }, + "metric_type": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "index_transform": { + "items": { + "properties": { + "start": { + "type": "integer" + }, + "end": { + "type": "integer" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "start", + "end" + ] + }, + "type": "array" + }, + "mapping": { + "items": { + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "required": 
[ + "key", + "value" + ] + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "tag" + ] + }, + "type": "array" + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "fields" + ] + } + }, + "additionalProperties": false, + "type": "object", + "required": [ + "key", + "value" + ] + }, + "type": "array" + }, "MetricIndexTransform": { "properties": { "start": { @@ -114,6 +304,21 @@ }, "metric_type": { "type": "string" + }, + "options": { + "$ref": "#/$defs/MetricsConfigOption" + } + }, + "additionalProperties": false, + "type": "object" + }, + "MetricsConfigOption": { + "properties": { + "placement": { + "type": "integer" + }, + "metric_suffix": { + "type": "string" } }, "additionalProperties": false, @@ -136,6 +341,9 @@ }, "type": "array" }, + "metadata": { + "$ref": "#/$defs/MetadataConfig" + }, "metric_tags": { "items": { "$ref": "#/$defs/MetricTagConfig" diff --git a/pkg/networkdevice/profile/profiledefinition/splitoids.go b/pkg/networkdevice/profile/profiledefinition/splitoids.go new file mode 100644 index 0000000000000..98d96c4301006 --- /dev/null +++ b/pkg/networkdevice/profile/profiledefinition/splitoids.go @@ -0,0 +1,63 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. + +package profiledefinition + +import "sort" + +// splitOIDs returns all scalar and column (i.e. table) OIDs from metrics, tags, and metadata +func splitOIDs(metrics []MetricsConfig, globalTags []MetricTagConfig, metadata MetadataConfig) ([]string, []string) { + scalars := make(map[string]bool) + columns := make(map[string]bool) + // Singular metric values are scalars; metrics with .Symbols are tables, + // and their symbols and tags are both expected to be columns. 
+ for _, metric := range metrics { + scalars[metric.Symbol.OID] = true + for _, symbolConfig := range metric.Symbols { + columns[symbolConfig.OID] = true + } + for _, metricTag := range metric.MetricTags { + columns[metricTag.Symbol.OID] = true + } + } + // Global tags are scalar by definition + for _, tag := range globalTags { + scalars[tag.Symbol.OID] = true + } + // Metadata fields are all columns except when IsMetadataResourceWithScalarOids is true + for resource, metadataConfig := range metadata { + target := columns + if IsMetadataResourceWithScalarOids(resource) { + target = scalars + } + for _, field := range metadataConfig.Fields { + target[field.Symbol.OID] = true + for _, symbol := range field.Symbols { + target[symbol.OID] = true + } + } + for _, tagConfig := range metadataConfig.IDTags { + target[tagConfig.Symbol.OID] = true + } + } + scalarValues := make([]string, 0, len(scalars)) + for key := range scalars { + if key == "" { + continue + } + scalarValues = append(scalarValues, key) + } + columnValues := make([]string, 0, len(columns)) + for key := range columns { + if key == "" { + continue + } + columnValues = append(columnValues, key) + } + // Sort them for deterministic testing + sort.Strings(scalarValues) + sort.Strings(columnValues) + return scalarValues, columnValues +} diff --git a/pkg/networkdevice/profile/profiledefinition/splitoids_test.go b/pkg/networkdevice/profile/profiledefinition/splitoids_test.go new file mode 100644 index 0000000000000..39d28506b5dd8 --- /dev/null +++ b/pkg/networkdevice/profile/profiledefinition/splitoids_test.go @@ -0,0 +1,227 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2023-present Datadog, Inc. 
+ +package profiledefinition + +import ( + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSplitOIDs(t *testing.T) { + type testCase struct { + name string + metrics []MetricsConfig + tags []MetricTagConfig + metadata MetadataConfig + expectedScalars []string + expectedColumns []string + } + testCases := []testCase{ + { + name: "scalar metric", + metrics: []MetricsConfig{{ + Symbol: SymbolConfig{ + OID: "1.2.3.4", + }, + }}, + expectedScalars: []string{"1.2.3.4"}, + }, { + name: "tabular metric", + metrics: []MetricsConfig{{ + Symbols: []SymbolConfig{{OID: "1.2.3.4"}}, + MetricTags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{ + OID: "2.3.4.5", + }}, + }, + }}, + expectedColumns: []string{"1.2.3.4", "2.3.4.5"}, + }, { + name: "tags", + tags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{ + OID: "2.3.4.5", + }, + }, + }, + expectedScalars: []string{"2.3.4.5"}, + }, { + name: "metadata", + metadata: map[string]MetadataResourceConfig{ + "device": { + Fields: map[string]MetadataField{ + "vendor": {Value: "static"}, + "name": {Symbol: SymbolConfig{ + OID: "1.1", + }}, + "os_name": {Symbols: []SymbolConfig{ + { + OID: "1.2", + }, { + OID: "1.3", + }, + }}, + }, + IDTags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{ + OID: "1.4", + }}, + }, + }, + "not_device": { + Fields: map[string]MetadataField{ + "vendor": {Value: "static"}, + "name": {Symbol: SymbolConfig{ + OID: "2.1", + }}, + "os_name": {Symbols: []SymbolConfig{ + { + OID: "2.2", + }, { + OID: "2.3", + }, + }}, + }, + IDTags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{ + OID: "2.4", + }}, + }, + }, + }, + expectedScalars: []string{"1.1", "1.2", "1.3", "1.4"}, + expectedColumns: []string{"2.1", "2.2", "2.3", "2.4"}, + }, { + name: "duplicates", + metrics: []MetricsConfig{ + { + Symbol: SymbolConfig{OID: "1.1"}, + }, { + Symbols: []SymbolConfig{ + {OID: "1.1"}, + }, + MetricTags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{OID: "1.1"}}, + }, + }}, + metadata: map[string]MetadataResourceConfig{ + "device": { + Fields: map[string]MetadataField{ + "name": {Symbol: SymbolConfig{ + OID: "1.1", + }}, + }, + }, + }, + expectedScalars: []string{"1.1"}, + expectedColumns: []string{"1.1"}, + }, { + name: "sorting", + metrics: []MetricsConfig{ + {Symbol: SymbolConfig{OID: "1.2"}}, + {Symbol: SymbolConfig{OID: "1.1"}}, + { + Symbols: []SymbolConfig{ + {OID: "2.4"}, + {OID: "2.3"}, + }, + MetricTags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{OID: "2.2"}}, + {Symbol: SymbolConfigCompat{OID: "2.1"}}, + }, + }}, + expectedScalars: []string{"1.1", "1.2"}, + expectedColumns: []string{"2.1", "2.2", "2.3", "2.4"}, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + scalars, columns := splitOIDs(tc.metrics, tc.tags, tc.metadata) + expectedScalars := tc.expectedScalars + if expectedScalars == nil { + expectedScalars = []string{} + } + assert.Equal(t, expectedScalars, scalars) + expectedColumns := tc.expectedColumns + if expectedColumns == nil { + expectedColumns = []string{} + } + assert.Equal(t, expectedColumns, columns) + }) + } +} + +func TestProfileSplitOIDs(t *testing.T) { + p := ProfileDefinition{ + Metrics: []MetricsConfig{ + {Symbol: SymbolConfig{OID: "1.2"}}, + {Symbol: SymbolConfig{OID: "1.1"}}, + { + Symbols: []SymbolConfig{ + {OID: "2.4"}, + {OID: "2.3"}, + }, + MetricTags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{OID: "2.2"}}, + {Symbol: SymbolConfigCompat{OID: "2.1"}}, + }, + }, + }, + MetricTags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{OID: "1.4"}}, + 
{Symbol: SymbolConfigCompat{OID: "1.3"}}, + }, + Metadata: map[string]MetadataResourceConfig{ + "device": { + Fields: map[string]MetadataField{ + "vendor": {Value: "static"}, + "name": {Symbol: SymbolConfig{ + OID: "3.4", + }}, + "os_name": {Symbols: []SymbolConfig{ + { + OID: "3.3", + }, { + OID: "3.2", + }, + }}, + }, + IDTags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{ + OID: "3.1", + }}, + }, + }, + "not_device": { + Fields: map[string]MetadataField{ + "vendor": {Value: "static"}, + "name": {Symbol: SymbolConfig{ + OID: "4.4", + }}, + "os_name": {Symbols: []SymbolConfig{ + { + OID: "4.3", + }, { + OID: "4.2", + }, + }}, + }, + IDTags: []MetricTagConfig{ + {Symbol: SymbolConfigCompat{ + OID: "4.1", + }}, + }, + }, + }, + } + scalars, columns := p.SplitOIDs(true) + assert.Equal(t, []string{"1.1", "1.2", "1.3", "1.4", "3.1", "3.2", "3.3", "3.4"}, scalars) + assert.Equal(t, []string{"2.1", "2.2", "2.3", "2.4", "4.1", "4.2", "4.3", "4.4"}, columns) + + scalars, columns = p.SplitOIDs(false) + assert.Equal(t, []string{"1.1", "1.2", "1.3", "1.4"}, scalars) + assert.Equal(t, []string{"2.1", "2.2", "2.3", "2.4"}, columns) +} diff --git a/pkg/networkpath/traceroute/common/common.go b/pkg/networkpath/traceroute/common/common.go index c7622fa391891..08034659c22df 100644 --- a/pkg/networkpath/traceroute/common/common.go +++ b/pkg/networkpath/traceroute/common/common.go @@ -13,20 +13,10 @@ import ( "strconv" "time" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/google/gopacket" "github.com/google/gopacket/layers" "golang.org/x/net/ipv4" ) -const ( - // IPProtoICMP is the IP protocol number for ICMP - // we create our own constant here because there are - // different imports for the constant in different - // operating systems - IPProtoICMP = 1 -) - type ( // Results encapsulates a response from the // traceroute @@ -52,109 +42,45 @@ type ( // is canceled CanceledError string - // ICMPResponse encapsulates the data from - // an ICMP response packet needed for matching - ICMPResponse struct { - SrcIP net.IP - DstIP net.IP - TypeCode layers.ICMPv4TypeCode - InnerSrcIP net.IP - InnerDstIP net.IP - InnerSrcPort uint16 - InnerDstPort uint16 - InnerSeqNum uint32 - } + // MismatchError is an error type that indicates a MatcherFunc + // failed due to one or more fields from the packet not matching + // the expected information + MismatchError string + + // MatcherFunc defines functions for matching a packet from the wire to + // a traceroute based on the source/destination addresses and an identifier + MatcherFunc func(*ipv4.Header, []byte, net.IP, uint16, net.IP, uint16, uint32) (net.IP, error) ) +// Error implements the error interface for +// CanceledError func (c CanceledError) Error() string { return string(c) } +// Error implements the error interface for +// MismatchError +func (m MismatchError) Error() string { + return string(m) +} + // LocalAddrForHost takes in a destionation IP and port and returns the local -// address that should be used to connect to the destination -func LocalAddrForHost(destIP net.IP, destPort uint16) (*net.UDPAddr, error) { +// address that should be used to connect to the destination. 
The returned connection +// should be closed by the caller when the local UDP port is no longer needed +func LocalAddrForHost(destIP net.IP, destPort uint16) (*net.UDPAddr, net.Conn, error) { // this is a quick way to get the local address for connecting to the host // using UDP as the network type to avoid actually creating a connection to // the host, just get the OS to give us a local IP and local ephemeral port conn, err := net.Dial("udp4", net.JoinHostPort(destIP.String(), strconv.Itoa(int(destPort)))) if err != nil { - return nil, err + return nil, nil, err } - defer conn.Close() localAddr := conn.LocalAddr() localUDPAddr, ok := localAddr.(*net.UDPAddr) if !ok { - return nil, fmt.Errorf("invalid address type for %s: want %T, got %T", localAddr, localUDPAddr, localAddr) - } - - return localUDPAddr, nil -} - -// ParseICMP takes in an IPv4 header and payload and tries to convert to an ICMP -// message, it returns all the fields from the packet we need to validate it's the response -// we're looking for -func ParseICMP(header *ipv4.Header, payload []byte) (*ICMPResponse, error) { - // in addition to parsing, it is probably not a bad idea to do some validation - // so we can ignore the ICMP packets we don't care about - icmpResponse := ICMPResponse{} - - if header.Protocol != IPProtoICMP || header.Version != 4 || - header.Src == nil || header.Dst == nil { - return nil, fmt.Errorf("invalid IP header for ICMP packet: %+v", header) - } - icmpResponse.SrcIP = header.Src - icmpResponse.DstIP = header.Dst - - var icmpv4Layer layers.ICMPv4 - decoded := []gopacket.LayerType{} - icmpParser := gopacket.NewDecodingLayerParser(layers.LayerTypeICMPv4, &icmpv4Layer) - icmpParser.IgnoreUnsupported = true // ignore unsupported layers, we will decode them in the next step - if err := icmpParser.DecodeLayers(payload, &decoded); err != nil { - return nil, fmt.Errorf("failed to decode ICMP packet: %w", err) - } - // since we ignore unsupported layers, we need to check if we actually decoded - // anything - if len(decoded) < 1 { - return nil, fmt.Errorf("failed to decode ICMP packet, no layers decoded") - } - icmpResponse.TypeCode = icmpv4Layer.TypeCode - - var icmpPayload []byte - if len(icmpv4Layer.Payload) < 40 { - log.Tracef("Payload length %d is less than 40, extending...\n", len(icmpv4Layer.Payload)) - icmpPayload = make([]byte, 40) - copy(icmpPayload, icmpv4Layer.Payload) - // we have to set this in order for the TCP - // parser to work - icmpPayload[32] = 5 << 4 // set data offset - } else { - icmpPayload = icmpv4Layer.Payload - } - - // a separate parser is needed to decode the inner IP and TCP headers because - // gopacket doesn't support this type of nesting in a single decoder - var innerIPLayer layers.IPv4 - var innerTCPLayer layers.TCP - innerIPParser := gopacket.NewDecodingLayerParser(layers.LayerTypeIPv4, &innerIPLayer, &innerTCPLayer) - if err := innerIPParser.DecodeLayers(icmpPayload, &decoded); err != nil { - return nil, fmt.Errorf("failed to decode inner ICMP payload: %w", err) + return nil, nil, fmt.Errorf("invalid address type for %s: want %T, got %T", localAddr, localUDPAddr, localAddr) } - icmpResponse.InnerSrcIP = innerIPLayer.SrcIP - icmpResponse.InnerDstIP = innerIPLayer.DstIP - icmpResponse.InnerSrcPort = uint16(innerTCPLayer.SrcPort) - icmpResponse.InnerDstPort = uint16(innerTCPLayer.DstPort) - icmpResponse.InnerSeqNum = innerTCPLayer.Seq - - return &icmpResponse, nil -} -// ICMPMatch checks if an ICMP response matches the expected response -// based on the local and remote
IP, port, and sequence number -func ICMPMatch(localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32, response *ICMPResponse) bool { - return localIP.Equal(response.InnerSrcIP) && - remoteIP.Equal(response.InnerDstIP) && - localPort == response.InnerSrcPort && - remotePort == response.InnerDstPort && - seqNum == response.InnerSeqNum + return localUDPAddr, conn, nil } diff --git a/pkg/networkpath/traceroute/common/common_test.go b/pkg/networkpath/traceroute/common/common_test.go deleted file mode 100644 index aa6f6cfdf24cb..0000000000000 --- a/pkg/networkpath/traceroute/common/common_test.go +++ /dev/null @@ -1,117 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build test - -package common - -import ( - "net" - "testing" - - "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" - "github.com/google/gopacket/layers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "golang.org/x/net/ipv4" -) - -var ( - srcIP = net.ParseIP("1.2.3.4") - dstIP = net.ParseIP("5.6.7.8") - - innerSrcIP = net.ParseIP("10.0.0.1") - innerDstIP = net.ParseIP("192.168.1.1") -) - -func Test_parseICMP(t *testing.T) { - ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 1) - icmpLayer := testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded) - innerIPv4Layer := testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP) - innerTCPLayer := testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true) - - tt := []struct { - description string - inHeader *ipv4.Header - inPayload []byte - expected *ICMPResponse - errMsg string - }{ - { - description: "empty IPv4 layer should return an error", - inHeader: &ipv4.Header{}, - inPayload: []byte{}, - expected: nil, - errMsg: "invalid IP header for ICMP packet", - }, - { - description: "missing ICMP layer should return an error", - inHeader: ipv4Header, - inPayload: []byte{}, - expected: nil, - errMsg: "failed to decode ICMP packet", - }, - { - description: "missing inner layers should return an error", - inHeader: ipv4Header, - inPayload: testutils.CreateMockICMPPacket(nil, icmpLayer, nil, nil, false), - expected: nil, - errMsg: "failed to decode inner ICMP payload", - }, - { - description: "ICMP packet with partial TCP header should create icmpResponse", - inHeader: ipv4Header, - inPayload: testutils.CreateMockICMPPacket(nil, icmpLayer, innerIPv4Layer, innerTCPLayer, true), - expected: &ICMPResponse{ - SrcIP: srcIP, - DstIP: dstIP, - InnerSrcIP: innerSrcIP, - InnerDstIP: innerDstIP, - InnerSrcPort: 12345, - InnerDstPort: 443, - InnerSeqNum: 28394, - }, - errMsg: "", - }, - { - description: "full ICMP packet should create icmpResponse", - inHeader: ipv4Header, - inPayload: testutils.CreateMockICMPPacket(nil, icmpLayer, innerIPv4Layer, innerTCPLayer, true), - expected: &ICMPResponse{ - SrcIP: srcIP, - DstIP: dstIP, - InnerSrcIP: innerSrcIP, - InnerDstIP: innerDstIP, - InnerSrcPort: 12345, - InnerDstPort: 443, - InnerSeqNum: 28394, - }, - errMsg: "", - }, - } - - for _, test := range tt { - t.Run(test.description, func(t *testing.T) { - actual, err := ParseICMP(test.inHeader, test.inPayload) - if test.errMsg != "" { - require.Error(t, err) - assert.Contains(t, err.Error(), test.errMsg) - assert.Nil(t, actual) - return - } - require.Nil(t, err) - 
require.NotNil(t, actual) - // assert.Equal doesn't handle net.IP well - assert.Equal(t, testutils.StructFieldCount(test.expected), testutils.StructFieldCount(actual)) - assert.Truef(t, test.expected.SrcIP.Equal(actual.SrcIP), "mismatch source IPs: expected %s, got %s", test.expected.SrcIP.String(), actual.SrcIP.String()) - assert.Truef(t, test.expected.DstIP.Equal(actual.DstIP), "mismatch dest IPs: expected %s, got %s", test.expected.DstIP.String(), actual.DstIP.String()) - assert.Truef(t, test.expected.InnerSrcIP.Equal(actual.InnerSrcIP), "mismatch inner source IPs: expected %s, got %s", test.expected.InnerSrcIP.String(), actual.InnerSrcIP.String()) - assert.Truef(t, test.expected.InnerDstIP.Equal(actual.InnerDstIP), "mismatch inner dest IPs: expected %s, got %s", test.expected.InnerDstIP.String(), actual.InnerDstIP.String()) - assert.Equal(t, test.expected.InnerSrcPort, actual.InnerSrcPort) - assert.Equal(t, test.expected.InnerDstPort, actual.InnerDstPort) - assert.Equal(t, test.expected.InnerSeqNum, actual.InnerSeqNum) - }) - } -} diff --git a/pkg/networkpath/traceroute/icmp/parser.go b/pkg/networkpath/traceroute/icmp/parser.go new file mode 100644 index 0000000000000..f407c4d9e1c54 --- /dev/null +++ b/pkg/networkpath/traceroute/icmp/parser.go @@ -0,0 +1,76 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package icmp provides the logic for parsing ICMP packets +package icmp + +import ( + "fmt" + "net" + + "github.com/google/gopacket/layers" + "golang.org/x/net/ipv4" +) + +const ( + // IPProtoICMP is the IP protocol number for ICMP + // we create our own constant here because there are + // different imports for the constant in different + // operating systems + IPProtoICMP = 1 +) + +type ( + // Parser defines the interface for parsing + // ICMP packets + Parser interface { + Match(header *ipv4.Header, packet []byte, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, innerIdentifier uint32) (net.IP, error) + Parse(header *ipv4.Header, packet []byte) (*Response, error) + } + + // Response encapsulates the data from + // an ICMP response packet needed for matching + Response struct { + SrcIP net.IP + DstIP net.IP + TypeCode layers.ICMPv4TypeCode + InnerSrcIP net.IP + InnerDstIP net.IP + InnerSrcPort uint16 + InnerDstPort uint16 + // InnerIdentifier will be populated with + // an additional identification field for matching + // received packets. For TCP packets, this is the + // sequence number. For UDP packets, this is the + // checksum, a uint16 cast to a uint32. + InnerIdentifier uint32 + } +) + +// Matches checks if a Response matches the expected response +// based on the local and remote IP, port, and identifier. 
In this context, +// identifier will either be the TCP sequence number OR the UDP checksum +func (i *Response) Matches(localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, innerIdentifier uint32) bool { + return localIP.Equal(i.InnerSrcIP) && + remoteIP.Equal(i.InnerDstIP) && + localPort == i.InnerSrcPort && + remotePort == i.InnerDstPort && + innerIdentifier == i.InnerIdentifier +} + +func validatePacket(header *ipv4.Header, payload []byte) error { + // in addition to parsing, it is probably not a bad idea to do some validation + // so we can quickly ignore the ICMP packets we don't care about + if len(payload) <= 0 { + return fmt.Errorf("received empty ICMP packet") + } + + if header.Protocol != IPProtoICMP || header.Version != 4 || + header.Src == nil || header.Dst == nil { + return fmt.Errorf("invalid IP header for ICMP packet: %+v", header) + } + + return nil +} diff --git a/pkg/networkpath/traceroute/icmp/tcp_parser.go b/pkg/networkpath/traceroute/icmp/tcp_parser.go new file mode 100644 index 0000000000000..cf1ef031df9d4 --- /dev/null +++ b/pkg/networkpath/traceroute/icmp/tcp_parser.go @@ -0,0 +1,119 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package icmp + +import ( + "errors" + "fmt" + "net" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "golang.org/x/net/ipv4" +) + +type ( + // TCPParser encapsulates the data and logic + // for parsing ICMP packets with embedded TCP + // data + TCPParser struct { + icmpLayer layers.ICMPv4 + innerIPLayer layers.IPv4 + innerUDPLayer layers.UDP + innerTCPLayer layers.TCP + innerPayload gopacket.Payload + // packetParser is parser for the ICMP segment of the packet + packetParser *gopacket.DecodingLayerParser + // innerPacketParser is necessary for ICMP packets + // because gopacket does not allow the payload of + // an ICMP packet to be decoded in the same parser + innerPacketParser *gopacket.DecodingLayerParser + icmpResponse *Response + } +) + +// NewICMPTCPParser creates a new ICMPParser that can parse ICMP packets with +// embedded TCP packets +func NewICMPTCPParser() Parser { + icmpParser := &TCPParser{} + icmpParser.packetParser = gopacket.NewDecodingLayerParser(layers.LayerTypeICMPv4, &icmpParser.icmpLayer, &icmpParser.innerPayload) + icmpParser.innerPacketParser = gopacket.NewDecodingLayerParser(layers.LayerTypeIPv4, &icmpParser.innerIPLayer, &icmpParser.innerTCPLayer) + // TODO: can we ignore unsupported layers? 
+ //icmpParser.packetParser.IgnoreUnsupported = true + return icmpParser +} + +// Match encapsulates the logic to both parse and match an ICMP packet +func (p *TCPParser) Match(header *ipv4.Header, packet []byte, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, innerIdentifier uint32) (net.IP, error) { + if header.Protocol != IPProtoICMP { + return net.IP{}, errors.New("expected an ICMP packet") + } + icmpResponse, err := p.Parse(header, packet) + if err != nil { + return net.IP{}, fmt.Errorf("ICMP parse error: %w", err) + } + if !icmpResponse.Matches(localIP, localPort, remoteIP, remotePort, innerIdentifier) { + return net.IP{}, common.MismatchError("ICMP packet doesn't match") + } + + return icmpResponse.SrcIP, nil +} + +// Parse parses an ICMP packet with embedded TCP data and returns a Response +func (p *TCPParser) Parse(header *ipv4.Header, payload []byte) (*Response, error) { + if err := validatePacket(header, payload); err != nil { + return nil, err + } + + // clear layers between each run + p.icmpLayer = layers.ICMPv4{} + p.innerIPLayer = layers.IPv4{} + p.innerTCPLayer = layers.TCP{} + p.innerUDPLayer = layers.UDP{} + p.innerPayload = gopacket.Payload{} + + p.icmpResponse = &Response{} // ensure we get a fresh Response each run + p.icmpResponse.SrcIP = header.Src + p.icmpResponse.DstIP = header.Dst + + decoded := []gopacket.LayerType{} + if err := p.packetParser.DecodeLayers(payload, &decoded); err != nil { + return nil, fmt.Errorf("failed to decode ICMP packet: %w", err) + } + // since we ignore unsupported layers, we need to check if we actually decoded + // anything + if len(decoded) < 1 { + return nil, fmt.Errorf("failed to decode ICMP packet, no layers decoded") + } + p.icmpResponse.TypeCode = p.icmpLayer.TypeCode + + var icmpPayload []byte + if len(p.icmpLayer.Payload) < 40 { + log.Tracef("Payload length %d is less than 40, extending...\n", len(p.icmpLayer.Payload)) + icmpPayload = make([]byte, 40) + copy(icmpPayload, p.icmpLayer.Payload) + // we have to set this in order for the inner + // parser to work + icmpPayload[32] = 5 << 4 // set data offset + } else { + icmpPayload = p.icmpLayer.Payload + } + + // a separate parser is needed to decode the inner IP and TCP headers because + // gopacket doesn't support this type of nesting in a single decoder + if err := p.innerPacketParser.DecodeLayers(icmpPayload, &decoded); err != nil { + return nil, fmt.Errorf("failed to decode inner ICMP payload: %w", err) + } + p.icmpResponse.InnerSrcIP = p.innerIPLayer.SrcIP + p.icmpResponse.InnerDstIP = p.innerIPLayer.DstIP + p.icmpResponse.InnerSrcPort = uint16(p.innerTCPLayer.SrcPort) + p.icmpResponse.InnerDstPort = uint16(p.innerTCPLayer.DstPort) + p.icmpResponse.InnerIdentifier = p.innerTCPLayer.Seq + + return p.icmpResponse, nil +} diff --git a/pkg/networkpath/traceroute/icmp/tcp_parser_test.go b/pkg/networkpath/traceroute/icmp/tcp_parser_test.go new file mode 100644 index 0000000000000..15b5eee6595df --- /dev/null +++ b/pkg/networkpath/traceroute/icmp/tcp_parser_test.go @@ -0,0 +1,208 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
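For context, a minimal sketch of how a caller might drive the new Match API from a receive loop; readPacket is a hypothetical helper standing in for however the caller reads the next IPv4 header and ICMP payload off its raw socket, and this driver code is not part of this change. A real loop would likely also skip non-ICMP traffic and enforce a read deadline.

package icmpexample

import (
	"errors"
	"net"

	"golang.org/x/net/ipv4"

	"github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common"
	"github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/icmp"
)

// receiveProbeResponse reads packets until one matches the in-flight probe.
// A MismatchError only means the packet belongs to some other flow, so we
// keep listening; any other error (read failure, malformed ICMP) is returned.
func receiveProbeResponse(
	readPacket func() (*ipv4.Header, []byte, error), // hypothetical raw-socket reader
	localIP net.IP, localPort uint16,
	remoteIP net.IP, remotePort uint16,
	seqNum uint32,
) (net.IP, error) {
	parser := icmp.NewICMPTCPParser()
	for {
		header, payload, err := readPacket()
		if err != nil {
			return nil, err
		}
		hopIP, err := parser.Match(header, payload, localIP, localPort, remoteIP, remotePort, seqNum)
		if err != nil {
			var mismatch common.MismatchError
			if errors.As(err, &mismatch) {
				continue // some other flow's packet; keep listening
			}
			return nil, err
		}
		return hopIP, nil
	}
}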
+ +//go:build test + +package icmp + +import ( + "fmt" + "net" + "strings" + "testing" + + "github.com/google/gopacket/layers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/ipv4" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" +) + +var ( + srcIP = net.ParseIP("1.2.3.4") + dstIP = net.ParseIP("5.6.7.8") + + innerSrcIP = net.ParseIP("10.0.0.1") + innerDstIP = net.ParseIP("192.168.1.1") +) + +func TestTCPMatch(t *testing.T) { + srcPort := uint16(12345) + dstPort := uint16(443) + seqNum := uint32(2549) + mockHeader := testutils.CreateMockIPv4Header(srcIP, dstIP, 1) + icmpLayer := testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded) + innerIPv4Layer := testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP) + innerTCPLayer := testutils.CreateMockTCPLayer(srcPort, dstPort, seqNum, 12737, true, true, true) + icmpBytes := testutils.CreateMockICMPWithTCPPacket(nil, icmpLayer, innerIPv4Layer, innerTCPLayer, true) + + tts := []struct { + description string + header *ipv4.Header + payload []byte + localIP net.IP + localPort uint16 + remoteIP net.IP + remotePort uint16 + seqNum uint32 + // expected + expectedIP net.IP + expectedErrMsg string + }{ + { + description: "protocol not ICMP returns an error", + header: &ipv4.Header{ + Protocol: 17, // UDP + }, + expectedIP: net.IP{}, + expectedErrMsg: "expected an ICMP packet", + }, + { + description: "bad ICMP payload returns an error", + header: mockHeader, + localIP: srcIP, + remoteIP: dstIP, + expectedIP: net.IP{}, + expectedErrMsg: "ICMP parse error", + }, + { + description: "non-matching ICMP payload returns mismatch error", + header: mockHeader, + payload: icmpBytes, + localIP: srcIP, + localPort: srcPort, + remoteIP: dstIP, + remotePort: 9001, + seqNum: seqNum, + expectedIP: net.IP{}, + expectedErrMsg: "ICMP packet doesn't match", + }, + { + description: "matching ICMP payload returns destination IP", + header: mockHeader, + payload: icmpBytes, + localIP: innerSrcIP, + localPort: srcPort, + remoteIP: innerDstIP, + remotePort: dstPort, + seqNum: seqNum, + expectedIP: srcIP, + expectedErrMsg: "", + }, + } + + for _, test := range tts { + t.Run(test.description, func(t *testing.T) { + icmpParser := NewICMPTCPParser() + actualIP, err := icmpParser.Match(test.header, test.payload, test.localIP, test.localPort, test.remoteIP, test.remotePort, test.seqNum) + if test.expectedErrMsg != "" { + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), test.expectedErrMsg), fmt.Sprintf("expected %q, got %q", test.expectedErrMsg, err.Error())) + return + } + require.NoError(t, err) + assert.Truef(t, test.expectedIP.Equal(actualIP), "mismatch IPs: expected %s, got %s", test.expectedIP.String(), actualIP.String()) + }) + } +} + +func TestTCPParse(t *testing.T) { + ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 1) + icmpLayer := testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded) + innerIPv4Layer := testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP) + innerTCPLayer := testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true) + + tt := []struct { + description string + inHeader *ipv4.Header + inPayload []byte + expected *Response + errMsg string + }{ + { + description: "empty IPv4 layer should return an error", + inHeader: &ipv4.Header{}, + inPayload: []byte{1}, + expected: nil, + errMsg: "invalid IP header for ICMP packet", + }, + { + description: "nil ICMP layer should return an error", + 
inHeader: ipv4Header, + inPayload: nil, + expected: nil, + errMsg: "received empty ICMP packet", + }, + { + description: "empty ICMP layer should return an error", + inHeader: ipv4Header, + inPayload: []byte{}, + expected: nil, + errMsg: "received empty ICMP packet", + }, + { + description: "missing inner layers should return an error", + inHeader: ipv4Header, + inPayload: testutils.CreateMockICMPWithTCPPacket(nil, icmpLayer, nil, nil, false), + expected: nil, + errMsg: "failed to decode inner ICMP payload", + }, + { + description: "ICMP packet with partial TCP header should create icmpResponse", + inHeader: ipv4Header, + inPayload: testutils.CreateMockICMPWithTCPPacket(nil, icmpLayer, innerIPv4Layer, innerTCPLayer, true), + expected: &Response{ + SrcIP: srcIP, + DstIP: dstIP, + InnerSrcIP: innerSrcIP, + InnerDstIP: innerDstIP, + InnerSrcPort: 12345, + InnerDstPort: 443, + InnerIdentifier: 28394, + }, + errMsg: "", + }, + { + description: "full ICMP packet should create icmpResponse", + inHeader: ipv4Header, + inPayload: testutils.CreateMockICMPWithTCPPacket(nil, icmpLayer, innerIPv4Layer, innerTCPLayer, true), + expected: &Response{ + SrcIP: srcIP, + DstIP: dstIP, + InnerSrcIP: innerSrcIP, + InnerDstIP: innerDstIP, + InnerSrcPort: 12345, + InnerDstPort: 443, + InnerIdentifier: 28394, + }, + errMsg: "", + }, + } + + for _, test := range tt { + t.Run(test.description, func(t *testing.T) { + icmpParser := NewICMPTCPParser() + actual, err := icmpParser.Parse(test.inHeader, test.inPayload) + if test.errMsg != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), test.errMsg) + assert.Nil(t, actual) + return + } + require.Nil(t, err) + require.NotNil(t, actual) + // assert.Equal doesn't handle net.IP well + assert.Equal(t, testutils.StructFieldCount(test.expected), testutils.StructFieldCount(actual)) + assert.Truef(t, test.expected.SrcIP.Equal(actual.SrcIP), "mismatch source IPs: expected %s, got %s", test.expected.SrcIP.String(), actual.SrcIP.String()) + assert.Truef(t, test.expected.DstIP.Equal(actual.DstIP), "mismatch dest IPs: expected %s, got %s", test.expected.DstIP.String(), actual.DstIP.String()) + assert.Truef(t, test.expected.InnerSrcIP.Equal(actual.InnerSrcIP), "mismatch inner source IPs: expected %s, got %s", test.expected.InnerSrcIP.String(), actual.InnerSrcIP.String()) + assert.Truef(t, test.expected.InnerDstIP.Equal(actual.InnerDstIP), "mismatch inner dest IPs: expected %s, got %s", test.expected.InnerDstIP.String(), actual.InnerDstIP.String()) + assert.Equal(t, test.expected.InnerSrcPort, actual.InnerSrcPort) + assert.Equal(t, test.expected.InnerDstPort, actual.InnerDstPort) + assert.Equal(t, test.expected.InnerIdentifier, actual.InnerIdentifier) + }) + } +} diff --git a/pkg/networkpath/traceroute/icmp/udp_parser.go b/pkg/networkpath/traceroute/icmp/udp_parser.go new file mode 100644 index 0000000000000..2258a8af4dbbe --- /dev/null +++ b/pkg/networkpath/traceroute/icmp/udp_parser.go @@ -0,0 +1,108 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package icmp + +import ( + "errors" + "fmt" + "net" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "golang.org/x/net/ipv4" +) + +type ( + // UDPParser encapsulates the data and logic + // for parsing ICMP packets with embedded UDP + // data + UDPParser struct { + icmpLayer layers.ICMPv4 + innerIPLayer layers.IPv4 + innerUDPLayer layers.UDP + innerTCPLayer layers.TCP + innerPayload gopacket.Payload + // packetParser is parser for the ICMP segment of the packet + packetParser *gopacket.DecodingLayerParser + // innerPacketParser is necessary for ICMP packets + // because gopacket does not allow the payload of + // an ICMP packet to be decoded in the same parser + innerPacketParser *gopacket.DecodingLayerParser + icmpResponse *Response + } +) + +// NewICMPUDPParser creates a new ICMPParser that can parse ICMP packets with +// embedded UDP packets +func NewICMPUDPParser() Parser { + icmpParser := &UDPParser{} + icmpParser.packetParser = gopacket.NewDecodingLayerParser(layers.LayerTypeICMPv4, &icmpParser.icmpLayer) + icmpParser.innerPacketParser = gopacket.NewDecodingLayerParser(layers.LayerTypeIPv4, &icmpParser.innerIPLayer, &icmpParser.innerUDPLayer) + // TODO: can we ignore unsupported layers? + icmpParser.packetParser.IgnoreUnsupported = true + return icmpParser +} + +// Match encapsulates to logic to both parse and match an ICMP packet +func (p *UDPParser) Match(header *ipv4.Header, packet []byte, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, innerIdentifier uint32) (net.IP, error) { + if header.Protocol != IPProtoICMP { + return net.IP{}, errors.New("expected an ICMP packet") + } + icmpResponse, err := p.Parse(header, packet) + if err != nil { + return net.IP{}, fmt.Errorf("ICMP parse error: %w", err) + } + if !icmpResponse.Matches(localIP, localPort, remoteIP, remotePort, innerIdentifier) { + return net.IP{}, common.MismatchError("ICMP packet doesn't match") + } + + return icmpResponse.SrcIP, nil +} + +// Parse parses an ICMP packet with embedded UDP data and returns a Response +func (p *UDPParser) Parse(header *ipv4.Header, payload []byte) (*Response, error) { + if err := validatePacket(header, payload); err != nil { + return nil, err + } + + // clear layers between each run + p.icmpLayer = layers.ICMPv4{} + p.innerIPLayer = layers.IPv4{} + p.innerTCPLayer = layers.TCP{} + p.innerUDPLayer = layers.UDP{} + p.innerPayload = gopacket.Payload{} + + p.icmpResponse = &Response{} // ensure we get a fresh ICMPResponse each run + p.icmpResponse.SrcIP = header.Src + p.icmpResponse.DstIP = header.Dst + + decoded := []gopacket.LayerType{} + if err := p.packetParser.DecodeLayers(payload, &decoded); err != nil { + return nil, fmt.Errorf("failed to decode ICMP packet: %w", err) + } + // since we ignore unsupported layers, we need to check if we actually decoded + // anything + if len(decoded) < 1 { + return nil, fmt.Errorf("failed to decode ICMP packet, no layers decoded") + } + p.icmpResponse.TypeCode = p.icmpLayer.TypeCode + + // a separate parser is needed to decode the inner IP and UDP headers because + // gopacket doesn't support this type of nesting in a single decoder + if err := p.innerPacketParser.DecodeLayers(p.icmpLayer.Payload, &decoded); err != nil { + return nil, fmt.Errorf("failed to decode inner ICMP payload: %w", err) + } + + p.icmpResponse.InnerSrcIP = p.innerIPLayer.SrcIP + p.icmpResponse.InnerDstIP = p.innerIPLayer.DstIP + p.icmpResponse.InnerSrcPort = 
uint16(p.innerUDPLayer.SrcPort) + p.icmpResponse.InnerDstPort = uint16(p.innerUDPLayer.DstPort) + // the packet's checksum is used as the identifier for UDP packets + p.icmpResponse.InnerIdentifier = uint32(p.innerUDPLayer.Checksum) + + return p.icmpResponse, nil +} diff --git a/pkg/networkpath/traceroute/icmp/udp_parser_test.go b/pkg/networkpath/traceroute/icmp/udp_parser_test.go new file mode 100644 index 0000000000000..a8969b818fff3 --- /dev/null +++ b/pkg/networkpath/traceroute/icmp/udp_parser_test.go @@ -0,0 +1,185 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build test + +package icmp + +import ( + "fmt" + "net" + "strings" + "testing" + + "github.com/google/gopacket/layers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/ipv4" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" +) + +func TestUDPMatch(t *testing.T) { + srcPort := uint16(12345) + dstPort := uint16(443) + checksum := uint16(576) // calculated field + mockHeader := testutils.CreateMockIPv4Header(srcIP, dstIP, 1) + icmpLayer := testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded) + innerIPv4Layer := testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolUDP) + innerUDPLayer := testutils.CreateMockUDPLayer(srcPort, dstPort, checksum) + icmpBytes := testutils.CreateMockICMPWithUDPPacket(nil, icmpLayer, innerIPv4Layer, innerUDPLayer) + + tts := []struct { + description string + header *ipv4.Header + payload []byte + localIP net.IP + localPort uint16 + remoteIP net.IP + remotePort uint16 + checksum uint16 + // expected + expectedIP net.IP + expectedErrMsg string + }{ + { + description: "protocol not ICMP returns an error", + header: &ipv4.Header{ + Protocol: 17, // UDP + }, + expectedIP: net.IP{}, + expectedErrMsg: "expected an ICMP packet", + }, + { + description: "bad ICMP payload returns an error", + header: mockHeader, + localIP: srcIP, + remoteIP: dstIP, + expectedIP: net.IP{}, + expectedErrMsg: "ICMP parse error", + }, + { + description: "non-matching ICMP payload returns mismatch error", + header: mockHeader, + payload: icmpBytes, + localIP: srcIP, + localPort: srcPort, + remoteIP: dstIP, + remotePort: 9001, + checksum: checksum, + expectedIP: net.IP{}, + expectedErrMsg: "ICMP packet doesn't match", + }, + { + description: "matching ICMP payload returns destination IP", + header: mockHeader, + payload: icmpBytes, + localIP: innerSrcIP, + localPort: srcPort, + remoteIP: innerDstIP, + remotePort: dstPort, + checksum: checksum, + expectedIP: srcIP, + expectedErrMsg: "", + }, + } + + for _, test := range tts { + t.Run(test.description, func(t *testing.T) { + icmpParser := NewICMPUDPParser() + actualIP, err := icmpParser.Match(test.header, test.payload, test.localIP, test.localPort, test.remoteIP, test.remotePort, uint32(test.checksum)) + if test.expectedErrMsg != "" { + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), test.expectedErrMsg), fmt.Sprintf("expected %q, got %q", test.expectedErrMsg, err.Error())) + return + } + require.NoError(t, err) + assert.Truef(t, test.expectedIP.Equal(actualIP), "mismatch IPs: expected %s, got %s", test.expectedIP.String(), actualIP.String()) + }) + } +} + +func TestUDPParse(t *testing.T) { + ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 1) + 
icmpLayer := testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded) + innerIPv4Layer := testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolUDP) + innerUDPLayer := testutils.CreateMockUDPLayer(12345, 443, 28394) + + tt := []struct { + description string + inHeader *ipv4.Header + inPayload []byte + expected *Response + errMsg string + }{ + { + description: "empty IPv4 layer should return an error", + inHeader: &ipv4.Header{}, + inPayload: []byte{1}, + expected: nil, + errMsg: "invalid IP header for ICMP packet", + }, + { + description: "nil ICMP layer should return an error", + inHeader: ipv4Header, + inPayload: nil, + expected: nil, + errMsg: "received empty ICMP packet", + }, + { + description: "empty ICMP layer should return an error", + inHeader: ipv4Header, + inPayload: []byte{}, + expected: nil, + errMsg: "received empty ICMP packet", + }, + { + description: "missing inner layers should return an error", + inHeader: ipv4Header, + inPayload: testutils.CreateMockICMPWithUDPPacket(nil, icmpLayer, nil, nil), + expected: nil, + errMsg: "failed to decode inner ICMP payload", + }, + { + description: "full ICMP packet should create icmpResponse", + inHeader: ipv4Header, + inPayload: testutils.CreateMockICMPWithUDPPacket(nil, icmpLayer, innerIPv4Layer, innerUDPLayer), + expected: &Response{ + SrcIP: srcIP, + DstIP: dstIP, + InnerSrcIP: innerSrcIP, + InnerDstIP: innerDstIP, + InnerSrcPort: 12345, + InnerDstPort: 443, + InnerIdentifier: 576, + }, + errMsg: "", + }, + } + + for _, test := range tt { + t.Run(test.description, func(t *testing.T) { + icmpParser := NewICMPUDPParser() + actual, err := icmpParser.Parse(test.inHeader, test.inPayload) + if test.errMsg != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), test.errMsg) + assert.Nil(t, actual) + return + } + require.Nil(t, err) + require.NotNil(t, actual) + // assert.Equal doesn't handle net.IP well + assert.Equal(t, testutils.StructFieldCount(test.expected), testutils.StructFieldCount(actual)) + assert.Truef(t, test.expected.SrcIP.Equal(actual.SrcIP), "mismatch source IPs: expected %s, got %s", test.expected.SrcIP.String(), actual.SrcIP.String()) + assert.Truef(t, test.expected.DstIP.Equal(actual.DstIP), "mismatch dest IPs: expected %s, got %s", test.expected.DstIP.String(), actual.DstIP.String()) + assert.Truef(t, test.expected.InnerSrcIP.Equal(actual.InnerSrcIP), "mismatch inner source IPs: expected %s, got %s", test.expected.InnerSrcIP.String(), actual.InnerSrcIP.String()) + assert.Truef(t, test.expected.InnerDstIP.Equal(actual.InnerDstIP), "mismatch inner dest IPs: expected %s, got %s", test.expected.InnerDstIP.String(), actual.InnerDstIP.String()) + assert.Equal(t, test.expected.InnerSrcPort, actual.InnerSrcPort) + assert.Equal(t, test.expected.InnerDstPort, actual.InnerDstPort) + assert.Equal(t, test.expected.InnerIdentifier, actual.InnerIdentifier) + }) + } +} diff --git a/pkg/networkpath/traceroute/runner/runner.go b/pkg/networkpath/traceroute/runner/runner.go index b8240ee6db3cd..47be74957f580 100644 --- a/pkg/networkpath/traceroute/runner/runner.go +++ b/pkg/networkpath/traceroute/runner/runner.go @@ -13,11 +13,8 @@ import ( "math/rand" "net" "os" - "sort" "time" - "github.com/Datadog/dublin-traceroute/go/dublintraceroute/probes/probev4" - "github.com/Datadog/dublin-traceroute/go/dublintraceroute/results" "github.com/vishvananda/netns" telemetryComponent "github.com/DataDog/datadog-agent/comp/core/telemetry" @@ -163,58 +160,20 @@ func (r *Runner) RunTraceroute(ctx context.Context, cfg 
config.Config) (payload. return pathResult, nil } -func (r *Runner) runUDP(cfg config.Config, hname string, dest net.IP, maxTTL uint8, timeout time.Duration) (payload.NetworkPath, error) { - destPort, srcPort, useSourcePort := getPorts(cfg.DestPort) - - dt := &probev4.UDPv4{ - Target: dest, - SrcPort: srcPort, - DstPort: destPort, - UseSrcPort: useSourcePort, - NumPaths: uint16(DefaultNumPaths), - MinTTL: uint8(DefaultMinTTL), // TODO: what's a good value? - MaxTTL: maxTTL, - Delay: time.Duration(DefaultDelay) * time.Millisecond, // TODO: what's a good value? - Timeout: timeout, // TODO: what's a good value? - BrokenNAT: false, - } - - results, err := dt.Traceroute() - if err != nil { - return payload.NetworkPath{}, fmt.Errorf("traceroute run failed: %s", err.Error()) - } - - pathResult, err := r.processUDPResults(results, hname, cfg.DestHostname, destPort, dest) - if err != nil { - return payload.NetworkPath{}, err - } - log.Tracef("UDP Results: %+v", pathResult) - - return pathResult, nil -} - func (r *Runner) runTCP(cfg config.Config, hname string, target net.IP, maxTTL uint8, timeout time.Duration) (payload.NetworkPath, error) { destPort := cfg.DestPort if destPort == 0 { destPort = 80 // TODO: is this the default we want? } - tr := tcp.TCPv4{ - Target: target, - DestPort: destPort, - NumPaths: 1, - MinTTL: uint8(DefaultMinTTL), - MaxTTL: maxTTL, - Delay: time.Duration(DefaultDelay) * time.Millisecond, - Timeout: timeout, - } + tr := tcp.NewTCPv4(target, destPort, DefaultNumPaths, DefaultMinTTL, maxTTL, time.Duration(DefaultDelay)*time.Millisecond, timeout) results, err := tr.TracerouteSequential() if err != nil { return payload.NetworkPath{}, err } - pathResult, err := r.processTCPResults(results, hname, cfg.DestHostname, destPort, target) + pathResult, err := r.processResults(results, payload.ProtocolTCP, hname, cfg.DestHostname, destPort, target) if err != nil { return payload.NetworkPath{}, err } @@ -223,11 +182,11 @@ func (r *Runner) runTCP(cfg config.Config, hname string, target net.IP, maxTTL u return pathResult, nil } -func (r *Runner) processTCPResults(res *common.Results, hname string, destinationHost string, destinationPort uint16, destinationIP net.IP) (payload.NetworkPath, error) { +func (r *Runner) processResults(res *common.Results, protocol payload.Protocol, hname string, destinationHost string, destinationPort uint16, destinationIP net.IP) (payload.NetworkPath, error) { traceroutePath := payload.NetworkPath{ AgentVersion: version.AgentVersion, PathtraceID: payload.NewPathtraceID(), - Protocol: payload.ProtocolTCP, + Protocol: protocol, Timestamp: time.Now().UnixMilli(), Source: payload.NetworkPathSource{ Hostname: hname, @@ -281,113 +240,6 @@ func (r *Runner) processTCPResults(res *common.Results, hname string, destinatio return traceroutePath, nil } -func (r *Runner) processUDPResults(res *results.Results, hname string, destinationHost string, destinationPort uint16, destinationIP net.IP) (payload.NetworkPath, error) { - type node struct { - node string - probe *results.Probe - } - - traceroutePath := payload.NetworkPath{ - AgentVersion: version.AgentVersion, - PathtraceID: payload.NewPathtraceID(), - Protocol: payload.ProtocolUDP, - Timestamp: time.Now().UnixMilli(), - Source: payload.NetworkPathSource{ - Hostname: hname, - NetworkID: r.networkID, - }, - Destination: payload.NetworkPathDestination{ - Hostname: destinationHost, - Port: destinationPort, - IPAddress: destinationIP.String(), - }, - } - - flowIDs := make([]int, 0, len(res.Flows)) - for flowID := range 
res.Flows { - flowIDs = append(flowIDs, int(flowID)) - } - sort.Ints(flowIDs) - - for _, flowID := range flowIDs { - hops := res.Flows[uint16(flowID)] - if len(hops) == 0 { - log.Tracef("No hops for flow ID %d", flowID) - continue - } - var nodes []node - // add first hop - localAddr := hops[0].Sent.IP.SrcIP - - // get hardware interface info - if r.gatewayLookup != nil { - src := util.AddressFromNetIP(localAddr) - dst := util.AddressFromNetIP(hops[0].Sent.IP.DstIP) - - traceroutePath.Source.Via = r.gatewayLookup.LookupWithIPs(src, dst, r.nsIno) - } - - firstNodeName := localAddr.String() - nodes = append(nodes, node{node: firstNodeName, probe: &hops[0]}) - - // then add all the other hops - for _, hop := range hops { - hop := hop - nodename := fmt.Sprintf("unknown_hop_%d", hop.Sent.IP.TTL) - if hop.Received != nil { - nodename = hop.Received.IP.SrcIP.String() - } - nodes = append(nodes, node{node: nodename, probe: &hop}) - - if hop.IsLast { - break - } - } - // add edges - if len(nodes) <= 1 { - // no edges to add if there is only one node - continue - } - - // start at node 1. Each node back-references the previous one - for idx := 1; idx < len(nodes); idx++ { - if idx >= len(nodes) { - // we are at the second-to-last node - break - } - prev := nodes[idx-1] - cur := nodes[idx] - - edgeLabel := "" - if idx == 1 { - edgeLabel += fmt.Sprintf( - "srcport %d\ndstport %d", - cur.probe.Sent.UDP.SrcPort, - cur.probe.Sent.UDP.DstPort, - ) - } - if prev.probe.NATID != cur.probe.NATID { - edgeLabel += "\nNAT detected" - } - edgeLabel += fmt.Sprintf("\n%d.%d ms", int(cur.probe.RttUsec/1000), int(cur.probe.RttUsec%1000)) - - isReachable := cur.probe.Received != nil - ip := cur.node - durationMs := float64(cur.probe.RttUsec) / 1000 - - hop := payload.NetworkPathHop{ - TTL: idx, - IPAddress: ip, - RTT: durationMs, - Reachable: isReachable, - } - traceroutePath.Hops = append(traceroutePath.Hops, hop) - } - } - - return traceroutePath, nil -} - func getPorts(configDestPort uint16) (uint16, uint16, bool) { var destPort uint16 var srcPort uint16 diff --git a/pkg/networkpath/traceroute/runner/runner_unix.go b/pkg/networkpath/traceroute/runner/runner_unix.go new file mode 100644 index 0000000000000..b91c36972de2e --- /dev/null +++ b/pkg/networkpath/traceroute/runner/runner_unix.go @@ -0,0 +1,161 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +//go:build unix + +package runner + +import ( + "fmt" + "net" + "sort" + "time" + + "github.com/DataDog/datadog-agent/pkg/networkpath/payload" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config" + "github.com/DataDog/datadog-agent/pkg/process/util" + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/version" + "github.com/Datadog/dublin-traceroute/go/dublintraceroute/probes/probev4" + "github.com/Datadog/dublin-traceroute/go/dublintraceroute/results" +) + +// runUDP runs a UDP traceroute using the Dublin Traceroute library. 
+func (r *Runner) runUDP(cfg config.Config, hname string, dest net.IP, maxTTL uint8, timeout time.Duration) (payload.NetworkPath, error) { + destPort, srcPort, useSourcePort := getPorts(cfg.DestPort) + + dt := &probev4.UDPv4{ + Target: dest, + SrcPort: srcPort, + DstPort: destPort, + UseSrcPort: useSourcePort, + NumPaths: uint16(DefaultNumPaths), + MinTTL: uint8(DefaultMinTTL), // TODO: what's a good value? + MaxTTL: maxTTL, + Delay: time.Duration(DefaultDelay) * time.Millisecond, // TODO: what's a good value? + Timeout: timeout, // TODO: what's a good value? + BrokenNAT: false, + } + + results, err := dt.Traceroute() + if err != nil { + return payload.NetworkPath{}, fmt.Errorf("traceroute run failed: %s", err.Error()) + } + + pathResult, err := r.processDublinResults(results, hname, cfg.DestHostname, destPort, dest) + if err != nil { + return payload.NetworkPath{}, err + } + log.Tracef("UDP Results: %+v", pathResult) + + return pathResult, nil +} + +func (r *Runner) processDublinResults(res *results.Results, hname string, destinationHost string, destinationPort uint16, destinationIP net.IP) (payload.NetworkPath, error) { + type node struct { + node string + probe *results.Probe + } + + traceroutePath := payload.NetworkPath{ + AgentVersion: version.AgentVersion, + PathtraceID: payload.NewPathtraceID(), + Protocol: payload.ProtocolUDP, + Timestamp: time.Now().UnixMilli(), + Source: payload.NetworkPathSource{ + Hostname: hname, + NetworkID: r.networkID, + }, + Destination: payload.NetworkPathDestination{ + Hostname: destinationHost, + Port: destinationPort, + IPAddress: destinationIP.String(), + }, + } + + flowIDs := make([]int, 0, len(res.Flows)) + for flowID := range res.Flows { + flowIDs = append(flowIDs, int(flowID)) + } + sort.Ints(flowIDs) + + for _, flowID := range flowIDs { + hops := res.Flows[uint16(flowID)] + if len(hops) == 0 { + log.Tracef("No hops for flow ID %d", flowID) + continue + } + var nodes []node + // add first hop + localAddr := hops[0].Sent.IP.SrcIP + + // get hardware interface info + if r.gatewayLookup != nil { + src := util.AddressFromNetIP(localAddr) + dst := util.AddressFromNetIP(hops[0].Sent.IP.DstIP) + + traceroutePath.Source.Via = r.gatewayLookup.LookupWithIPs(src, dst, r.nsIno) + } + + firstNodeName := localAddr.String() + nodes = append(nodes, node{node: firstNodeName, probe: &hops[0]}) + + // then add all the other hops + for _, hop := range hops { + hop := hop + nodename := fmt.Sprintf("unknown_hop_%d", hop.Sent.IP.TTL) + if hop.Received != nil { + nodename = hop.Received.IP.SrcIP.String() + } + nodes = append(nodes, node{node: nodename, probe: &hop}) + + if hop.IsLast { + break + } + } + // add edges + if len(nodes) <= 1 { + // no edges to add if there is only one node + continue + } + + // start at node 1. 
Each node back-references the previous one + for idx := 1; idx < len(nodes); idx++ { + if idx >= len(nodes) { + // we are at the second-to-last node + break + } + prev := nodes[idx-1] + cur := nodes[idx] + + edgeLabel := "" + if idx == 1 { + edgeLabel += fmt.Sprintf( + "srcport %d\ndstport %d", + cur.probe.Sent.UDP.SrcPort, + cur.probe.Sent.UDP.DstPort, + ) + } + if prev.probe.NATID != cur.probe.NATID { + edgeLabel += "\nNAT detected" + } + edgeLabel += fmt.Sprintf("\n%d.%d ms", int(cur.probe.RttUsec/1000), int(cur.probe.RttUsec%1000)) + + isReachable := cur.probe.Received != nil + ip := cur.node + durationMs := float64(cur.probe.RttUsec) / 1000 + + hop := payload.NetworkPathHop{ + TTL: idx, + IPAddress: ip, + RTT: durationMs, + Reachable: isReachable, + } + traceroutePath.Hops = append(traceroutePath.Hops, hop) + } + } + + return traceroutePath, nil +} diff --git a/pkg/networkpath/traceroute/runner/runner_windows.go b/pkg/networkpath/traceroute/runner/runner_windows.go new file mode 100644 index 0000000000000..14e393a192e88 --- /dev/null +++ b/pkg/networkpath/traceroute/runner/runner_windows.go @@ -0,0 +1,39 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package runner + +import ( + "net" + "time" + + "github.com/DataDog/datadog-agent/pkg/networkpath/payload" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/udp" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// runUDP runs a UDP traceroute for Windows using a traceroute that's built in +// to the agent. +func (r *Runner) runUDP(cfg config.Config, hname string, target net.IP, maxTTL uint8, timeout time.Duration) (payload.NetworkPath, error) { + destPort := cfg.DestPort + if destPort == 0 { + destPort = 33434 // TODO: is this the default we want? + } + + tr := udp.NewUDPv4(target, destPort, DefaultNumPaths, uint8(DefaultMinTTL), maxTTL, time.Duration(DefaultDelay)*time.Millisecond, timeout) + results, err := tr.TracerouteSequential() + if err != nil { + return payload.NetworkPath{}, err + } + + pathResult, err := r.processResults(results, payload.ProtocolUDP, hname, cfg.DestHostname, destPort, target) + if err != nil { + return payload.NetworkPath{}, err + } + log.Tracef("UDP Results: %+v", pathResult) + + return pathResult, nil +} diff --git a/pkg/networkpath/traceroute/tcp/parser.go b/pkg/networkpath/traceroute/tcp/parser.go new file mode 100644 index 0000000000000..714732fbdbc3c --- /dev/null +++ b/pkg/networkpath/traceroute/tcp/parser.go @@ -0,0 +1,101 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
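// [editor's note] A sketch of the reply-matching rule implemented by tcpResponse.Match in
// the parser added below: a reply to our SYN probe is either a SYN-ACK or a RST, and TCP
// acknowledges seqNum+1, so the probe is identified by checking Ack-1 against the sequence
// number we sent (alongside the IP/port comparison, abbreviated here to ports only).
// Illustrative only; not part of this change.
package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

// matchesProbe reports whether a decoded TCP segment looks like the reply to a
// SYN probe sent with the given sequence number and ports.
func matchesProbe(resp *layers.TCP, probeSrcPort, probeDstPort uint16, probeSeq uint32) bool {
	flagsOK := (resp.SYN && resp.ACK) || resp.RST // SYN-ACK from an open port, RST from a closed one
	portsOK := uint16(resp.SrcPort) == probeDstPort && // reply comes back from the probed port
		uint16(resp.DstPort) == probeSrcPort
	return flagsOK && portsOK && resp.Ack-1 == probeSeq // the peer ACKs our ISN+1
}

func main() {
	probeSeq := uint32(2549)
	reply := &layers.TCP{
		SrcPort: layers.TCPPort(443),   // probed destination port
		DstPort: layers.TCPPort(12345), // our ephemeral source port
		Seq:     12737,
		Ack:     probeSeq + 1,
		SYN:     true,
		ACK:     true,
	}
	fmt.Println(matchesProbe(reply, 12345, 443, probeSeq)) // true
}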
+ +package tcp + +import ( + "errors" + "fmt" + "net" + "syscall" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "golang.org/x/net/ipv4" +) + +type ( + // tcpResponse encapsulates the data from a + // TCP response needed for matching + tcpResponse struct { + SrcIP net.IP + DstIP net.IP + TCPResponse layers.TCP + } + + // parser encapsulates everything needed to + // decode TCP packets off the wire into structs + parser struct { + layer layers.TCP + decoded []gopacket.LayerType + decodingLayerParser *gopacket.DecodingLayerParser + } +) + +func newParser() *parser { + tcpParser := &parser{} + tcpParser.decodingLayerParser = gopacket.NewDecodingLayerParser(layers.LayerTypeTCP, &tcpParser.layer) + return tcpParser +} + +func (tp *parser) parseTCP(header *ipv4.Header, payload []byte) (*tcpResponse, error) { + if header.Protocol != syscall.IPPROTO_TCP || header.Version != 4 || + header.Src == nil || header.Dst == nil { + return nil, fmt.Errorf("invalid IP header for TCP packet: %+v", header) + } + if len(payload) <= 0 { + return nil, errors.New("received empty TCP payload") + } + + if err := tp.decodingLayerParser.DecodeLayers(payload, &tp.decoded); err != nil { + return nil, fmt.Errorf("failed to decode TCP packet: %w", err) + } + + resp := &tcpResponse{ + SrcIP: header.Src, + DstIP: header.Dst, + TCPResponse: tp.layer, + } + // make sure the TCP layer is cleared between runs + tp.layer = layers.TCP{} + + return resp, nil +} + +// MatchTCP parses a TCP packet from a header and packet bytes and compares the information +// contained in the packet to what's expected and returns the source IP of the incoming packet +// if it's successful or a MismatchError if the packet can be read but doesn't match +func (tp *parser) MatchTCP(header *ipv4.Header, packet []byte, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, error) { + if header.Protocol != 6 { // TCP + return net.IP{}, errors.New("expected a TCP packet") + } + // don't even bother parsing the packet if the src/dst ip don't match + if !localIP.Equal(header.Dst) || !remoteIP.Equal(header.Src) { + return net.IP{}, common.MismatchError("TCP packet doesn't match") + } + tcpResp, err := tp.parseTCP(header, packet) + if err != nil { + return net.IP{}, fmt.Errorf("TCP parse error: %w", err) + } + if !tcpResp.Match(localIP, localPort, remoteIP, remotePort, seqNum) { + return net.IP{}, common.MismatchError("TCP packet doesn't match") + } + + return tcpResp.SrcIP, nil +} + +func (t *tcpResponse) Match(localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) bool { + flagsCheck := (t.TCPResponse.SYN && t.TCPResponse.ACK) || t.TCPResponse.RST + sourcePort := uint16(t.TCPResponse.SrcPort) + destPort := uint16(t.TCPResponse.DstPort) + + return remoteIP.Equal(t.SrcIP) && + remotePort == sourcePort && + localIP.Equal(t.DstIP) && + localPort == destPort && + seqNum == t.TCPResponse.Ack-1 && + flagsCheck +} diff --git a/pkg/networkpath/traceroute/tcp/parser_test.go b/pkg/networkpath/traceroute/tcp/parser_test.go new file mode 100644 index 0000000000000..4a2208f3e1182 --- /dev/null +++ b/pkg/networkpath/traceroute/tcp/parser_test.go @@ -0,0 +1,200 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
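// [editor's note] MatchTCP above shares its signature with the icmp parsers' Match, which
// is what lets tcpv4_windows.go hand both to the raw-socket listener keyed by IP protocol
// (ICMP vs TCP). The sketch below shows only that dispatch shape; matcherFunc is a local
// stand-in for the matcher type in the common package (not shown in this excerpt), and
// handleReply is invented for the example. Illustrative only; not part of this change.
package main

import (
	"errors"
	"fmt"
	"net"

	"golang.org/x/net/ipv4"
)

// matcherFunc mirrors the shape of the parsers' Match methods: it returns the hop IP
// on a match, or an error (a mismatch being one recoverable kind of error).
type matcherFunc func(header *ipv4.Header, packet []byte, localIP net.IP, localPort uint16,
	remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, error)

var errMismatch = errors.New("packet doesn't match")

// handleReply picks the matcher for the reply's IP protocol and runs it; unknown
// protocols and mismatches are skipped so the caller can keep listening.
func handleReply(matchers map[int]matcherFunc, header *ipv4.Header, packet []byte,
	localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, bool) {
	match, ok := matchers[header.Protocol]
	if !ok {
		return nil, false // not a protocol we are tracing
	}
	hopIP, err := match(header, packet, localIP, localPort, remoteIP, remotePort, seqNum)
	if err != nil {
		return nil, false // parse failure or mismatch: keep waiting for the right packet
	}
	return hopIP, true
}

func main() {
	// protocol numbers 1 (ICMP) and 6 (TCP), as in the real matcher map
	matchers := map[int]matcherFunc{
		1: func(_ *ipv4.Header, _ []byte, _ net.IP, _ uint16, _ net.IP, _ uint16, _ uint32) (net.IP, error) {
			return nil, errMismatch // placeholder ICMP matcher
		},
		6: func(h *ipv4.Header, _ []byte, _ net.IP, _ uint16, _ net.IP, _ uint16, _ uint32) (net.IP, error) {
			return h.Src, nil // placeholder TCP matcher: pretend it matched
		},
	}
	header := &ipv4.Header{Protocol: 6, Src: net.ParseIP("5.6.7.8")}
	if hop, ok := handleReply(matchers, header, nil, nil, 0, nil, 0, 0); ok {
		fmt.Println("hop:", hop)
	}
}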
+ +//go:build test + +package tcp + +import ( + "fmt" + "net" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/ipv4" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" +) + +func Test_parseTCP(t *testing.T) { + ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 6) // 6 is TCP + tcpLayer := testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true) + + // full packet + encodedTCPLayer, fullTCPPacket := testutils.CreateMockTCPPacket(ipv4Header, tcpLayer, false) + + tt := []struct { + description string + inHeader *ipv4.Header + inPayload []byte + expected *tcpResponse + errMsg string + }{ + { + description: "empty IPv4 layer should return an error", + inHeader: &ipv4.Header{}, + inPayload: []byte{}, + expected: nil, + errMsg: "invalid IP header for TCP packet", + }, + { + description: "nil TCP layer should return an error", + inHeader: ipv4Header, + expected: nil, + errMsg: "received empty TCP payload", + }, + { + description: "missing TCP layer should return an error", + inHeader: ipv4Header, + inPayload: []byte{}, + expected: nil, + errMsg: "received empty TCP payload", + }, + { + description: "full TCP packet should create tcpResponse", + inHeader: ipv4Header, + inPayload: fullTCPPacket, + expected: &tcpResponse{ + SrcIP: srcIP, + DstIP: dstIP, + TCPResponse: *encodedTCPLayer, + }, + errMsg: "", + }, + } + + tp := newParser() + for _, test := range tt { + t.Run(test.description, func(t *testing.T) { + actual, err := tp.parseTCP(test.inHeader, test.inPayload) + if test.errMsg != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), test.errMsg) + assert.Nil(t, actual) + return + } + require.Nil(t, err) + require.NotNil(t, actual) + // assert.Equal doesn't handle net.IP well + assert.Equal(t, testutils.StructFieldCount(test.expected), testutils.StructFieldCount(actual)) + assert.Truef(t, test.expected.SrcIP.Equal(actual.SrcIP), "mismatch source IPs: expected %s, got %s", test.expected.SrcIP.String(), actual.SrcIP.String()) + assert.Truef(t, test.expected.DstIP.Equal(actual.DstIP), "mismatch dest IPs: expected %s, got %s", test.expected.DstIP.String(), actual.DstIP.String()) + assert.Equal(t, test.expected.TCPResponse, actual.TCPResponse) + }) + } +} + +func Test_MatchTCP(t *testing.T) { + srcPort := uint16(12345) + dstPort := uint16(443) + seqNum := uint32(2549) + mockHeader := testutils.CreateMockIPv4Header(dstIP, srcIP, 6) + _, tcpBytes := testutils.CreateMockTCPPacket(mockHeader, testutils.CreateMockTCPLayer(dstPort, srcPort, seqNum, seqNum+1, true, true, true), false) + + tts := []struct { + description string + header *ipv4.Header + payload []byte + localIP net.IP + localPort uint16 + remoteIP net.IP + remotePort uint16 + seqNum uint32 + // expected + expectedIP net.IP + expectedErrMsg string + }{ + { + description: "protocol not TCP returns an error", + header: &ipv4.Header{ + Protocol: 17, // UDP + }, + expectedIP: net.IP{}, + expectedErrMsg: "expected a TCP packet", + }, + { + description: "non-matching source IP returns mismatch error", + header: testutils.CreateMockIPv4Header(dstIP, net.ParseIP("2.2.2.2"), 6), + localIP: srcIP, + remoteIP: dstIP, + expectedIP: net.IP{}, + expectedErrMsg: "TCP packet doesn't match", + }, + { + description: "non-matching destination IP returns mismatch error", + header: testutils.CreateMockIPv4Header(net.ParseIP("2.2.2.2"), srcIP, 6), + localIP: srcIP, + remoteIP: dstIP, + expectedIP: net.IP{}, + expectedErrMsg: "TCP 
packet doesn't match", + }, + { + description: "bad TCP payload returns an error", + header: mockHeader, + localIP: srcIP, + remoteIP: dstIP, + expectedIP: net.IP{}, + expectedErrMsg: "TCP parse error", + }, + { + description: "non-matching TCP payload returns mismatch error", + header: mockHeader, + payload: tcpBytes, + localIP: srcIP, + localPort: srcPort, + remoteIP: dstIP, + remotePort: 9001, + seqNum: seqNum, + expectedIP: net.IP{}, + expectedErrMsg: "TCP packet doesn't match", + }, + { + description: "matching TCP payload returns destination IP", + header: mockHeader, + payload: tcpBytes, + localIP: srcIP, + localPort: srcPort, + remoteIP: dstIP, + remotePort: dstPort, + seqNum: seqNum, + expectedIP: dstIP, + expectedErrMsg: "", + }, + } + + for _, test := range tts { + t.Run(test.description, func(t *testing.T) { + tp := newParser() + actualIP, err := tp.MatchTCP(test.header, test.payload, test.localIP, test.localPort, test.remoteIP, test.remotePort, test.seqNum) + if test.expectedErrMsg != "" { + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), test.expectedErrMsg), fmt.Sprintf("expected %q, got %q", test.expectedErrMsg, err.Error())) + return + } + require.NoError(t, err) + assert.Truef(t, test.expectedIP.Equal(actualIP), "mismatch IPs: expected %s, got %s", test.expectedIP.String(), actualIP.String()) + }) + } +} + +func BenchmarkParse(b *testing.B) { + ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 6) // 6 is TCP + tcpLayer := testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true) + + // full packet + _, fullTCPPacket := testutils.CreateMockTCPPacket(ipv4Header, tcpLayer, false) + + tp := newParser() + + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := tp.parseTCP(ipv4Header, fullTCPPacket) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/pkg/networkpath/traceroute/tcp/tcpv4.go b/pkg/networkpath/traceroute/tcp/tcpv4.go index ca38edf21cf4c..e84f21c609cc4 100644 --- a/pkg/networkpath/traceroute/tcp/tcpv4.go +++ b/pkg/networkpath/traceroute/tcp/tcpv4.go @@ -7,8 +7,13 @@ package tcp import ( + "fmt" "net" "time" + + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "golang.org/x/net/ipv4" ) type ( @@ -24,12 +29,88 @@ type ( MaxTTL uint8 Delay time.Duration // delay between sending packets (not applicable if we go the serial send/receive route) Timeout time.Duration // full timeout for all packets + buffer gopacket.SerializeBuffer } ) +// NewTCPv4 initializes a new TCPv4 traceroute instance +func NewTCPv4(target net.IP, targetPort uint16, numPaths uint16, minTTL uint8, maxTTL uint8, delay time.Duration, timeout time.Duration) *TCPv4 { + buffer := gopacket.NewSerializeBuffer() + + return &TCPv4{ + Target: target, + DestPort: targetPort, + NumPaths: numPaths, + MinTTL: minTTL, + MaxTTL: maxTTL, + Delay: delay, + Timeout: timeout, + buffer: buffer, + } +} + // Close doesn't to anything yet, but we should // use this to close out long running sockets // when we're done with a path test func (t *TCPv4) Close() error { return nil } + +// createRawTCPSyn creates a TCP packet with the specified parameters +func (t *TCPv4) createRawTCPSyn(seqNum uint32, ttl int) (*ipv4.Header, []byte, error) { + ipHdr, packet, hdrlen, err := t.createRawTCPSynBuffer(seqNum, ttl) + if err != nil { + return nil, nil, err + } + + return ipHdr, packet[hdrlen:], nil +} + +func (t *TCPv4) createRawTCPSynBuffer(seqNum uint32, ttl int) (*ipv4.Header, []byte, int, error) { + ipLayer := &layers.IPv4{ + Version: 4, + Length: 20, + TTL: 
uint8(ttl), + Id: uint16(41821), + Protocol: 6, + DstIP: t.Target, + SrcIP: t.srcIP, + } + + tcpLayer := &layers.TCP{ + SrcPort: layers.TCPPort(t.srcPort), + DstPort: layers.TCPPort(t.DestPort), + Seq: seqNum, + Ack: 0, + SYN: true, + Window: 1024, + } + + err := tcpLayer.SetNetworkLayerForChecksum(ipLayer) + if err != nil { + return nil, nil, 0, fmt.Errorf("failed to create packet checksum: %w", err) + } + + // clear the gopacket.SerializeBuffer + if len(t.buffer.Bytes()) > 0 { + if err = t.buffer.Clear(); err != nil { + t.buffer = gopacket.NewSerializeBuffer() + } + } + opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + err = gopacket.SerializeLayers(t.buffer, opts, + ipLayer, + tcpLayer, + ) + if err != nil { + return nil, nil, 0, fmt.Errorf("failed to serialize packet: %w", err) + } + packet := t.buffer.Bytes() + + var ipHdr ipv4.Header + if err := ipHdr.Parse(packet[:20]); err != nil { + return nil, nil, 0, fmt.Errorf("failed to parse IP header: %w", err) + } + + return &ipHdr, packet, 20, nil +} diff --git a/pkg/networkpath/traceroute/tcp/tcpv4_test.go b/pkg/networkpath/traceroute/tcp/tcpv4_test.go new file mode 100644 index 0000000000000..f10a69870c86d --- /dev/null +++ b/pkg/networkpath/traceroute/tcp/tcpv4_test.go @@ -0,0 +1,96 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build test + +package tcp + +import ( + "net" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/ipv4" +) + +func TestCreateRawTCPSyn(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("TestCreateRawTCPSyn is broken on macOS") + } + + srcIP := net.ParseIP("1.2.3.4") + dstIP := net.ParseIP("5.6.7.8") + srcPort := uint16(12345) + dstPort := uint16(80) + seqNum := uint32(1000) + ttl := 64 + + tcp := NewTCPv4(dstIP, dstPort, 1, 1, 1, 0, 0) + tcp.srcIP = srcIP + tcp.srcPort = srcPort + + expectedIPHeader := &ipv4.Header{ + Version: 4, + TTL: ttl, + ID: 41821, + Protocol: 6, + Dst: dstIP, + Src: srcIP, + Len: 20, + TotalLen: 40, + Checksum: 51039, + } + + expectedPktBytes := []byte{ + 0x30, 0x39, 0x0, 0x50, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x50, 0x2, 0x4, 0x0, 0x67, 0x5e, 0x0, 0x0, + } + + ipHeader, pktBytes, err := tcp.createRawTCPSyn(seqNum, ttl) + require.NoError(t, err) + assert.Equal(t, expectedIPHeader, ipHeader) + assert.Equal(t, expectedPktBytes, pktBytes) +} + +func TestCreateRawTCPSynBuffer(t *testing.T) { + if runtime.GOOS == "darwin" { + t.Skip("TestCreateRawTCPSyn is broken on macOS") + } + + srcIP := net.ParseIP("1.2.3.4") + dstIP := net.ParseIP("5.6.7.8") + srcPort := uint16(12345) + dstPort := uint16(80) + seqNum := uint32(1000) + ttl := 64 + + tcp := NewTCPv4(dstIP, dstPort, 1, 1, 1, 0, 0) + tcp.srcIP = srcIP + tcp.srcPort = srcPort + + expectedIPHeader := &ipv4.Header{ + Version: 4, + TTL: ttl, + ID: 41821, + Protocol: 6, + Dst: dstIP, + Src: srcIP, + Len: 20, + TotalLen: 40, + Checksum: 51039, + } + + expectedPktBytes := []byte{ + 0x45, 0x0, 0x0, 0x28, 0xa3, 0x5d, 0x0, 0x0, 0x40, 0x6, 0xc7, 0x5f, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x30, 0x39, 0x0, 0x50, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x50, 0x2, 0x4, 0x0, 0x67, 0x5e, 0x0, 0x0, + } + + ipHeader, pktBytes, headerLength, err := tcp.createRawTCPSynBuffer(seqNum, ttl) + + require.NoError(t, err) + assert.Equal(t, 
expectedIPHeader, ipHeader) + assert.Equal(t, 20, headerLength) + assert.Equal(t, expectedPktBytes, pktBytes) +} diff --git a/pkg/networkpath/traceroute/tcp/tcpv4_unix.go b/pkg/networkpath/traceroute/tcp/tcpv4_unix.go index f5859d056e00e..8b02bd26c1c6f 100644 --- a/pkg/networkpath/traceroute/tcp/tcpv4_unix.go +++ b/pkg/networkpath/traceroute/tcp/tcpv4_unix.go @@ -25,10 +25,11 @@ import ( func (t *TCPv4) TracerouteSequential() (*common.Results, error) { // Get local address for the interface that connects to this // host and store in in the probe - addr, err := common.LocalAddrForHost(t.Target, t.DestPort) + addr, conn, err := common.LocalAddrForHost(t.Target, t.DestPort) if err != nil { return nil, fmt.Errorf("failed to get local address for target: %w", err) } + conn.Close() // we don't need the UDP port here t.srcIP = addr.IP // So far I haven't had success trying to simply create a socket @@ -99,7 +100,7 @@ func (t *TCPv4) TracerouteSequential() (*common.Results, error) { } func (t *TCPv4) sendAndReceive(rawIcmpConn *ipv4.RawConn, rawTCPConn *ipv4.RawConn, ttl int, seqNum uint32, timeout time.Duration) (*common.Hop, error) { - tcpHeader, tcpPacket, err := createRawTCPSyn(t.srcIP, t.srcPort, t.Target, t.DestPort, seqNum, ttl) + tcpHeader, tcpPacket, err := t.createRawTCPSyn(seqNum, ttl) if err != nil { log.Errorf("failed to create TCP packet with TTL: %d, error: %s", ttl, err.Error()) return nil, err diff --git a/pkg/networkpath/traceroute/tcp/tcpv4_windows.go b/pkg/networkpath/traceroute/tcp/tcpv4_windows.go index e9eb74cae702f..10d08ee63a910 100644 --- a/pkg/networkpath/traceroute/tcp/tcpv4_windows.go +++ b/pkg/networkpath/traceroute/tcp/tcpv4_windows.go @@ -12,81 +12,32 @@ import ( "net" "time" - "golang.org/x/sys/windows" - "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/icmp" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/winconn" "github.com/DataDog/datadog-agent/pkg/util/log" + "golang.org/x/sys/windows" ) -var ( - sendTo = windows.Sendto -) - -type winrawsocket struct { - s windows.Handle -} - -func (w *winrawsocket) close() { - if w.s != windows.InvalidHandle { - windows.Closesocket(w.s) // nolint: errcheck - } - w.s = windows.InvalidHandle -} - -func (t *TCPv4) sendRawPacket(w *winrawsocket, payload []byte) error { - - dst := t.Target.To4() - sa := &windows.SockaddrInet4{ - Port: int(t.DestPort), - Addr: [4]byte{dst[0], dst[1], dst[2], dst[3]}, - } - if err := sendTo(w.s, payload, 0, sa); err != nil { - return fmt.Errorf("failed to send packet: %w", err) - } - return nil -} - -func createRawSocket() (*winrawsocket, error) { - s, err := windows.Socket(windows.AF_INET, windows.SOCK_RAW, windows.IPPROTO_IP) - if err != nil { - return nil, fmt.Errorf("failed to create raw socket: %w", err) - } - on := int(1) - err = windows.SetsockoptInt(s, windows.IPPROTO_IP, windows.IP_HDRINCL, on) - if err != nil { - windows.Closesocket(s) // nolint: errcheck - return nil, fmt.Errorf("failed to set IP_HDRINCL: %w", err) - } - - err = windows.SetsockoptInt(s, windows.SOL_SOCKET, windows.SO_RCVTIMEO, 100) - if err != nil { - windows.Closesocket(s) // nolint: errcheck - return nil, fmt.Errorf("failed to set SO_RCVTIMEO: %w", err) - } - return &winrawsocket{s: s}, nil -} - // TracerouteSequential runs a traceroute sequentially where a packet is // sent and we wait for a response before sending the next packet func (t *TCPv4) TracerouteSequential() (*common.Results, error) { log.Debugf("Running 
traceroute to %+v", t) // Get local address for the interface that connects to this // host and store in in the probe - // - // TODO: do this once for the probe and hang on to the - // listener until we decide to close the probe - addr, err := common.LocalAddrForHost(t.Target, t.DestPort) + addr, conn, err := common.LocalAddrForHost(t.Target, t.DestPort) if err != nil { return nil, fmt.Errorf("failed to get local address for target: %w", err) } + defer conn.Close() t.srcIP = addr.IP t.srcPort = addr.AddrPort().Port() - rs, err := createRawSocket() + rs, err := winconn.NewRawConn() if err != nil { return nil, fmt.Errorf("failed to create raw socket: %w", err) } - defer rs.close() + defer rs.Close() hops := make([]*common.Hop, 0, int(t.MaxTTL-t.MinTTL)+1) @@ -114,21 +65,27 @@ func (t *TCPv4) TracerouteSequential() (*common.Results, error) { }, nil } -func (t *TCPv4) sendAndReceive(rs *winrawsocket, ttl int, seqNum uint32, timeout time.Duration) (*common.Hop, error) { - _, buffer, _, err := createRawTCPSynBuffer(t.srcIP, t.srcPort, t.Target, t.DestPort, seqNum, ttl) +func (t *TCPv4) sendAndReceive(rs *winconn.RawConn, ttl int, seqNum uint32, timeout time.Duration) (*common.Hop, error) { + _, buffer, _, err := t.createRawTCPSynBuffer(seqNum, ttl) if err != nil { log.Errorf("failed to create TCP packet with TTL: %d, error: %s", ttl, err.Error()) return nil, err } - err = t.sendRawPacket(rs, buffer) + err = rs.SendRawPacket(t.Target, t.DestPort, buffer) if err != nil { log.Errorf("failed to send TCP packet: %s", err.Error()) return nil, err } + icmpParser := icmp.NewICMPTCPParser() + tcpParser := newParser() + matcherFuncs := map[int]common.MatcherFunc{ + windows.IPPROTO_ICMP: icmpParser.Match, + windows.IPPROTO_TCP: tcpParser.MatchTCP, + } start := time.Now() // TODO: is this the best place to start? 
- hopIP, hopPort, icmpType, end, err := rs.listenPackets(timeout, t.srcIP, t.srcPort, t.Target, t.DestPort, seqNum) + hopIP, end, err := rs.ListenPackets(timeout, t.srcIP, t.srcPort, t.Target, t.DestPort, seqNum, matcherFuncs) if err != nil { log.Errorf("failed to listen for packets: %s", err.Error()) return nil, err @@ -141,8 +98,8 @@ func (t *TCPv4) sendAndReceive(rs *winrawsocket, ttl int, seqNum uint32, timeout return &common.Hop{ IP: hopIP, - Port: hopPort, - ICMPType: icmpType, + Port: 0, // TODO: fix this + ICMPType: 0, // TODO: fix this RTT: rtt, IsDest: hopIP.Equal(t.Target), }, nil diff --git a/pkg/networkpath/traceroute/tcp/utils.go b/pkg/networkpath/traceroute/tcp/utils.go index 3bd9e437e90c7..96ece97a94f33 100644 --- a/pkg/networkpath/traceroute/tcp/utils.go +++ b/pkg/networkpath/traceroute/tcp/utils.go @@ -8,21 +8,6 @@ package tcp import ( "fmt" "net" - "syscall" - - "github.com/google/gopacket" - "github.com/google/gopacket/layers" - "golang.org/x/net/ipv4" -) - -type ( - // tcpResponse encapsulates the data from a - // TCP response needed for matching - tcpResponse struct { - SrcIP net.IP - DstIP net.IP - TCPResponse layers.TCP - } ) // reserveLocalPort reserves an ephemeral TCP port @@ -40,102 +25,3 @@ func reserveLocalPort() (uint16, net.Listener, error) { return uint16(tcpAddr.Port), tcpListener, nil } - -// createRawTCPSyn creates a TCP packet with the specified parameters -func createRawTCPSyn(sourceIP net.IP, sourcePort uint16, destIP net.IP, destPort uint16, seqNum uint32, ttl int) (*ipv4.Header, []byte, error) { - ipHdr, packet, hdrlen, err := createRawTCPSynBuffer(sourceIP, sourcePort, destIP, destPort, seqNum, ttl) - if err != nil { - return nil, nil, err - } - - return ipHdr, packet[hdrlen:], nil -} - -func createRawTCPSynBuffer(sourceIP net.IP, sourcePort uint16, destIP net.IP, destPort uint16, seqNum uint32, ttl int) (*ipv4.Header, []byte, int, error) { - ipLayer := &layers.IPv4{ - Version: 4, - Length: 20, - TTL: uint8(ttl), - Id: uint16(41821), - Protocol: 6, - DstIP: destIP, - SrcIP: sourceIP, - } - - tcpLayer := &layers.TCP{ - SrcPort: layers.TCPPort(sourcePort), - DstPort: layers.TCPPort(destPort), - Seq: seqNum, - Ack: 0, - SYN: true, - Window: 1024, - } - - err := tcpLayer.SetNetworkLayerForChecksum(ipLayer) - if err != nil { - return nil, nil, 0, fmt.Errorf("failed to create packet checksum: %w", err) - } - buf := gopacket.NewSerializeBuffer() - opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} - err = gopacket.SerializeLayers(buf, opts, - ipLayer, - tcpLayer, - ) - if err != nil { - return nil, nil, 0, fmt.Errorf("failed to serialize packet: %w", err) - } - packet := buf.Bytes() - - var ipHdr ipv4.Header - if err := ipHdr.Parse(packet[:20]); err != nil { - return nil, nil, 0, fmt.Errorf("failed to parse IP header: %w", err) - } - - return &ipHdr, packet, 20, nil -} - -type tcpParser struct { - layer layers.TCP - decoded []gopacket.LayerType - decodingLayerParser *gopacket.DecodingLayerParser -} - -func newTCPParser() *tcpParser { - tcpParser := &tcpParser{} - tcpParser.decodingLayerParser = gopacket.NewDecodingLayerParser(layers.LayerTypeTCP, &tcpParser.layer) - return tcpParser -} - -func (tp *tcpParser) parseTCP(header *ipv4.Header, payload []byte) (*tcpResponse, error) { - if header.Protocol != syscall.IPPROTO_TCP || header.Version != 4 || - header.Src == nil || header.Dst == nil { - return nil, fmt.Errorf("invalid IP header for TCP packet: %+v", header) - } - - if err := tp.decodingLayerParser.DecodeLayers(payload, 
&tp.decoded); err != nil { - return nil, fmt.Errorf("failed to decode TCP packet: %w", err) - } - - resp := &tcpResponse{ - SrcIP: header.Src, - DstIP: header.Dst, - TCPResponse: tp.layer, - } - // make sure the TCP layer is cleared between runs - tp.layer = layers.TCP{} - - return resp, nil -} - -func tcpMatch(localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32, response *tcpResponse) bool { - flagsCheck := (response.TCPResponse.SYN && response.TCPResponse.ACK) || response.TCPResponse.RST - sourcePort := uint16(response.TCPResponse.SrcPort) - destPort := uint16(response.TCPResponse.DstPort) - - return remoteIP.Equal(response.SrcIP) && - remotePort == sourcePort && - localIP.Equal(response.DstIP) && - localPort == destPort && - seqNum == response.TCPResponse.Ack-1 && - flagsCheck -} diff --git a/pkg/networkpath/traceroute/tcp/utils_test.go b/pkg/networkpath/traceroute/tcp/utils_test.go index 5b344df71ec16..8e3200b65949a 100644 --- a/pkg/networkpath/traceroute/tcp/utils_test.go +++ b/pkg/networkpath/traceroute/tcp/utils_test.go @@ -10,25 +10,18 @@ package tcp import ( "fmt" "net" - "runtime" "testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "golang.org/x/net/ipv4" - - "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" ) var ( srcIP = net.ParseIP("1.2.3.4") dstIP = net.ParseIP("5.6.7.8") - - innerSrcIP = net.ParseIP("10.0.0.1") - innerDstIP = net.ParseIP("192.168.1.1") ) -func Test_reserveLocalPort(t *testing.T) { +func TestReserveLocalPort(t *testing.T) { // WHEN we reserve a local port port, listener, err := reserveLocalPort() require.NoError(t, err) @@ -41,153 +34,3 @@ func Test_reserveLocalPort(t *testing.T) { assert.Error(t, err) assert.Nil(t, conn2) } - -func Test_createRawTCPSyn(t *testing.T) { - if runtime.GOOS == "darwin" { - t.Skip("Test_createRawTCPSyn is broken on macOS") - } - - srcIP := net.ParseIP("1.2.3.4") - dstIP := net.ParseIP("5.6.7.8") - srcPort := uint16(12345) - dstPort := uint16(80) - seqNum := uint32(1000) - ttl := 64 - - expectedIPHeader := &ipv4.Header{ - Version: 4, - TTL: ttl, - ID: 41821, - Protocol: 6, - Dst: dstIP, - Src: srcIP, - Len: 20, - TotalLen: 40, - Checksum: 51039, - } - - expectedPktBytes := []byte{ - 0x30, 0x39, 0x0, 0x50, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x50, 0x2, 0x4, 0x0, 0x67, 0x5e, 0x0, 0x0, - } - - ipHeader, pktBytes, err := createRawTCPSyn(srcIP, srcPort, dstIP, dstPort, seqNum, ttl) - require.NoError(t, err) - assert.Equal(t, expectedIPHeader, ipHeader) - assert.Equal(t, expectedPktBytes, pktBytes) -} - -func Test_createRawTCPSynBuffer(t *testing.T) { - if runtime.GOOS == "darwin" { - t.Skip("Test_createRawTCPSyn is broken on macOS") - } - - srcIP := net.ParseIP("1.2.3.4") - dstIP := net.ParseIP("5.6.7.8") - srcPort := uint16(12345) - dstPort := uint16(80) - seqNum := uint32(1000) - ttl := 64 - - expectedIPHeader := &ipv4.Header{ - Version: 4, - TTL: ttl, - ID: 41821, - Protocol: 6, - Dst: dstIP, - Src: srcIP, - Len: 20, - TotalLen: 40, - Checksum: 51039, - } - - expectedPktBytes := []byte{ - 0x45, 0x0, 0x0, 0x28, 0xa3, 0x5d, 0x0, 0x0, 0x40, 0x6, 0xc7, 0x5f, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x30, 0x39, 0x0, 0x50, 0x0, 0x0, 0x3, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x50, 0x2, 0x4, 0x0, 0x67, 0x5e, 0x0, 0x0, - } - - ipHeader, pktBytes, headerLength, err := createRawTCPSynBuffer(srcIP, srcPort, dstIP, dstPort, seqNum, ttl) - - require.NoError(t, err) - assert.Equal(t, expectedIPHeader, ipHeader) - assert.Equal(t, 20, headerLength) - assert.Equal(t, 
expectedPktBytes, pktBytes) -} - -func Test_parseTCP(t *testing.T) { - ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 6) // 6 is TCP - tcpLayer := testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true) - - // full packet - encodedTCPLayer, fullTCPPacket := testutils.CreateMockTCPPacket(ipv4Header, tcpLayer, false) - - tt := []struct { - description string - inHeader *ipv4.Header - inPayload []byte - expected *tcpResponse - errMsg string - }{ - { - description: "empty IPv4 layer should return an error", - inHeader: &ipv4.Header{}, - inPayload: []byte{}, - expected: nil, - errMsg: "invalid IP header for TCP packet", - }, - { - description: "missing TCP layer should return an error", - inHeader: ipv4Header, - inPayload: []byte{}, - expected: nil, - errMsg: "failed to decode TCP packet", - }, - { - description: "full TCP packet should create tcpResponse", - inHeader: ipv4Header, - inPayload: fullTCPPacket, - expected: &tcpResponse{ - SrcIP: srcIP, - DstIP: dstIP, - TCPResponse: *encodedTCPLayer, - }, - errMsg: "", - }, - } - - tp := newTCPParser() - for _, test := range tt { - t.Run(test.description, func(t *testing.T) { - actual, err := tp.parseTCP(test.inHeader, test.inPayload) - if test.errMsg != "" { - require.Error(t, err) - assert.Contains(t, err.Error(), test.errMsg) - assert.Nil(t, actual) - return - } - require.Nil(t, err) - require.NotNil(t, actual) - // assert.Equal doesn't handle net.IP well - assert.Equal(t, testutils.StructFieldCount(test.expected), testutils.StructFieldCount(actual)) - assert.Truef(t, test.expected.SrcIP.Equal(actual.SrcIP), "mismatch source IPs: expected %s, got %s", test.expected.SrcIP.String(), actual.SrcIP.String()) - assert.Truef(t, test.expected.DstIP.Equal(actual.DstIP), "mismatch dest IPs: expected %s, got %s", test.expected.DstIP.String(), actual.DstIP.String()) - assert.Equal(t, test.expected.TCPResponse, actual.TCPResponse) - }) - } -} - -func BenchmarkParseTCP(b *testing.B) { - ipv4Header := testutils.CreateMockIPv4Header(srcIP, dstIP, 6) // 6 is TCP - tcpLayer := testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true) - - // full packet - _, fullTCPPacket := testutils.CreateMockTCPPacket(ipv4Header, tcpLayer, false) - - tp := newTCPParser() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := tp.parseTCP(ipv4Header, fullTCPPacket) - if err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/networkpath/traceroute/tcp/utils_unix.go b/pkg/networkpath/traceroute/tcp/utils_unix.go index 4fd1b2e4b251d..5425ce43591b4 100644 --- a/pkg/networkpath/traceroute/tcp/utils_unix.go +++ b/pkg/networkpath/traceroute/tcp/utils_unix.go @@ -15,6 +15,7 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/icmp" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/google/gopacket/layers" "go.uber.org/multierr" @@ -100,7 +101,8 @@ func listenPackets(icmpConn rawConnWrapper, tcpConn rawConnWrapper, timeout time // timeout or if the listener is canceled, it should return a canceledError func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, uint16, layers.ICMPv4TypeCode, time.Time, error) { buf := make([]byte, 1024) - tp := newTCPParser() + tp := newParser() + icmpParser := icmp.NewICMPTCPParser() for { select { case <-ctx.Done(): @@ -128,12 +130,12 @@ func handlePackets(ctx context.Context, conn 
rawConnWrapper, listener string, lo // TODO: remove listener constraint and parse all packets // in the same function return a succinct struct here if listener == "icmp" { - icmpResponse, err := common.ParseICMP(header, packet) + icmpResponse, err := icmpParser.Parse(header, packet) if err != nil { log.Tracef("failed to parse ICMP packet: %s", err) continue } - if common.ICMPMatch(localIP, localPort, remoteIP, remotePort, seqNum, icmpResponse) { + if icmpResponse.Matches(localIP, localPort, remoteIP, remotePort, seqNum) { return icmpResponse.SrcIP, 0, icmpResponse.TypeCode, received, nil } } else if listener == "tcp" { @@ -142,7 +144,7 @@ func handlePackets(ctx context.Context, conn rawConnWrapper, listener string, lo log.Tracef("failed to parse TCP packet: %s", err) continue } - if tcpMatch(localIP, localPort, remoteIP, remotePort, seqNum, tcpResp) { + if tcpResp.Match(localIP, localPort, remoteIP, remotePort, seqNum) { return tcpResp.SrcIP, uint16(tcpResp.TCPResponse.SrcPort), 0, received, nil } } else { diff --git a/pkg/networkpath/traceroute/tcp/utils_unix_test.go b/pkg/networkpath/traceroute/tcp/utils_unix_test.go index db78310723d28..42b862b411a03 100644 --- a/pkg/networkpath/traceroute/tcp/utils_unix_test.go +++ b/pkg/networkpath/traceroute/tcp/utils_unix_test.go @@ -23,6 +23,11 @@ import ( "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" ) +var ( + innerSrcIP = net.ParseIP("10.0.0.1") + innerDstIP = net.ParseIP("192.168.1.1") +) + type ( mockRawConn struct { setReadDeadlineErr error @@ -123,7 +128,7 @@ func Test_handlePackets(t *testing.T) { ctxTimeout: 500 * time.Millisecond, conn: &mockRawConn{ header: testutils.CreateMockIPv4Header(srcIP, dstIP, 1), - payload: testutils.CreateMockICMPPacket(nil, testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded), testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true), false), + payload: testutils.CreateMockICMPWithTCPPacket(nil, testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded), testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true), false), }, localIP: innerSrcIP, localPort: 12345, diff --git a/pkg/networkpath/traceroute/tcp/utils_windows.go b/pkg/networkpath/traceroute/tcp/utils_windows.go deleted file mode 100644 index 483167109b6e4..0000000000000 --- a/pkg/networkpath/traceroute/tcp/utils_windows.go +++ /dev/null @@ -1,139 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package tcp adds a TCP traceroute implementation to the agent -package tcp - -import ( - "context" - "fmt" - "net" - "sync" - "time" - - "golang.org/x/net/ipv4" - "golang.org/x/sys/windows" - - "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/google/gopacket/layers" -) - -var ( - recvFrom = windows.Recvfrom -) - -// listenPackets takes in raw ICMP and TCP connections and listens for matching ICMP -// and TCP responses based on the passed in trace information. If neither listener -// receives a matching packet within the timeout, a blank response is returned. 
-// Once a matching packet is received by a listener, it will cause the other listener -// to be canceled, and data from the matching packet will be returned to the caller -func (w *winrawsocket) listenPackets(timeout time.Duration, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, uint16, layers.ICMPv4TypeCode, time.Time, error) { - var icmpErr error - var wg sync.WaitGroup - var icmpIP net.IP - //var tcpIP net.IP - //var icmpCode layers.ICMPv4TypeCode - //var tcpFinished time.Time - var icmpFinished time.Time - var port uint16 - wg.Add(1) - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - go func() { - defer wg.Done() - defer cancel() - icmpIP, _, _, icmpFinished, icmpErr = w.handlePackets(ctx, localIP, localPort, remoteIP, remotePort, seqNum) - }() - wg.Wait() - - if icmpErr != nil { - _, icmpCanceled := icmpErr.(common.CanceledError) - if icmpCanceled { - log.Trace("timed out waiting for responses") - return net.IP{}, 0, 0, time.Time{}, nil - } - if icmpErr != nil { - log.Errorf("ICMP listener error: %s", icmpErr.Error()) - } - - return net.IP{}, 0, 0, time.Time{}, fmt.Errorf("icmp error: %w", icmpErr) - } - - // return the TCP response - return icmpIP, port, 0, icmpFinished, nil -} - -// handlePackets in its current implementation should listen for the first matching -// packet on the connection and then return. If no packet is received within the -// timeout or if the listener is canceled, it should return a canceledError -func (w *winrawsocket) handlePackets(ctx context.Context, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, seqNum uint32) (net.IP, uint16, layers.ICMPv4TypeCode, time.Time, error) { - buf := make([]byte, 512) - tp := newTCPParser() - for { - select { - case <-ctx.Done(): - return net.IP{}, 0, 0, time.Time{}, common.CanceledError("listener canceled") - default: - } - - // the receive timeout is set to 100ms in the constructor, to match the - // linux side. This is a workaround for the lack of a deadline for sockets. 
- //err := conn.SetReadDeadline(now.Add(time.Millisecond * 100)) - n, _, err := recvFrom(w.s, buf, 0) - if err != nil { - if err == windows.WSAETIMEDOUT { - continue - } - if err == windows.WSAEMSGSIZE { - log.Warnf("Message too large for buffer") - continue - } - return nil, 0, 0, time.Time{}, err - } - log.Tracef("Got packet %+v", buf[:n]) - - if n < 20 { // min size of ipv4 header - continue - } - header, err := ipv4.ParseHeader(buf[:n]) - if err != nil { - continue - } - packet := buf[header.Len:header.TotalLen] - - // once we have a packet, take a timestamp to know when - // the response was received, if it matches, we will - // return this timestamp - received := time.Now() - // TODO: remove listener constraint and parse all packets - // in the same function return a succinct struct here - if header.Protocol == windows.IPPROTO_ICMP { - icmpResponse, err := common.ParseICMP(header, packet) - if err != nil { - log.Tracef("failed to parse ICMP packet: %s", err.Error()) - continue - } - if common.ICMPMatch(localIP, localPort, remoteIP, remotePort, seqNum, icmpResponse) { - return icmpResponse.SrcIP, 0, icmpResponse.TypeCode, received, nil - } - } else if header.Protocol == windows.IPPROTO_TCP { - // don't even bother parsing the packet if the src/dst ip don't match - if !localIP.Equal(header.Dst) || !remoteIP.Equal(header.Src) { - continue - } - tcpResp, err := tp.parseTCP(header, packet) - if err != nil { - log.Tracef("failed to parse TCP packet: %s", err.Error()) - continue - } - if tcpMatch(localIP, localPort, remoteIP, remotePort, seqNum, tcpResp) { - return tcpResp.SrcIP, uint16(tcpResp.TCPResponse.SrcPort), 0, received, nil - } - } else { - continue - } - } -} diff --git a/pkg/networkpath/traceroute/tcp/utils_windows_test.go b/pkg/networkpath/traceroute/tcp/utils_windows_test.go deleted file mode 100644 index 6fbd7d0cc860b..0000000000000 --- a/pkg/networkpath/traceroute/tcp/utils_windows_test.go +++ /dev/null @@ -1,156 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build test - -package tcp - -import ( - "context" - "errors" - "fmt" - "net" - "strings" - "testing" - "time" - - "golang.org/x/sys/windows" - - "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" - "github.com/google/gopacket/layers" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type ( - mockRawConn struct { - readTimeoutCount int - readTimeout time.Duration - readFromErr error - - payload []byte - } -) - -func Test_handlePackets(t *testing.T) { - _, tcpBytes := testutils.CreateMockTCPPacket(testutils.CreateMockIPv4Header(dstIP, srcIP, 6), testutils.CreateMockTCPLayer(443, 12345, 28394, 28395, true, true, true), true) - - tt := []struct { - description string - // input - ctxTimeout time.Duration - conn *mockRawConn - localIP net.IP - localPort uint16 - remoteIP net.IP - remotePort uint16 - seqNum uint32 - // output - expectedIP net.IP - expectedPort uint16 - expectedTypeCode layers.ICMPv4TypeCode - errMsg string - }{ - { - description: "canceled context returns canceledErr", - ctxTimeout: 300 * time.Millisecond, - conn: &mockRawConn{ - readTimeoutCount: 100, - readTimeout: 100 * time.Millisecond, - readFromErr: errors.New("bad test error"), - }, - errMsg: "canceled", - }, - { - description: "non-timeout read error returns an error", - ctxTimeout: 1 * time.Second, - conn: &mockRawConn{ - readFromErr: errors.New("test read error"), - }, - errMsg: "test read error", - }, - // { - // description: "failed ICMP parsing eventuallly returns cancel timeout", - // ctxTimeout: 500 * time.Millisecond, - // conn: &mockRawConn{ - // payload: nil, - // }, - // errMsg: "canceled", - // }, - // { - // description: "failed TCP parsing eventuallly returns cancel timeout", - // ctxTimeout: 500 * time.Millisecond, - // conn: &mockRawConn{ - // header: &ipv4.Header{}, - // payload: nil, - // }, - // listener: "tcp", - // errMsg: "canceled", - // }, - { - description: "successful ICMP parsing returns IP, port, and type code", - ctxTimeout: 500 * time.Millisecond, - conn: &mockRawConn{ - payload: testutils.CreateMockICMPPacket(testutils.CreateMockIPv4Layer(srcIP, dstIP, layers.IPProtocolICMPv4), testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded), testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true), false), - }, - localIP: innerSrcIP, - localPort: 12345, - remoteIP: innerDstIP, - remotePort: 443, - seqNum: 28394, - expectedIP: srcIP, - expectedPort: 0, - expectedTypeCode: layers.ICMPv4CodeTTLExceeded, - }, - { - description: "successful TCP parsing returns IP, port, and type code", - ctxTimeout: 500 * time.Millisecond, - conn: &mockRawConn{ - payload: tcpBytes, - }, - localIP: srcIP, - localPort: 12345, - remoteIP: dstIP, - remotePort: 443, - seqNum: 28394, - expectedIP: dstIP, - expectedPort: 443, - expectedTypeCode: 0, - }, - } - - for _, test := range tt { - t.Run(test.description, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), test.ctxTimeout) - defer cancel() - recvFrom = test.conn.RecvFrom - w := &winrawsocket{} - actualIP, actualPort, actualTypeCode, _, err := w.handlePackets(ctx, test.localIP, test.localPort, test.remoteIP, test.remotePort, test.seqNum) - if test.errMsg != "" { - require.Error(t, err) - assert.True(t, strings.Contains(err.Error(), test.errMsg), fmt.Sprintf("expected %q, got %q", test.errMsg, err.Error())) - return - } - require.NoError(t, err) - assert.Truef(t, 
test.expectedIP.Equal(actualIP), "mismatch source IPs: expected %s, got %s", test.expectedIP.String(), actualIP.String()) - assert.Equal(t, test.expectedPort, actualPort) - assert.Equal(t, test.expectedTypeCode, actualTypeCode) - }) - } -} - -func (m *mockRawConn) RecvFrom(_ windows.Handle, buf []byte, _ int) (int, windows.Sockaddr, error) { - if m.readTimeoutCount > 0 { - m.readTimeoutCount-- - time.Sleep(m.readTimeout) - return 0, nil, windows.WSAETIMEDOUT - } - if m.readFromErr != nil { - return 0, nil, m.readFromErr - } - copy(buf, m.payload) - - return len(m.payload), nil, nil -} diff --git a/pkg/networkpath/traceroute/testutils/testutils.go b/pkg/networkpath/traceroute/testutils/testutils.go index e412d8971372b..52419cb1c0a2a 100644 --- a/pkg/networkpath/traceroute/testutils/testutils.go +++ b/pkg/networkpath/traceroute/testutils/testutils.go @@ -28,8 +28,8 @@ func CreateMockIPv4Header(srcIP, dstIP net.IP, protocol int) *ipv4.Header { } } -// CreateMockICMPPacket creates a mock ICMP packet for testing -func CreateMockICMPPacket(ipLayer *layers.IPv4, icmpLayer *layers.ICMPv4, innerIP *layers.IPv4, innerTCP *layers.TCP, partialTCPHeader bool) []byte { +// CreateMockICMPWithTCPPacket creates a mock ICMP packet for testing +func CreateMockICMPWithTCPPacket(ipLayer *layers.IPv4, icmpLayer *layers.ICMPv4, innerIP *layers.IPv4, innerTCP *layers.TCP, partialTCPHeader bool) []byte { innerBuf := gopacket.NewSerializeBuffer() opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} @@ -76,6 +76,47 @@ func CreateMockICMPPacket(ipLayer *layers.IPv4, icmpLayer *layers.ICMPv4, innerI return buf.Bytes() } +// CreateMockICMPWithUDPPacket creates a mock ICMP packet for testing +func CreateMockICMPWithUDPPacket(ipLayer *layers.IPv4, icmpLayer *layers.ICMPv4, innerIP *layers.IPv4, innerUDP *layers.UDP) []byte { + innerBuf := gopacket.NewSerializeBuffer() + opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + + innerLayers := make([]gopacket.SerializableLayer, 0, 2) + if innerIP != nil { + innerLayers = append(innerLayers, innerIP) + } + if innerUDP != nil { + innerLayers = append(innerLayers, innerUDP) + if innerIP != nil { + innerUDP.SetNetworkLayerForChecksum(innerIP) // nolint: errcheck + } + } + + gopacket.SerializeLayers(innerBuf, opts, // nolint: errcheck + innerLayers..., + ) + payload := innerBuf.Bytes() + + buf := gopacket.NewSerializeBuffer() + gopacket.SerializeLayers(buf, opts, // nolint: errcheck + icmpLayer, + gopacket.Payload(payload), + ) + + icmpBytes := buf.Bytes() + if ipLayer == nil { + return icmpBytes + } + + buf = gopacket.NewSerializeBuffer() + gopacket.SerializeLayers(buf, opts, // nolint: errcheck + ipLayer, + gopacket.Payload(icmpBytes), + ) + + return buf.Bytes() +} + // CreateMockTCPPacket creates a mock TCP packet for testing func CreateMockTCPPacket(ipHeader *ipv4.Header, tcpLayer *layers.TCP, includeHeader bool) (*layers.TCP, []byte) { ipLayer := &layers.IPv4{ @@ -136,6 +177,15 @@ func CreateMockTCPLayer(srcPort uint16, dstPort uint16, seqNum uint32, ackNum ui } } +// CreateMockUDPLayer creates a mock UDP layer for testing +func CreateMockUDPLayer(srcPort uint16, dstPort uint16, checksum uint16) *layers.UDP { + return &layers.UDP{ + SrcPort: layers.UDPPort(srcPort), + DstPort: layers.UDPPort(dstPort), + Checksum: checksum, + } +} + // StructFieldCount returns the number of fields in a struct func StructFieldCount(v interface{}) int { val := reflect.ValueOf(v) diff --git a/pkg/networkpath/traceroute/traceroute_linux.go 
b/pkg/networkpath/traceroute/traceroute_linux.go index e3d54f61ccf48..28f470ba8e02d 100644 --- a/pkg/networkpath/traceroute/traceroute_linux.go +++ b/pkg/networkpath/traceroute/traceroute_linux.go @@ -12,9 +12,7 @@ import ( "encoding/json" "net/http" - sysprobeclient "github.com/DataDog/datadog-agent/cmd/system-probe/api/client" "github.com/DataDog/datadog-agent/comp/core/telemetry" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -37,12 +35,8 @@ type LinuxTraceroute struct { func New(cfg config.Config, _ telemetry.Component) (*LinuxTraceroute, error) { log.Debugf("Creating new traceroute with config: %+v", cfg) return &LinuxTraceroute{ - cfg: cfg, - sysprobeClient: &http.Client{ - Transport: &http.Transport{ - DialContext: sysprobeclient.DialContextFunc(pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")), - }, - }, + cfg: cfg, + sysprobeClient: getSysProbeClient(), }, nil } diff --git a/pkg/networkpath/traceroute/traceroute_sysprobe.go b/pkg/networkpath/traceroute/traceroute_sysprobe.go new file mode 100644 index 0000000000000..dd2c21c47b287 --- /dev/null +++ b/pkg/networkpath/traceroute/traceroute_sysprobe.go @@ -0,0 +1,24 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux || windows + +package traceroute + +import ( + "net/http" + + sysprobeclient "github.com/DataDog/datadog-agent/cmd/system-probe/api/client" + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + "github.com/DataDog/datadog-agent/pkg/util/funcs" +) + +var getSysProbeClient = funcs.MemoizeNoError(func() *http.Client { + return &http.Client{ + Transport: &http.Transport{ + DialContext: sysprobeclient.DialContextFunc(pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")), + }, + } +}) diff --git a/pkg/networkpath/traceroute/traceroute_windows.go b/pkg/networkpath/traceroute/traceroute_windows.go index 8e05c249e4a35..fdf4425bfac4b 100644 --- a/pkg/networkpath/traceroute/traceroute_windows.go +++ b/pkg/networkpath/traceroute/traceroute_windows.go @@ -10,12 +10,9 @@ package traceroute import ( "context" "encoding/json" - "errors" "net/http" - sysprobeclient "github.com/DataDog/datadog-agent/cmd/system-probe/api/client" "github.com/DataDog/datadog-agent/comp/core/telemetry" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/networkpath/payload" "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/config" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -39,18 +36,9 @@ type WindowsTraceroute struct { func New(cfg config.Config, _ telemetry.Component) (*WindowsTraceroute, error) { log.Debugf("Creating new traceroute with config: %+v", cfg) - // UDP is not supported at the moment - if cfg.Protocol == payload.ProtocolUDP { - return nil, errors.New(udpNotSupportedWindowsMsg) - } - return &WindowsTraceroute{ - cfg: cfg, - sysprobeClient: &http.Client{ - Transport: &http.Transport{ - DialContext: sysprobeclient.DialContextFunc(pkgconfigsetup.SystemProbe().GetString("system_probe_config.sysprobe_socket")), - }, - }, + cfg: cfg, + sysprobeClient: getSysProbeClient(), }, nil } diff --git 
a/pkg/networkpath/traceroute/udp/udpv4.go b/pkg/networkpath/traceroute/udp/udpv4.go new file mode 100644 index 0000000000000..0256e6637560e --- /dev/null +++ b/pkg/networkpath/traceroute/udp/udpv4.go @@ -0,0 +1,118 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package udp adds a UDP traceroute implementation to the agent +package udp + +import ( + "fmt" + "net" + "time" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/icmp" + "github.com/google/gopacket" + "github.com/google/gopacket/layers" + "golang.org/x/net/ipv4" +) + +type ( + // UDPv4 encapsulates the data needed to run + // a UDPv4 traceroute + UDPv4 struct { + Target net.IP + TargetPort uint16 + srcIP net.IP // calculated internally + srcPort uint16 // calculated internally + NumPaths uint16 + MinTTL uint8 + MaxTTL uint8 + Delay time.Duration // delay between sending packets (not applicable if we go the serial send/receive route) + Timeout time.Duration // full timeout for all packets + icmpParser icmp.Parser + buffer gopacket.SerializeBuffer + } +) + +// NewUDPv4 initializes a new UDPv4 traceroute instance +func NewUDPv4(target net.IP, targetPort uint16, numPaths uint16, minTTL uint8, maxTTL uint8, delay time.Duration, timeout time.Duration) *UDPv4 { + icmpParser := icmp.NewICMPUDPParser() + buffer := gopacket.NewSerializeBuffer() + + return &UDPv4{ + Target: target, + TargetPort: targetPort, + NumPaths: numPaths, + MinTTL: minTTL, + MaxTTL: maxTTL, + srcIP: net.IP{}, // avoid linter error on linux as it's only used on windows + srcPort: 0, // avoid linter error on linux as it's only used on windows + Delay: delay, + Timeout: timeout, + icmpParser: icmpParser, + buffer: buffer, + } +} + +// Close doesn't to anything yet, but we should +// use this to close out long running sockets +// when we're done with a path test +func (u *UDPv4) Close() error { + return nil +} + +// createRawUDPBuffer creates a raw UDP packet with the specified parameters +// +// the nolint:unused is necessary because we don't yet use this outside the Windows implementation +func (u *UDPv4) createRawUDPBuffer(sourceIP net.IP, sourcePort uint16, destIP net.IP, destPort uint16, ttl int) (*ipv4.Header, []byte, uint16, int, error) { //nolint:unused + ipLayer := &layers.IPv4{ + Version: 4, + Length: 20, + TTL: uint8(ttl), + Id: uint16(41821), + Protocol: 17, // hard code UDP so other OSs can use it + DstIP: destIP, + SrcIP: sourceIP, + Flags: layers.IPv4DontFragment, // needed for dublin-traceroute-like NAT detection + } + udpLayer := &layers.UDP{ + SrcPort: layers.UDPPort(sourcePort), + DstPort: layers.UDPPort(destPort), + } + udpPaylod := []byte("NSMNC\x00\x00\x00") + + // TODO: compute checksum before serialization so we + // can set ID field of the IP header to detect NATs just + // as is done in dublin-traceroute. 
Gopacket doesn't expose + // the checksum computations and modifying the IP header after + // serialization would change its checksum + err := udpLayer.SetNetworkLayerForChecksum(ipLayer) + if err != nil { + return nil, nil, 0, 0, fmt.Errorf("failed to create packet checksum: %w", err) + } + + // clear the gopacket.SerializeBuffer + if len(u.buffer.Bytes()) > 0 { + if err = u.buffer.Clear(); err != nil { + u.buffer = gopacket.NewSerializeBuffer() + } + } + opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true} + err = gopacket.SerializeLayers(u.buffer, opts, + ipLayer, + udpLayer, + gopacket.Payload(udpPaylod), + ) + if err != nil { + return nil, nil, 0, 0, fmt.Errorf("failed to serialize packet: %w", err) + } + packet := u.buffer.Bytes() + + var ipHdr ipv4.Header + if err := ipHdr.Parse(packet[:20]); err != nil { + return nil, nil, 0, 0, fmt.Errorf("failed to parse IP header: %w", err) + } + + return &ipHdr, packet, udpLayer.Checksum, 20, nil +} diff --git a/pkg/networkpath/traceroute/udp/udpv4_unix.go b/pkg/networkpath/traceroute/udp/udpv4_unix.go new file mode 100644 index 0000000000000..f308425fb388d --- /dev/null +++ b/pkg/networkpath/traceroute/udp/udpv4_unix.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build unix + +package udp + +import ( + "errors" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" +) + +// TracerouteSequential runs a traceroute +func (u *UDPv4) TracerouteSequential() (*common.Results, error) { + return nil, errors.New("non-Dublin UDP not implemented for Unix") +} diff --git a/pkg/networkpath/traceroute/udp/udpv4_windows.go b/pkg/networkpath/traceroute/udp/udpv4_windows.go new file mode 100644 index 0000000000000..21a9ef104dddb --- /dev/null +++ b/pkg/networkpath/traceroute/udp/udpv4_windows.go @@ -0,0 +1,101 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package udp + +import ( + "fmt" + "net" + "time" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/winconn" + "github.com/DataDog/datadog-agent/pkg/util/log" + "golang.org/x/sys/windows" +) + +// TracerouteSequential runs a traceroute +func (u *UDPv4) TracerouteSequential() (*common.Results, error) { + log.Debugf("Running UDP traceroute to %+v", u) + // Get local address for the interface that connects to this + // host and store in in the probe + addr, conn, err := common.LocalAddrForHost(u.Target, u.TargetPort) + if err != nil { + return nil, fmt.Errorf("failed to get local address for target: %w", err) + } + // TODO: Need to call bind on our port? + // When the UDP socket for this remains claimed, ICMP messages that we wish + // to read on the raw socket created below are not received with the raw socket + // This makes a case to investigate using 2 separate sockets for + // Windows implementations in the future. 
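+ // The dialed UDP connection above is only needed to learn the local IP and
+ // ephemeral port; closing it right away releases that port so the raw socket
+ // created below can actually receive the ICMP replies described in the note above.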
+ conn.Close() + u.srcIP = addr.IP + u.srcPort = addr.AddrPort().Port() + + rs, err := winconn.NewRawConn() + if err != nil { + return nil, fmt.Errorf("failed to create raw socket: %w", err) + } + defer rs.Close() + + hops := make([]*common.Hop, 0, int(u.MaxTTL-u.MinTTL)+1) + + for i := int(u.MinTTL); i <= int(u.MaxTTL); i++ { + hop, err := u.sendAndReceive(rs, i, u.Timeout) + if err != nil { + return nil, fmt.Errorf("failed to run traceroute: %w", err) + } + hops = append(hops, hop) + log.Tracef("Discovered hop: %+v", hop) + // if we've reached our destination, + // we're done + if hop.IsDest { + break + } + } + + return &common.Results{ + Source: u.srcIP, + SourcePort: u.srcPort, + Target: u.Target, + DstPort: u.TargetPort, + Hops: hops, + }, nil +} + +func (u *UDPv4) sendAndReceive(rs *winconn.RawConn, ttl int, timeout time.Duration) (*common.Hop, error) { + _, buffer, udpChecksum, _, err := u.createRawUDPBuffer(u.srcIP, u.srcPort, u.Target, u.TargetPort, ttl) + if err != nil { + log.Errorf("failed to create UDP packet with TTL: %d, error: %s", ttl, err.Error()) + return nil, err + } + + err = rs.SendRawPacket(u.Target, u.TargetPort, buffer) + if err != nil { + log.Errorf("failed to send UDP packet: %s", err.Error()) + return nil, err + } + + matcherFuncs := map[int]common.MatcherFunc{ + windows.IPPROTO_ICMP: u.icmpParser.Match, + } + start := time.Now() // TODO: is this the best place to start? + hopIP, end, err := rs.ListenPackets(timeout, u.srcIP, u.srcPort, u.Target, u.TargetPort, uint32(udpChecksum), matcherFuncs) + if err != nil { + log.Errorf("failed to listen for packets: %s", err.Error()) + return nil, err + } + + rtt := time.Duration(0) + if !hopIP.Equal(net.IP{}) { + rtt = end.Sub(start) + } + + return &common.Hop{ + IP: hopIP, + RTT: rtt, + IsDest: hopIP.Equal(u.Target), + }, nil +} diff --git a/pkg/networkpath/traceroute/winconn/doc.go b/pkg/networkpath/traceroute/winconn/doc.go new file mode 100644 index 0000000000000..3818829258b13 --- /dev/null +++ b/pkg/networkpath/traceroute/winconn/doc.go @@ -0,0 +1,8 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package winconn contains structs for sending and receiving packets +// to remote hosts from Windows machines +package winconn diff --git a/pkg/networkpath/traceroute/winconn/winconn_windows.go b/pkg/networkpath/traceroute/winconn/winconn_windows.go new file mode 100644 index 0000000000000..027dfbd5fb2ab --- /dev/null +++ b/pkg/networkpath/traceroute/winconn/winconn_windows.go @@ -0,0 +1,174 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +package winconn + +import ( + "context" + "errors" + "fmt" + "net" + "time" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" + "github.com/DataDog/datadog-agent/pkg/util/log" + "golang.org/x/net/ipv4" + "golang.org/x/sys/windows" +) + +var ( + sendTo = windows.Sendto + recvFrom = windows.Recvfrom +) + +type ( + // RawConn is a struct that encapsulates a raw socket + // on Windows that can be used to listen to traffic on a host + // or send raw packets from a host + RawConn struct { + Socket windows.Handle + } +) + +// Close closes the raw socket +func (r *RawConn) Close() { + if r.Socket != windows.InvalidHandle { + windows.Closesocket(r.Socket) // nolint: errcheck + } + r.Socket = windows.InvalidHandle +} + +// ReadFrom reads from the RawConn into the passed []byte and returns +// the IPv4 header and payload separately +func (r *RawConn) ReadFrom(b []byte) (*ipv4.Header, []byte, error) { + // the receive timeout is set to 100ms in the constructor, to match the + // linux side. This is a workaround for the lack of a deadline for sockets. + //err := conn.SetReadDeadline(now.Add(time.Millisecond * 100)) + n, _, err := recvFrom(r.Socket, b, 0) + if err != nil { + return nil, nil, err + } + log.Tracef("Got packet %+v", b[:n]) + + if n < 20 { // min size of ipv4 header + return nil, nil, errors.New("packet too small to be an IPv4 packet") + } + header, err := ipv4.ParseHeader(b[:n]) + if err != nil { + return nil, nil, fmt.Errorf("failed to parse IPv4 header: %w", err) + } + + return header, b[header.Len:header.TotalLen], nil +} + +// NewRawConn creates a Winrawsocket +func NewRawConn() (*RawConn, error) { + s, err := windows.Socket(windows.AF_INET, windows.SOCK_RAW, windows.IPPROTO_IP) + if err != nil { + return nil, fmt.Errorf("failed to create raw socket: %w", err) + } + on := int(1) + err = windows.SetsockoptInt(s, windows.IPPROTO_IP, windows.IP_HDRINCL, on) + if err != nil { + windows.Closesocket(s) // nolint: errcheck + return nil, fmt.Errorf("failed to set IP_HDRINCL: %w", err) + } + + err = windows.SetsockoptInt(s, windows.SOL_SOCKET, windows.SO_RCVTIMEO, 100) + if err != nil { + windows.Closesocket(s) // nolint: errcheck + return nil, fmt.Errorf("failed to set SO_RCVTIMEO: %w", err) + } + return &RawConn{Socket: s}, nil +} + +// SendRawPacket sends a raw packet to a destination IP and port +func (r *RawConn) SendRawPacket(destIP net.IP, destPort uint16, payload []byte) error { + + dst := destIP.To4() + if dst == nil { + return errors.New("unable to parse IP address") + } + sa := &windows.SockaddrInet4{ + Port: int(destPort), + Addr: [4]byte{dst[0], dst[1], dst[2], dst[3]}, + } + if err := sendTo(r.Socket, payload, 0, sa); err != nil { + return fmt.Errorf("failed to send packet: %w", err) + } + return nil +} + +// ListenPackets listens for matching responses based on the passed in trace information and decoderFunc. +// If neither decoderFunc receives a matching packet within the timeout, a blank response is returned. 
+// Once a matching packet is received by a decoderFunc, it will cause the other decoderFuncs to be +// canceled, and data from the matching packet will be returned to the caller +func (r *RawConn) ListenPackets(timeout time.Duration, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, innerIdentifier uint32, matcherFuncs map[int]common.MatcherFunc) (net.IP, time.Time, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + ip, finished, err := r.handlePackets(ctx, localIP, localPort, remoteIP, remotePort, innerIdentifier, matcherFuncs) + if err != nil { + _, canceled := err.(common.CanceledError) + if canceled { + log.Trace("timed out waiting for responses") + return net.IP{}, time.Time{}, nil + } + log.Errorf("listener error: %s", err.Error()) + + return net.IP{}, time.Time{}, fmt.Errorf("error: %w", err) + } + + // return the response + return ip, finished, nil +} + +// handlePackets in its current implementation should listen for the first matching +// packet on the connection and then return. If no packet is received within the +// timeout or if the listener is canceled, it should return a canceledError +func (r *RawConn) handlePackets(ctx context.Context, localIP net.IP, localPort uint16, remoteIP net.IP, remotePort uint16, innerIdentifier uint32, matcherFuncs map[int]common.MatcherFunc) (net.IP, time.Time, error) { + // TODO: reset to 512 before merge? + buf := make([]byte, 4096) + for { + select { + case <-ctx.Done(): + return net.IP{}, time.Time{}, common.CanceledError("listener canceled") + default: + } + + header, packet, err := r.ReadFrom(buf) + if err != nil { + if err == windows.WSAETIMEDOUT { + continue + } + if err == windows.WSAEMSGSIZE { + log.Warnf("Message too large for buffer") + continue + } + return nil, time.Time{}, err + } + log.Tracef("Got packet: header: %+v body: %+v", header, packet) + + // once we have a packet, take a timestamp to know when + // the response was received, if it matches, we will + // return this timestamp + received := time.Now() + matcherFunc, ok := matcherFuncs[header.Protocol] + if !ok { + continue + } + ip, err := matcherFunc(header, packet, localIP, localPort, remoteIP, remotePort, innerIdentifier) + if err != nil { + // if packet is NOT a match continue, otherwise log + // the error + if _, ok := err.(common.MismatchError); !ok { + log.Tracef("decoder error: %s", err.Error()) + } else { + log.Tracef("mismatch error: %s", err.Error()) + } + continue + } + return ip, received, nil + } +} diff --git a/pkg/networkpath/traceroute/winconn/winconn_windows_test.go b/pkg/networkpath/traceroute/winconn/winconn_windows_test.go new file mode 100644 index 0000000000000..5bedf82d7d554 --- /dev/null +++ b/pkg/networkpath/traceroute/winconn/winconn_windows_test.go @@ -0,0 +1,279 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build test + +package winconn + +import ( + "context" + "errors" + "fmt" + "net" + "strings" + "testing" + "time" + + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/common" + "github.com/DataDog/datadog-agent/pkg/networkpath/traceroute/testutils" + "github.com/google/gopacket/layers" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/net/ipv4" + "golang.org/x/sys/windows" +) + +var ( + srcIP = net.ParseIP("1.2.3.4") + dstIP = net.ParseIP("5.6.7.8") +) + +func Test_listenPackets(t *testing.T) { + srcIP := net.ParseIP("99.99.99.99") + dstIP := net.ParseIP("127.0.0.1") + innerSrcIP := net.ParseIP("88.88.88.88") + innerDstIP := net.ParseIP("77.77.77.77") + mockICMPPacket := testutils.CreateMockICMPWithTCPPacket( + testutils.CreateMockIPv4Layer(srcIP, dstIP, layers.IPProtocolICMPv4), + testutils.CreateMockICMPLayer(layers.ICMPv4CodeTTLExceeded), + testutils.CreateMockIPv4Layer(innerSrcIP, innerDstIP, layers.IPProtocolTCP), + testutils.CreateMockTCPLayer(12345, 443, 28394, 12737, true, true, true), + false, + ) + start := time.Now() + + tts := []struct { + description string + timeout time.Duration + matcherFuncs map[int]common.MatcherFunc + recvFrom func(windows.Handle, []byte, int) (int, windows.Sockaddr, error) + expectedIP net.IP + expectFinished bool // if true, we should test that a later finish timestamp is returned + expectedErrMsg string + }{ + { + description: "canceled context returns zero values and no error", + timeout: 500 * time.Millisecond, + recvFrom: func(_ windows.Handle, _ []byte, _ int) (int, windows.Sockaddr, error) { + time.Sleep(100 * time.Millisecond) + return 0, nil, windows.WSAETIMEDOUT // consistently return timeout errors + }, + expectedIP: net.IP{}, + expectedErrMsg: "", + }, + { + description: "downstream error returns the error", + timeout: 500 * time.Millisecond, + recvFrom: func(_ windows.Handle, _ []byte, _ int) (int, windows.Sockaddr, error) { + return 0, nil, errors.New("test handlePackets error") + }, + expectedIP: net.IP{}, + expectedErrMsg: "error: test handlePackets error", + }, + { + description: "successful call returns IP and timestamp", + timeout: 500 * time.Millisecond, + matcherFuncs: map[int]common.MatcherFunc{ + windows.IPPROTO_ICMP: func(_ *ipv4.Header, _ []byte, _ net.IP, _ uint16, _ net.IP, _ uint16, _ uint32) (net.IP, error) { + return srcIP, nil + }, + }, + recvFrom: func(_ windows.Handle, buf []byte, _ int) (int, windows.Sockaddr, error) { + copy(buf, mockICMPPacket) + + return len(mockICMPPacket), nil, nil + }, + expectedIP: srcIP, + expectFinished: true, + expectedErrMsg: "", + }, + } + + // these don't matter in the test, but are required parameters + socket := &RawConn{} + inputIP := net.ParseIP("127.0.0.1") + inputPort := uint16(161) + seqNum := uint32(1) + for _, test := range tts { + t.Run(test.description, func(t *testing.T) { + recvFrom = test.recvFrom + actualIP, finished, err := socket.ListenPackets(test.timeout, inputIP, inputPort, inputIP, inputPort, seqNum, test.matcherFuncs) + if test.expectedErrMsg != "" { + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), test.expectedErrMsg), fmt.Sprintf("expected %q, got %q", test.expectedErrMsg, err.Error())) + } else { + require.NoError(t, err) + } + assert.Truef(t, test.expectedIP.Equal(actualIP), "mismatch IPs: expected %s, got %s", test.expectedIP.String(), actualIP.String()) + + if test.expectFinished { + assert.Truef(t, finished.After(start), "finished timestamp should be later than start: finished 
%s, start %s", finished, start) + } else { + assert.Equal(t, finished, time.Time{}) + } + }) + } +} + +func Test_handlePackets(t *testing.T) { + _, tcpBytes := testutils.CreateMockTCPPacket(testutils.CreateMockIPv4Header(dstIP, srcIP, 6), testutils.CreateMockTCPLayer(443, 12345, 28394, 28395, true, true, true), true) + + tt := []struct { + description string + // input + ctxTimeout time.Duration + matcherFuncs map[int]common.MatcherFunc + recvFrom func(windows.Handle, []byte, int) (int, windows.Sockaddr, error) + // output + expectedIP net.IP + errMsg string + }{ + { + description: "canceled context returns canceledErr", + ctxTimeout: 300 * time.Millisecond, + recvFrom: func(_ windows.Handle, _ []byte, _ int) (int, windows.Sockaddr, error) { + time.Sleep(100 * time.Millisecond) + return 0, nil, windows.WSAETIMEDOUT + }, + errMsg: "canceled", + }, + { + description: "oversized messages eventually returns canceledErr", + ctxTimeout: 300 * time.Millisecond, + recvFrom: func(_ windows.Handle, _ []byte, _ int) (int, windows.Sockaddr, error) { + time.Sleep(100 * time.Millisecond) + return 0, nil, windows.WSAEMSGSIZE + }, + errMsg: "canceled", + }, + { + description: "non-timeout read error returns an error", + ctxTimeout: 1 * time.Second, + recvFrom: func(_ windows.Handle, _ []byte, _ int) (int, windows.Sockaddr, error) { + return 0, nil, errors.New("test read error") + }, + errMsg: "test read error", + }, + { + description: "failed parsing eventually returns cancel timeout", + ctxTimeout: 500 * time.Millisecond, + recvFrom: func(_ windows.Handle, buf []byte, _ int) (int, windows.Sockaddr, error) { + copy(buf, tcpBytes) + + return len(tcpBytes), nil, nil + }, + matcherFuncs: map[int]common.MatcherFunc{ + windows.IPPROTO_TCP: func(_ *ipv4.Header, _ []byte, _ net.IP, _ uint16, _ net.IP, _ uint16, _ uint32) (net.IP, error) { + return net.IP{}, errors.New("failed parsing packet") + }, + }, + errMsg: "canceled", + }, + { + description: "no matcher eventually returns cancel timeout", + ctxTimeout: 500 * time.Millisecond, + recvFrom: func(_ windows.Handle, buf []byte, _ int) (int, windows.Sockaddr, error) { + copy(buf, tcpBytes) + + return len(tcpBytes), nil, nil + }, + matcherFuncs: map[int]common.MatcherFunc{}, + errMsg: "canceled", + }, + { + description: "successful matching returns IP, port, and type code", + ctxTimeout: 500 * time.Millisecond, + recvFrom: func(_ windows.Handle, buf []byte, _ int) (int, windows.Sockaddr, error) { + copy(buf, tcpBytes) + + return len(tcpBytes), nil, nil + }, + matcherFuncs: map[int]common.MatcherFunc{ + windows.IPPROTO_TCP: func(_ *ipv4.Header, _ []byte, _ net.IP, _ uint16, _ net.IP, _ uint16, _ uint32) (net.IP, error) { + return srcIP, nil + }, + }, + expectedIP: srcIP, + }, + } + + for _, test := range tt { + t.Run(test.description, func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), test.ctxTimeout) + defer cancel() + recvFrom = test.recvFrom + w := &RawConn{} + actualIP, _, err := w.handlePackets(ctx, net.IP{}, uint16(0), net.IP{}, uint16(0), uint32(0), test.matcherFuncs) + if test.errMsg != "" { + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), test.errMsg), fmt.Sprintf("expected %q, got %q", test.errMsg, err.Error())) + return + } + require.NoError(t, err) + assert.Truef(t, test.expectedIP.Equal(actualIP), "mismatch source IPs: expected %s, got %s", test.expectedIP.String(), actualIP.String()) + }) + } +} + +func Test_SendRawPacket(t *testing.T) { + tts := []struct { + description string + destIP net.IP + 
destPort uint16 + payload []byte + sendTo func(windows.Handle, []byte, int, windows.Sockaddr) error + expectedErrMsg string + }{ + { + description: "non-IPv4 address returns an error", + destIP: net.ParseIP("e2cc:0314:92fe:1307:94e3:0108:a67c:980c"), + destPort: 161, + payload: []byte{}, + sendTo: nil, + expectedErrMsg: "unable to parse IP address", + }, + { + description: "sendTo error returns an error", + destIP: net.ParseIP("8.8.8.8"), + destPort: 161, + payload: []byte{}, + sendTo: func(_ windows.Handle, _ []byte, _ int, _ windows.Sockaddr) error { + return errors.New("test error") + }, + expectedErrMsg: "test error", + }, + { + description: "successful send returns nil", + destIP: net.ParseIP("8.8.8.8"), + destPort: 161, + payload: []byte{1, 2, 3}, + sendTo: func(_ windows.Handle, payload []byte, _ int, addr windows.Sockaddr) error { + expectedPayload := []byte{1, 2, 3} + expectedSockaddr := &windows.SockaddrInet4{ + Port: 161, + Addr: [4]byte{8, 8, 8, 8}, + } + assert.Equalf(t, payload, expectedPayload, "mismatched payloads in sendTo: expected %+v, got %+v", expectedPayload, payload) + assert.Equalf(t, addr, expectedSockaddr, "mismatched adddresses: expected %+v, got %+v", expectedSockaddr, addr) + return nil + }, + expectedErrMsg: "", + }, + } + + w := &RawConn{} + for _, test := range tts { + t.Run(test.description, func(t *testing.T) { + sendTo = test.sendTo + err := w.SendRawPacket(test.destIP, test.destPort, test.payload) + if test.expectedErrMsg != "" { + require.Error(t, err) + assert.True(t, strings.Contains(err.Error(), test.expectedErrMsg), fmt.Sprintf("expected %q, got %q", test.expectedErrMsg, err.Error())) + return + } + require.NoError(t, err) + }) + } +} diff --git a/pkg/obfuscate/cache.go b/pkg/obfuscate/cache.go index 837b2b15b1914..3faf030ba0b48 100644 --- a/pkg/obfuscate/cache.go +++ b/pkg/obfuscate/cache.go @@ -51,8 +51,9 @@ func (c *measuredCache) statsLoop() { } type cacheOptions struct { - On bool - Statsd StatsClient + On bool + Statsd StatsClient + MaxSize int64 } // newMeasuredCache returns a new measuredCache. @@ -62,17 +63,12 @@ func newMeasuredCache(opts cacheOptions) *measuredCache { return &measuredCache{} } cfg := &ristretto.Config{ - // We know that the maximum allowed resource length is 5K. This means that - // in 5MB we can store a minimum of 1000 queries. - MaxCost: 5000000, - - // An appromixated worst-case scenario when the cache is filled with small - // queries averaged as being of length 11 ("LOCK TABLES"), we would be able - // to fit 476K of them into 5MB of cost. - // - // We average it to 500K and multiply 10x as the documentation recommends. - NumCounters: 500000 * 10, - + MaxCost: opts.MaxSize, + // Assuming the minimum query size is 10 bytes , the maximum number of queries + // that can be stored is calculated as opts.MaxSize / (10 + 320). + // The 320 bytes is the fixed size of the ObfuscatedQuery struct which is stored in the cache. + // Multiplying this maximum number by 10 (opts.MaxSize / 330 * 10) as per the ristretto documentation. 
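+ // For example, with the 5 MB budget that was previously hard coded here, this
+ // works out to roughly 5_000_000 / 330 ≈ 15,151 storable queries and
+ // NumCounters ≈ 151,515.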
+ NumCounters: int64(opts.MaxSize / 330 * 10), BufferItems: 64, // default recommended value Metrics: true, // enable hit/miss counters } diff --git a/pkg/obfuscate/go.mod b/pkg/obfuscate/go.mod index df547c8beb777..8abb2a0e0441f 100644 --- a/pkg/obfuscate/go.mod +++ b/pkg/obfuscate/go.mod @@ -4,7 +4,7 @@ go 1.22.0 require ( github.com/DataDog/datadog-go/v5 v5.6.0 - github.com/DataDog/go-sqllexer v0.0.17 + github.com/DataDog/go-sqllexer v0.0.20 github.com/outcaste-io/ristretto v0.2.3 github.com/stretchr/testify v1.10.0 go.uber.org/atomic v1.11.0 @@ -20,8 +20,8 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/obfuscate/go.sum b/pkg/obfuscate/go.sum index 87558fb1a5f79..dd74e852c628a 100644 --- a/pkg/obfuscate/go.sum +++ b/pkg/obfuscate/go.sum @@ -1,7 +1,7 @@ github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.17 h1:u47fJAVg/+5DA74ZW3w0Qu+3qXHd3GtnA8ZBYixdPrM= -github.com/DataDog/go-sqllexer v0.0.17/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.20 h1:0fBknHo42yuhawZS3GtuQSdqcwaiojWjYNT6OdsZRfI= +github.com/DataDog/go-sqllexer v0.0.20/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= @@ -62,8 +62,8 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -74,8 +74,8 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= 
-golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= diff --git a/pkg/obfuscate/obfuscate.go b/pkg/obfuscate/obfuscate.go index dcccacbbb7210..52ddb384e13bb 100644 --- a/pkg/obfuscate/obfuscate.go +++ b/pkg/obfuscate/obfuscate.go @@ -70,32 +70,32 @@ type Config struct { SQL SQLConfig // ES holds the obfuscation configuration for ElasticSearch bodies. - ES JSONConfig + ES JSONConfig `mapstructure:"elasticsearch"` // OpenSearch holds the obfuscation configuration for OpenSearch bodies. - OpenSearch JSONConfig + OpenSearch JSONConfig `mapstructure:"opensearch"` // Mongo holds the obfuscation configuration for MongoDB queries. - Mongo JSONConfig + Mongo JSONConfig `mapstructure:"mongodb"` // SQLExecPlan holds the obfuscation configuration for SQL Exec Plans. This is strictly for safety related obfuscation, // not normalization. Normalization of exec plans is configured in SQLExecPlanNormalize. - SQLExecPlan JSONConfig + SQLExecPlan JSONConfig `mapstructure:"sql_exec_plan"` // SQLExecPlanNormalize holds the normalization configuration for SQL Exec Plans. - SQLExecPlanNormalize JSONConfig + SQLExecPlanNormalize JSONConfig `mapstructure:"sql_exec_plan_normalize"` // HTTP holds the obfuscation settings for HTTP URLs. - HTTP HTTPConfig + HTTP HTTPConfig `mapstructure:"http"` // Redis holds the obfuscation settings for Redis commands. - Redis RedisConfig + Redis RedisConfig `mapstructure:"redis"` // Memcached holds the obfuscation settings for Memcached commands. - Memcached MemcachedConfig + Memcached MemcachedConfig `mapstructure:"memcached"` // Memcached holds the obfuscation settings for obfuscation of CC numbers in meta. - CreditCard CreditCardsConfig + CreditCard CreditCardsConfig `mapstructure:"credit_cards"` // Statsd specifies the statsd client to use for reporting metrics. Statsd StatsClient @@ -105,7 +105,7 @@ type Config struct { Logger Logger // Cache enables the query cache for obfuscation for SQL and MongoDB queries. - Cache CacheConfig + Cache CacheConfig `mapstructure:"cache"` } // StatsClient implementations are able to emit stats. @@ -277,6 +277,9 @@ type CreditCardsConfig struct { type CacheConfig struct { // Enabled specifies whether caching should be enabled. Enabled bool `mapstructure:"enabled"` + + // MaxSize is the maximum size of the cache in bytes. 
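+ // For example, 5_000_000 reproduces the 5 MB budget that newMeasuredCache previously hard coded as MaxCost.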
+ MaxSize int64 `mapstructure:"max_size"` } // NewObfuscator creates a new obfuscator @@ -286,7 +289,7 @@ func NewObfuscator(cfg Config) *Obfuscator { } o := Obfuscator{ opts: &cfg, - queryCache: newMeasuredCache(cacheOptions{On: cfg.Cache.Enabled, Statsd: cfg.Statsd}), + queryCache: newMeasuredCache(cacheOptions{On: cfg.Cache.Enabled, Statsd: cfg.Statsd, MaxSize: cfg.Cache.MaxSize}), sqlLiteralEscapes: atomic.NewBool(false), log: cfg.Logger, } diff --git a/pkg/obfuscate/sql.go b/pkg/obfuscate/sql.go index 8674173c8fdc3..a6fb5b89d3e29 100644 --- a/pkg/obfuscate/sql.go +++ b/pkg/obfuscate/sql.go @@ -287,6 +287,10 @@ func (f *groupingFilter) Reset() { f.groupMulti = 0 } +func isSQLLexer(obfuscationMode ObfuscationMode) bool { + return obfuscationMode != "" +} + // ObfuscateSQLString quantizes and obfuscates the given input SQL query string. Quantization removes // some elements such as comments and aliases and obfuscation attempts to hide sensitive information // in strings and numbers by redacting them. @@ -294,6 +298,14 @@ func (o *Obfuscator) ObfuscateSQLString(in string) (*ObfuscatedQuery, error) { return o.ObfuscateSQLStringWithOptions(in, &o.opts.SQL) } +// ObfuscateSQLStringForDBMS quantizes and obfuscates the given input SQL query string for a specific DBMS. +func (o *Obfuscator) ObfuscateSQLStringForDBMS(in string, dbms string) (*ObfuscatedQuery, error) { + if isSQLLexer(o.opts.SQL.ObfuscationMode) { + o.opts.SQL.DBMS = dbms + } + return o.ObfuscateSQLStringWithOptions(in, &o.opts.SQL) +} + // ObfuscateSQLStringWithOptions accepts an optional SQLOptions to change the behavior of the obfuscator // to quantize and obfuscate the given input SQL query string. Quantization removes some elements such as comments // and aliases and obfuscation attempts to hide sensitive information in strings and numbers by redacting them. @@ -342,7 +354,16 @@ type ObfuscatedQuery struct { // Cost returns the number of bytes needed to store all the fields // of this ObfuscatedQuery. func (oq *ObfuscatedQuery) Cost() int64 { - return int64(len(oq.Query)) + oq.Metadata.Size + // The cost of the ObfuscatedQuery struct is the sum of the length of the query string, + // the size of the metadata content, and the size of the struct itself and its fields headers. + // 320 bytes come from + // - 112 bytes for the ObfuscatedQuery struct itself, measured by unsafe.Sizeof(ObfuscatedQuery{}) + // - 96 bytes for the Metadata struct itself, measured by unsafe.Sizeof(SQLMetadata{}) + // - 16 bytes for the Query string header + // - 16 bytes for the TablesCSV string header + // - 24 * 3 bytes for the Comments, Commands, and Procedures slices headers + // - 8 bytes for the Size int64 field + return int64(len(oq.Query)) + oq.Metadata.Size + 320 } // attemptObfuscation attempts to obfuscate the SQL query loaded into the tokenizer, using the given set of filters. diff --git a/pkg/obfuscate/sql_test.go b/pkg/obfuscate/sql_test.go index db6d321be1c0f..fa11aa25ca2c5 100644 --- a/pkg/obfuscate/sql_test.go +++ b/pkg/obfuscate/sql_test.go @@ -380,8 +380,8 @@ TABLE T4 UNION CORRESPONDING TABLE T3`, assert.Equal(tt.metadata.TablesCSV, oq.Metadata.TablesCSV) assert.Equal(tt.metadata.Commands, oq.Metadata.Commands) assert.Equal(tt.metadata.Comments, oq.Metadata.Comments) - // Cost() includes the query text size, exclude it to see if it matches the size the metadata filter collected. 
- assert.Equal(oq.Cost()-int64(len(oq.Query)), oq.Metadata.Size) + // Cost() includes the query text size, metadata size and struct overhead + assert.Equal(oq.Cost()-int64(len(oq.Query))-oq.Metadata.Size, int64(320)) }) } } diff --git a/pkg/process/events/consumer/event_copy_linux.go b/pkg/process/events/consumer/event_copy_linux.go index db91b9e37c25e..f4acf4a518175 100644 --- a/pkg/process/events/consumer/event_copy_linux.go +++ b/pkg/process/events/consumer/event_copy_linux.go @@ -33,7 +33,7 @@ func (p *ProcessConsumer) Copy(event *smodel.Event) any { valueUID := event.GetProcessUid() result.UID = valueUID - valueGID := event.GetProcessUid() + valueGID := event.GetProcessGid() result.GID = valueGID valueUsername := event.GetProcessUser() @@ -51,7 +51,7 @@ func (p *ProcessConsumer) Copy(event *smodel.Event) any { } if event.GetEventType() == smodel.ForkEventType { - valueForkTime := event.GetProcessExecTime() + valueForkTime := event.GetProcessForkTime() result.ForkTime = valueForkTime } diff --git a/pkg/process/events/model/model_common.go b/pkg/process/events/model/model_common.go index 1c4c8f7db405b..ccd34452dbd0f 100644 --- a/pkg/process/events/model/model_common.go +++ b/pkg/process/events/model/model_common.go @@ -61,12 +61,12 @@ type ProcessEvent struct { ContainerID string `json:"container_id" msg:"container_id" copy:"GetContainerId;event:*"` Ppid uint32 `json:"ppid" msg:"ppid" copy:"GetProcessPpid;event:*"` UID uint32 `json:"uid" msg:"uid" copy_linux:"GetProcessUid;event:*"` - GID uint32 `json:"gid" msg:"gid" copy_linux:"GetProcessUid;event:*"` + GID uint32 `json:"gid" msg:"gid" copy_linux:"GetProcessGid;event:*"` Username string `json:"username" msg:"username" copy_linux:"GetProcessUser;event:*"` Group string `json:"group" msg:"group" copy_linux:"GetProcessGroup;event:*"` Exe string `json:"exe" msg:"exe" copy_linux:"GetExecFilePath;event:*"` Cmdline []string `json:"cmdline" msg:"cmdline" copy_linux:"GetExecCmdargv;event:ExecEventType"` - ForkTime time.Time `json:"fork_time,omitempty" msg:"fork_time,omitempty" copy_linux:"GetProcessExecTime;event:ForkEventType"` + ForkTime time.Time `json:"fork_time,omitempty" msg:"fork_time,omitempty" copy_linux:"GetProcessForkTime;event:ForkEventType"` ExecTime time.Time `json:"exec_time,omitempty" msg:"exec_time,omitempty" copy:"GetProcessExecTime;event:ExecEventType"` ExitTime time.Time `json:"exit_time,omitempty" msg:"exit_time,omitempty" copy:"GetProcessExitTime;event:ExitEventType"` ExitCode uint32 `json:"exit_code,omitempty" msg:"exit_code,omitempty" copy:"GetExitCode;event:ExitEventType"` diff --git a/pkg/process/metadata/parser/dockerproxy.go b/pkg/process/metadata/parser/dockerproxy.go index 52c8f48450b48..1ef92e547671b 100644 --- a/pkg/process/metadata/parser/dockerproxy.go +++ b/pkg/process/metadata/parser/dockerproxy.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-//nolint:revive // TODO(PROC) Fix revive linter +// Package parser parses service metadata package parser import ( @@ -37,7 +37,7 @@ func NewDockerProxy() *DockerProxy { } } -//nolint:revive // TODO(PROC) Fix revive linter +// Extract the process metadata from the processes func (d *DockerProxy) Extract(processes map[int32]*procutil.Process) { proxyByPID := make(map[int32]*proxy) proxyByTarget := make(map[model.ContainerAddr]*proxy) diff --git a/pkg/process/metadata/parser/scm_reader.go b/pkg/process/metadata/parser/scm_reader.go index 5de1102b821ab..b368227937fc1 100644 --- a/pkg/process/metadata/parser/scm_reader.go +++ b/pkg/process/metadata/parser/scm_reader.go @@ -19,7 +19,6 @@ func newSCMReader() *scmReader { return &scmReader{} } -//nolint:revive // TODO(PROC) Fix revive linter func (s *scmReader) getServiceInfo(_ uint64) (*WindowsServiceInfo, error) { return nil, fmt.Errorf("scm service info is only available on windows") } diff --git a/pkg/process/metadata/parser/service.go b/pkg/process/metadata/parser/service.go index bacff8563b51c..5aa71d3d648ee 100644 --- a/pkg/process/metadata/parser/service.go +++ b/pkg/process/metadata/parser/service.go @@ -89,7 +89,7 @@ func NewServiceExtractor(enabled, useWindowsServiceName, useImprovedAlgorithm bo } } -//nolint:revive // TODO(PROC) Fix revive linter +// Extract the process metadata from the processes func (d *ServiceExtractor) Extract(processes map[int32]*procutil.Process) { if !d.enabled { return @@ -117,7 +117,7 @@ func (d *ServiceExtractor) Extract(processes map[int32]*procutil.Process) { d.serviceByPID = serviceByPID } -//nolint:revive // TODO(PROC) Fix revive linter +// GetServiceContext returns the service context for the PID func (d *ServiceExtractor) GetServiceContext(pid int32) []string { if !d.enabled { return nil diff --git a/pkg/process/monitor/process_monitor_test.go b/pkg/process/monitor/process_monitor_test.go index c03cf721aaafc..b0942f2f6d792 100644 --- a/pkg/process/monitor/process_monitor_test.go +++ b/pkg/process/monitor/process_monitor_test.go @@ -14,14 +14,14 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/vishvananda/netns" - "go.uber.org/atomic" "github.com/DataDog/datadog-agent/pkg/eventmonitor/consumers/testutil" "github.com/DataDog/datadog-agent/pkg/network/protocols/telemetry" - "github.com/DataDog/datadog-agent/pkg/util" + "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/kernel" ) @@ -34,20 +34,65 @@ func getProcessMonitor(t *testing.T) *ProcessMonitor { return pm } +// pidRecorder is a helper to record pids and check if they were recorded. +type pidRecorder struct { + mu sync.RWMutex + pids map[uint32]struct{} +} + +// newPidRecorder creates a new pidRecorder. +func newPidRecorder() *pidRecorder { + return &pidRecorder{pids: make(map[uint32]struct{})} +} + +// record records a pid. +func (pr *pidRecorder) record(pid uint32) { + pr.mu.Lock() + defer pr.mu.Unlock() + pr.pids[pid] = struct{}{} +} + +// has checks if a pid was recorded. +func (pr *pidRecorder) has(pid uint32) bool { + pr.mu.RLock() + defer pr.mu.RUnlock() + _, ok := pr.pids[pid] + return ok +} + +// getProcessCallback returns a ProcessCallback wrapper of the pidRecorder.record method. 
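+// It returns a pointer because the tests register callbacks through registerCallback, which takes a *ProcessCallback.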
+func getProcessCallback(r *pidRecorder) *ProcessCallback { + f := func(pid uint32) { + r.record(pid) + } + return &f +} + func waitForProcessMonitor(t *testing.T, pm *ProcessMonitor) { - execCounter := atomic.NewInt32(0) - execCallback := func(_ uint32) { execCounter.Inc() } - registerCallback(t, pm, true, &execCallback) - - exitCounter := atomic.NewInt32(0) - // Sanity subscribing a callback. - exitCallback := func(_ uint32) { exitCounter.Inc() } - registerCallback(t, pm, false, &exitCallback) - - require.Eventually(t, func() bool { - _ = exec.Command("/bin/echo").Run() - return execCounter.Load() > 0 && exitCounter.Load() > 0 - }, 10*time.Second, time.Millisecond*200) + execRecorder := newPidRecorder() + registerCallback(t, pm, true, getProcessCallback(execRecorder)) + + exitRecorder := newPidRecorder() + registerCallback(t, pm, false, getProcessCallback(exitRecorder)) + + const ( + iterationInterval = 100 * time.Millisecond + iterations = 10 + ) + + // Trying for 10 seconds (100 iterations * 100ms) to capture exec and exit events. + require.EventuallyWithT(t, func(ct *assert.CollectT) { + cmd := exec.Command("/bin/echo") + require.NoError(ct, cmd.Run()) + require.NotZero(ct, cmd.Process.Pid) + t.Logf("running %d", cmd.Process.Pid) + // Trying for a second (10 iterations * 100ms) to capture exec and exit events. + // If we failed, try to run the command again. + require.EventuallyWithT(ct, func(innerCt *assert.CollectT) { + require.Truef(innerCt, execRecorder.has(uint32(cmd.Process.Pid)), "didn't capture exec event %d", cmd.Process.Pid) + require.True(innerCt, exitRecorder.has(uint32(cmd.Process.Pid)), "didn't capture exit event %d", cmd.Process.Pid) + }, iterations*iterationInterval, iterationInterval) + }, iterations*iterations*iterationInterval, iterationInterval) } func initializePM(t *testing.T, pm *ProcessMonitor, useEventStream bool) { @@ -74,7 +119,7 @@ func getTestBinaryPath(t *testing.T) string { t.Cleanup(func() { os.Remove(tmpFile.Name()) }) - require.NoError(t, util.CopyFile("/bin/echo", tmpFile.Name())) + require.NoError(t, filesystem.CopyFile("/bin/echo", tmpFile.Name())) return tmpFile.Name() } @@ -95,44 +140,21 @@ type processMonitorSuite struct { func (s *processMonitorSuite) TestProcessMonitorSanity() { t := s.T() pm := getProcessMonitor(t) - execsMutex := sync.RWMutex{} - execs := make(map[uint32]struct{}) testBinaryPath := getTestBinaryPath(t) - callback := func(pid uint32) { - execsMutex.Lock() - defer execsMutex.Unlock() - execs[pid] = struct{}{} - } - registerCallback(t, pm, true, &callback) - - exitMutex := sync.RWMutex{} - exits := make(map[uint32]struct{}) - exitCallback := func(pid uint32) { - exitMutex.Lock() - defer exitMutex.Unlock() - exits[pid] = struct{}{} - } - registerCallback(t, pm, false, &exitCallback) + + execRecorder := newPidRecorder() + registerCallback(t, pm, true, getProcessCallback(execRecorder)) + + exitRecorder := newPidRecorder() + registerCallback(t, pm, false, getProcessCallback(exitRecorder)) initializePM(t, pm, s.useEventStream) cmd := exec.Command(testBinaryPath, "test") require.NoError(t, cmd.Run()) - require.Eventually(t, func() bool { - execsMutex.RLock() - _, execCaptured := execs[uint32(cmd.Process.Pid)] - execsMutex.RUnlock() - if !execCaptured { - t.Logf("didn't capture exec event %d", cmd.Process.Pid) - } - - exitMutex.RLock() - _, exitCaptured := exits[uint32(cmd.Process.Pid)] - exitMutex.RUnlock() - if !exitCaptured { - t.Logf("didn't capture exit event %d", cmd.Process.Pid) - } - return execCaptured && exitCaptured - }, 
time.Second, time.Millisecond*200) + require.EventuallyWithT(t, func(ct *assert.CollectT) { + assert.Truef(ct, execRecorder.has(uint32(cmd.Process.Pid)), "didn't capture exec event %d", cmd.Process.Pid) + assert.Truef(ct, exitRecorder.has(uint32(cmd.Process.Pid)), "didn't capture exit event %d", cmd.Process.Pid) + }, 5*time.Second, time.Millisecond*100) require.GreaterOrEqual(t, pm.tel.events.Get(), pm.tel.exec.Get(), "events is not >= than exec") require.GreaterOrEqual(t, pm.tel.events.Get(), pm.tel.exit.Get(), "events is not >= than exit") @@ -158,57 +180,28 @@ func (s *processMonitorSuite) TestProcessRegisterMultipleCallbacks() { pm := getProcessMonitor(t) const iterations = 10 - execCountersMutexes := make([]sync.RWMutex, iterations) - execCounters := make([]map[uint32]struct{}, iterations) - exitCountersMutexes := make([]sync.RWMutex, iterations) - exitCounters := make([]map[uint32]struct{}, iterations) + execs := make([]*pidRecorder, iterations) + exits := make([]*pidRecorder, iterations) for i := 0; i < iterations; i++ { - execCountersMutexes[i] = sync.RWMutex{} - execCounters[i] = make(map[uint32]struct{}) - c := execCounters[i] - // Sanity subscribing a callback. - callback := func(pid uint32) { - execCountersMutexes[i].Lock() - defer execCountersMutexes[i].Unlock() - c[pid] = struct{}{} - } - registerCallback(t, pm, true, &callback) - - exitCountersMutexes[i] = sync.RWMutex{} - exitCounters[i] = make(map[uint32]struct{}) - exitc := exitCounters[i] - // Sanity subscribing a callback. - exitCallback := func(pid uint32) { - exitCountersMutexes[i].Lock() - defer exitCountersMutexes[i].Unlock() - exitc[pid] = struct{}{} - } - registerCallback(t, pm, false, &exitCallback) + newExecRecorder := newPidRecorder() + registerCallback(t, pm, true, getProcessCallback(newExecRecorder)) + execs[i] = newExecRecorder + + newExitRecorder := newPidRecorder() + registerCallback(t, pm, false, getProcessCallback(newExitRecorder)) + exits[i] = newExitRecorder } initializePM(t, pm, s.useEventStream) cmd := exec.Command("/bin/sleep", "1") require.NoError(t, cmd.Run()) - require.Eventuallyf(t, func() bool { + require.EventuallyWithTf(t, func(ct *assert.CollectT) { // Instead of breaking immediately when we don't find the event, we want logs to be printed for all iterations. 
- found := true for i := 0; i < iterations; i++ { - execCountersMutexes[i].RLock() - if _, captured := execCounters[i][uint32(cmd.Process.Pid)]; !captured { - t.Logf("iter %d didn't capture exec event", i) - found = false - } - execCountersMutexes[i].RUnlock() - - exitCountersMutexes[i].RLock() - if _, captured := exitCounters[i][uint32(cmd.Process.Pid)]; !captured { - t.Logf("iter %d didn't capture exit event", i) - found = false - } - exitCountersMutexes[i].RUnlock() + assert.Truef(ct, execs[i].has(uint32(cmd.Process.Pid)), "iter %d didn't capture exec event %d", i, cmd.Process.Pid) + assert.Truef(ct, exits[i].has(uint32(cmd.Process.Pid)), "iter %d didn't capture exit event %d", i, cmd.Process.Pid) } - return found - }, time.Second, time.Millisecond*200, "at least of the callbacks didn't capture events") + }, 5*time.Second, 100*time.Millisecond, "at least one of the callbacks didn't capture events") require.GreaterOrEqual(t, pm.tel.events.Get(), pm.tel.exec.Get(), "events is not >= than exec") require.GreaterOrEqual(t, pm.tel.events.Get(), pm.tel.exit.Get(), "events is not >= than exit") @@ -236,16 +229,14 @@ func TestProcessMonitorRefcount(t *testing.T) { func (s *processMonitorSuite) TestProcessMonitorInNamespace() { t := s.T() - execSet := sync.Map{} - exitSet := sync.Map{} pm := getProcessMonitor(t) - callback := func(pid uint32) { execSet.Store(pid, struct{}{}) } - registerCallback(t, pm, true, &callback) + execRecorder := newPidRecorder() + registerCallback(t, pm, true, getProcessCallback(execRecorder)) - exitCallback := func(pid uint32) { exitSet.Store(pid, struct{}{}) } - registerCallback(t, pm, false, &exitCallback) + exitRecorder := newPidRecorder() + registerCallback(t, pm, false, getProcessCallback(exitRecorder)) monNs, err := netns.New() require.NoError(t, err, "could not create network namespace for process monitor") @@ -263,17 +254,10 @@ func (s *processMonitorSuite) TestProcessMonitorInNamespace() { require.NoError(t, cmd.Run(), "could not run process in root namespace") pid := uint32(cmd.ProcessState.Pid()) - require.Eventually(t, func() bool { - _, capturedExec := execSet.Load(pid) - if !capturedExec { - t.Logf("pid %d not captured in exec", pid) - } - _, capturedExit := exitSet.Load(pid) - if !capturedExit { - t.Logf("pid %d not captured in exit", pid) - } - return capturedExec && capturedExit - }, time.Second, time.Millisecond*200, "did not capture process EXEC/EXIT from root namespace") + require.EventuallyWithTf(t, func(ct *assert.CollectT) { + assert.Truef(ct, execRecorder.has(pid), "didn't capture exec event %d", pid) + assert.Truef(ct, exitRecorder.has(pid), "didn't capture exit event %d", pid) + }, 5*time.Second, 100*time.Millisecond, "did not capture process EXEC/EXIT from root namespace") // Process in another NS cmdNs, err := netns.New() @@ -284,17 +268,10 @@ func (s *processMonitorSuite) TestProcessMonitorInNamespace() { require.NoError(t, kernel.WithNS(cmdNs, cmd.Run), "could not run process in other network namespace") pid = uint32(cmd.ProcessState.Pid()) - require.Eventually(t, func() bool { - _, capturedExec := execSet.Load(pid) - if !capturedExec { - t.Logf("pid %d not captured in exec", pid) - } - _, capturedExit := exitSet.Load(pid) - if !capturedExit { - t.Logf("pid %d not captured in exit", pid) - } - return capturedExec && capturedExit - }, time.Second, 200*time.Millisecond, "did not capture process EXEC/EXIT from other namespace") + require.EventuallyWithTf(t, func(ct *assert.CollectT) { + assert.Truef(ct, execRecorder.has(pid), "didn't capture exec 
event %d", pid) + assert.Truef(ct, exitRecorder.has(pid), "didn't capture exit event %d", pid) + }, 5*time.Second, 100*time.Millisecond, "did not capture process EXEC/EXIT from other namespace") require.GreaterOrEqual(t, pm.tel.events.Get(), pm.tel.exec.Get(), "events is not >= than exec") require.GreaterOrEqual(t, pm.tel.events.Get(), pm.tel.exit.Get(), "events is not >= than exit") diff --git a/pkg/process/net/resolver/resolver.go b/pkg/process/net/resolver/resolver.go index fc196bfe37f18..59d3afd98a703 100644 --- a/pkg/process/net/resolver/resolver.go +++ b/pkg/process/net/resolver/resolver.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//nolint:revive // TODO(PROC) Fix revive linter +// Package resolver resolves local Raddrs package resolver import ( @@ -52,6 +52,7 @@ type LocalResolver struct { done chan bool } +// NewLocalResolver creates a new LocalResolver func NewLocalResolver(containerProvider proccontainers.ContainerProvider, clock clock.Clock, maxAddrCacheSize, maxPidCacheSize int) *LocalResolver { return &LocalResolver{ ContainerProvider: containerProvider, @@ -64,12 +65,14 @@ func NewLocalResolver(containerProvider proccontainers.ContainerProvider, clock } } +// Run the resolver func (l *LocalResolver) Run() { pullContainerFrequency := 10 * time.Second ticker := l.Clock.Ticker(pullContainerFrequency) go l.pullContainers(ticker) } +// Stop the resolver func (l *LocalResolver) Stop() { l.done <- true } diff --git a/pkg/process/util/api/go.mod b/pkg/process/util/api/go.mod index f76e70cb9ee0f..159cb6ced4541 100644 --- a/pkg/process/util/api/go.mod +++ b/pkg/process/util/api/go.mod @@ -7,11 +7,11 @@ replace ( github.com/DataDog/datadog-agent/comp/def => ../../../../comp/def github.com/DataDog/datadog-agent/pkg/telemetry => ../../../telemetry/ github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../util/fxutil/ - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../util/option ) require ( - github.com/DataDog/agent-payload/v5 v5.0.138 + github.com/DataDog/agent-payload/v5 v5.0.141 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 github.com/gogo/protobuf v1.3.2 github.com/stretchr/testify v1.10.0 @@ -21,7 +21,7 @@ require ( github.com/DataDog/datadog-agent/comp/core/telemetry v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.55.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.55.0 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect github.com/DataDog/zstd v1.5.6 // indirect github.com/DataDog/zstd_0 v0.0.0-20210310093942-586c1286621f // indirect @@ -34,7 +34,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -43,7 +43,7 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/sys v0.28.0 // indirect - 
google.golang.org/protobuf v1.35.2 // indirect + golang.org/x/sys v0.29.0 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/process/util/api/go.sum b/pkg/process/util/api/go.sum index 7343a1cf6d04e..1e52565513f21 100644 --- a/pkg/process/util/api/go.sum +++ b/pkg/process/util/api/go.sum @@ -1,5 +1,5 @@ -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= @@ -37,8 +37,8 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+ github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -79,8 +79,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -91,8 +91,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.35.2 
h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/process/util/containers/containers.go b/pkg/process/util/containers/containers.go index c5ad7cae1c9b3..1dcadf08011a9 100644 --- a/pkg/process/util/containers/containers.go +++ b/pkg/process/util/containers/containers.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/kubernetes" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -104,7 +104,7 @@ func NewDefaultContainerProvider(wmeta workloadmeta.Component, tagger tagger.Com } // TODO(components): stop relying on globals and use injected components instead whenever possible. - return NewContainerProvider(metrics.GetProvider(optional.NewOption(wmeta)), wmeta, containerFilter, tagger) + return NewContainerProvider(metrics.GetProvider(option.New(wmeta)), wmeta, containerFilter, tagger) } // GetContainers returns containers found on the machine diff --git a/pkg/proto/datadog/README.md b/pkg/proto/datadog/README.md index 2bbdf6a78af86..3fc631553e9a0 100644 --- a/pkg/proto/datadog/README.md +++ b/pkg/proto/datadog/README.md @@ -1,45 +1,4 @@ -## gRPC: Protobuf and Gateway code generation +## gRPC: Protobuf and Gateway code generation -To generate the code for the API you have defined in your `.proto` -files we will need three different grpc-related packages: - -- protobuf - protoc-gen-go: generates the golang protobuf definitions. -- grpc-gateway - protoc-gen-grpc-gateway: generates the gRPC-REST gateway -- grpc-gateway - protoc-gen-swagger (optional) - -### Install - -Run the following: -``` -go install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger github.com/golang/protobuf/protoc-gen-go -``` -This should drop all required binaries in your `$GOPATH/bin` - -Remember to make sure `GOPATH/bin` is in your `PATH`, also make -sure no other versions of those binaries you may have installed -elsewhere take precedence (`which` is your friend). - -### Code Generation - -Chdir yourself into this directory (`cmd/agent/api/pb`), and run -the following commands: - -``` -protoc -I. --go_out=plugins=grpc,paths=source_relative:. api.proto -protoc -I. --grpc-gateway_out=logtostderr=true,paths=source_relative:. api.proto -``` - -Those two will generate the protobuf golang definitions _and_ the -gRPC gateway code that will allow us to serve the API also as a -REST application. - - -### Note/ToDo - -At the time of this writing we had been using the dev branch for -all the grpc projects we pull binaries for when we [install](#install) -as we had been experiencing some issues with prior versions (ie. 1.12.2). - -This should probably be formally addressed such that the versions -of the packages tracked by gomod is the same we pull for the -binaries. 
This should be part of the bootstrapping steps. +1. Ensure that you have all the tools installed in your `$PATH` by running `inv -e install-tools`. +2. To generate the code for the `.proto` files run `inv -e generate-protobuf`. diff --git a/pkg/proto/datadog/api/v1/api.proto b/pkg/proto/datadog/api/v1/api.proto index 2d03883d00cf0..d6ef3bae7e8ab 100644 --- a/pkg/proto/datadog/api/v1/api.proto +++ b/pkg/proto/datadog/api/v1/api.proto @@ -178,6 +178,14 @@ service AgentSecure { body: "*" }; }; + + // Get the host tags + rpc GetHostTags(datadog.model.v1.HostTagRequest) returns (datadog.model.v1.HostTagReply) { + option (google.api.http) = { + get: "/v1/grpc/host_tags" + }; + }; + } // Service exposed by remote agents to allow querying by the Core Agent. diff --git a/pkg/proto/datadog/model/v1/model.proto b/pkg/proto/datadog/model/v1/model.proto index 2d4f96d208483..d7c397eef6224 100644 --- a/pkg/proto/datadog/model/v1/model.proto +++ b/pkg/proto/datadog/model/v1/model.proto @@ -14,6 +14,13 @@ message HostnameReply { string hostname = 1; } +message HostTagRequest {} + +message HostTagReply { + repeated string system = 1; + repeated string googleCloudPlatform = 2; +} + // Dogstatsd capture types message CaptureTriggerRequest { diff --git a/pkg/proto/datadog/remoteconfig/remoteconfig.proto b/pkg/proto/datadog/remoteconfig/remoteconfig.proto index 488c406bae3d9..fd6a13b4c1884 100644 --- a/pkg/proto/datadog/remoteconfig/remoteconfig.proto +++ b/pkg/proto/datadog/remoteconfig/remoteconfig.proto @@ -121,17 +121,9 @@ message PackageState { string stable_version = 2; string experiment_version = 3; PackageStateTask task = 4; - reserved 5; - reserved 6; - reserved 7; - PoliciesState stable_config_state = 8; - PoliciesState experiment_config_state = 9; - PoliciesState remote_config_state = 10; -} - -message PoliciesState { - string version = 1; - repeated string matched_policies = 2; + reserved 5, 6, 7, 8, 9, 10; + string stable_config_version = 11; + string experiment_config_version = 12; } message PackageStateTask { diff --git a/pkg/proto/datadog/trace/span.proto b/pkg/proto/datadog/trace/span.proto index 3d15fcecdc094..dbd185c3e2220 100644 --- a/pkg/proto/datadog/trace/span.proto +++ b/pkg/proto/datadog/trace/span.proto @@ -8,7 +8,7 @@ message SpanLink { // @gotags: json:"trace_id" msg:"trace_id" uint64 traceID = 1; // Required. // @gotags: json:"trace_id_high" msg:"trace_id_high,omitempty" - uint64 traceID_high = 2; // Optional. The high 64 bits of a referenced trace id. + uint64 traceID_high = 2; // Optional. The high 64 bits of a referenced trace id. // @gotags: json:"span_id" msg:"span_id" uint64 spanID = 3; // Required. // @gotags: msg:"attributes,omitempty" @@ -19,6 +19,73 @@ message SpanLink { uint32 flags = 6; // Optional. W3C trace flags. If set, the high bit (bit 31) must be set. } +message SpanEvent { + // time is the number of nanoseconds between the Unix epoch and this event. + fixed64 time_unix_nano = 1; + // name is this event's name. + string name = 2; + // attributes is a mapping from attribute key string to any value. + // The order of attributes should be preserved in the key/value map. + // The supported values match the OpenTelemetry attributes specification: + // https://github.com/open-telemetry/opentelemetry-proto/blob/a8f08fc49d60538f97ffabcc7feac92f832976dd/opentelemetry/proto/common/v1/common.proto + map<string, AttributeAnyValue> attributes = 3; +} + +// AttributeAnyValue is used to represent any type of attribute value. 
AttributeAnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +message AttributeAnyValue { + // We implement a union manually here because Go's MessagePack generator does not support + // Protobuf `oneof` unions: https://github.com/tinylib/msgp/issues/184 + // Despite this, the format represented here is binary compatible with `oneof`, if we choose + // to migrate to that in the future. + AttributeAnyValueType type = 1; + + enum AttributeAnyValueType { + STRING_VALUE = 0; + BOOL_VALUE = 1; + INT_VALUE = 2; + DOUBLE_VALUE = 3; + ARRAY_VALUE = 4; + } + + string string_value = 2; + bool bool_value = 3; + int64 int_value = 4; + double double_value = 5; + AttributeArray array_value = 6; +} + + +// AttributeArray is a list of AttributeArrayValue messages. We need this as a message since `oneof` in AttributeAnyValue does not allow repeated fields. +message AttributeArray { + // Array of values. The array may be empty (contain 0 elements). + repeated AttributeArrayValue values = 1; +} + +// An element in the homogeneous AttributeArray. +// Compared to AttributeAnyValue, it only supports scalar values. +message AttributeArrayValue { + // We implement a union manually here because Go's MessagePack generator does not support + // Protobuf `oneof` unions: https://github.com/tinylib/msgp/issues/184 + // Despite this, the format represented here is binary compatible with `oneof`, if we choose + // to migrate to that in the future. + AttributeArrayValueType type = 1; + + enum AttributeArrayValueType { + STRING_VALUE = 0; + BOOL_VALUE = 1; + INT_VALUE = 2; + DOUBLE_VALUE = 3; + } + + string string_value = 2; + bool bool_value = 3; + int64 int_value = 4; + double double_value = 5; +} + + message Span { // service is the name of the service with which this span is associated. // @gotags: json:"service" msg:"service" @@ -62,4 +129,7 @@ message Span { // span_links represents a collection of links, where each link defines a causal relationship between two spans. // @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" repeated SpanLink spanLinks = 14; + // spanEvents represent an event at an instant in time related to this span, but not necessarily during the span. 
+ // @gotags: json:"span_events,omitempty" msg:"span_events,omitempty" + repeated SpanEvent spanEvents = 15; } diff --git a/pkg/proto/datadog/workloadmeta/workloadmeta.proto b/pkg/proto/datadog/workloadmeta/workloadmeta.proto index ab5126b2d53f1..f32b1202e5f10 100644 --- a/pkg/proto/datadog/workloadmeta/workloadmeta.proto +++ b/pkg/proto/datadog/workloadmeta/workloadmeta.proto @@ -67,6 +67,7 @@ enum Runtime { CRIO = 3; GARDEN = 4; ECS_FARGATE = 5; + UNKNOWN = 6; } enum ContainerStatus { diff --git a/pkg/proto/go.mod b/pkg/proto/go.mod index 890f1f5360dcf..3df52c5baf93b 100644 --- a/pkg/proto/go.mod +++ b/pkg/proto/go.mod @@ -5,32 +5,31 @@ go 1.22.0 retract v0.46.0-devel require ( - github.com/golang/mock v1.6.0 + github.com/golang/mock v1.7.0-rc.1 github.com/golang/protobuf v1.5.4 github.com/google/gofuzz v1.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 github.com/stretchr/testify v1.10.0 - github.com/tinylib/msgp v1.2.4 + github.com/tinylib/msgp v1.2.5 github.com/vmihailenco/msgpack/v4 v4.3.13 - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 - google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.35.2 + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f + google.golang.org/grpc v1.69.4 + google.golang.org/protobuf v1.36.3 ) require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/kr/pretty v0.3.1 // indirect github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/proto/go.sum b/pkg/proto/go.sum index bbbf81762e92d..ae836112e4adf 100644 --- a/pkg/proto/go.sum +++ b/pkg/proto/go.sum @@ -1,44 +1,28 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/golang/mock v1.7.0-rc.1 h1:YojYx61/OLFsiv6Rw1Z96LpldJIy31o+UHmwAUMJ6/U= +github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 
h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -47,124 +31,96 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/vmihailenco/msgpack/v4 v4.3.13 h1:A2wsiTbvp63ilDaWmsk2wjx6xZdxQOvpiNlKBGKKXKI= github.com/vmihailenco/msgpack/v4 v4.3.13/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= 
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.33.0 
h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text 
v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod 
h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/pkg/proto/pbgo/core/api.pb.go b/pkg/proto/pbgo/core/api.pb.go index 86c3b100390c1..27da44b2ba8de 100644 --- a/pkg/proto/pbgo/core/api.pb.go +++ b/pkg/proto/pbgo/core/api.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/api/v1/api.proto package core @@ -53,7 +53,7 @@ var file_datadog_api_v1_api_proto_rawDesc = []byte{ 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x15, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x0f, 0x12, 0x0d, 0x2f, 0x76, 0x31, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x6f, 0x73, 0x74, 0x32, 0xe8, 0x0e, 0x0a, 0x0b, 0x41, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x6f, 0x73, 0x74, 0x32, 0xd5, 0x0f, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x65, 0x12, 0x8f, 0x01, 0x0a, 0x14, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x23, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, @@ -172,23 +172,30 @@ var file_datadog_api_v1_api_proto_rawDesc = []byte{ 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x3a, 0x01, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x64, 0x69, 0x73, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x79, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x73, 0x30, 0x01, 0x32, 0xe6, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x6f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, - 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, - 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, - 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, - 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, - 0x74, 0x46, 0x6c, 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, - 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x72, - 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x15, - 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, - 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x67, 0x73, 0x30, 0x01, 0x12, 0x6b, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x48, 0x6f, 0x73, 0x74, + 0x54, 0x61, 0x67, 0x73, 0x12, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, + 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x54, 0x61, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, + 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x6f, 0x73, 0x74, 0x54, 0x61, + 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x1a, 0x82, 
0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, + 0x2f, 0x76, 0x31, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x74, 0x61, + 0x67, 0x73, 0x32, 0xe6, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x12, 0x6f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x12, 0x2c, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, + 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x66, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x72, 0x65, 0x46, + 0x69, 0x6c, 0x65, 0x73, 0x12, 0x29, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, + 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, + 0x61, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x2a, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, + 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x61, 0x72, 0x65, 0x46, 0x69, + 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x15, 0x5a, 0x13, 0x70, + 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_datadog_api_v1_api_proto_goTypes = []any{ @@ -202,21 +209,23 @@ var file_datadog_api_v1_api_proto_goTypes = []any{ (*empty.Empty)(nil), // 7: google.protobuf.Empty (*WorkloadmetaStreamRequest)(nil), // 8: datadog.workloadmeta.WorkloadmetaStreamRequest (*RegisterRemoteAgentRequest)(nil), // 9: datadog.remoteagent.RegisterRemoteAgentRequest - (*GetStatusDetailsRequest)(nil), // 10: datadog.remoteagent.GetStatusDetailsRequest - (*GetFlareFilesRequest)(nil), // 11: datadog.remoteagent.GetFlareFilesRequest - (*HostnameReply)(nil), // 12: datadog.model.v1.HostnameReply - (*StreamTagsResponse)(nil), // 13: datadog.model.v1.StreamTagsResponse - (*GenerateContainerIDFromOriginInfoResponse)(nil), // 14: datadog.model.v1.GenerateContainerIDFromOriginInfoResponse - (*FetchEntityResponse)(nil), // 15: datadog.model.v1.FetchEntityResponse - (*CaptureTriggerResponse)(nil), // 16: datadog.model.v1.CaptureTriggerResponse - (*TaggerStateResponse)(nil), // 17: datadog.model.v1.TaggerStateResponse - (*ClientGetConfigsResponse)(nil), // 18: datadog.config.ClientGetConfigsResponse - (*GetStateConfigResponse)(nil), // 19: datadog.config.GetStateConfigResponse - (*WorkloadmetaStreamResponse)(nil), // 20: datadog.workloadmeta.WorkloadmetaStreamResponse - (*RegisterRemoteAgentResponse)(nil), // 21: datadog.remoteagent.RegisterRemoteAgentResponse - (*AutodiscoveryStreamResponse)(nil), // 22: datadog.autodiscovery.AutodiscoveryStreamResponse - (*GetStatusDetailsResponse)(nil), // 23: datadog.remoteagent.GetStatusDetailsResponse - (*GetFlareFilesResponse)(nil), // 24: datadog.remoteagent.GetFlareFilesResponse + (*HostTagRequest)(nil), // 10: datadog.model.v1.HostTagRequest + (*GetStatusDetailsRequest)(nil), // 11: datadog.remoteagent.GetStatusDetailsRequest + 
(*GetFlareFilesRequest)(nil), // 12: datadog.remoteagent.GetFlareFilesRequest + (*HostnameReply)(nil), // 13: datadog.model.v1.HostnameReply + (*StreamTagsResponse)(nil), // 14: datadog.model.v1.StreamTagsResponse + (*GenerateContainerIDFromOriginInfoResponse)(nil), // 15: datadog.model.v1.GenerateContainerIDFromOriginInfoResponse + (*FetchEntityResponse)(nil), // 16: datadog.model.v1.FetchEntityResponse + (*CaptureTriggerResponse)(nil), // 17: datadog.model.v1.CaptureTriggerResponse + (*TaggerStateResponse)(nil), // 18: datadog.model.v1.TaggerStateResponse + (*ClientGetConfigsResponse)(nil), // 19: datadog.config.ClientGetConfigsResponse + (*GetStateConfigResponse)(nil), // 20: datadog.config.GetStateConfigResponse + (*WorkloadmetaStreamResponse)(nil), // 21: datadog.workloadmeta.WorkloadmetaStreamResponse + (*RegisterRemoteAgentResponse)(nil), // 22: datadog.remoteagent.RegisterRemoteAgentResponse + (*AutodiscoveryStreamResponse)(nil), // 23: datadog.autodiscovery.AutodiscoveryStreamResponse + (*HostTagReply)(nil), // 24: datadog.model.v1.HostTagReply + (*GetStatusDetailsResponse)(nil), // 25: datadog.remoteagent.GetStatusDetailsResponse + (*GetFlareFilesResponse)(nil), // 26: datadog.remoteagent.GetFlareFilesResponse } var file_datadog_api_v1_api_proto_depIdxs = []int32{ 0, // 0: datadog.api.v1.Agent.GetHostname:input_type -> datadog.model.v1.HostnameRequest @@ -232,25 +241,27 @@ var file_datadog_api_v1_api_proto_depIdxs = []int32{ 8, // 10: datadog.api.v1.AgentSecure.WorkloadmetaStreamEntities:input_type -> datadog.workloadmeta.WorkloadmetaStreamRequest 9, // 11: datadog.api.v1.AgentSecure.RegisterRemoteAgent:input_type -> datadog.remoteagent.RegisterRemoteAgentRequest 7, // 12: datadog.api.v1.AgentSecure.AutodiscoveryStreamConfig:input_type -> google.protobuf.Empty - 10, // 13: datadog.api.v1.RemoteAgent.GetStatusDetails:input_type -> datadog.remoteagent.GetStatusDetailsRequest - 11, // 14: datadog.api.v1.RemoteAgent.GetFlareFiles:input_type -> datadog.remoteagent.GetFlareFilesRequest - 12, // 15: datadog.api.v1.Agent.GetHostname:output_type -> datadog.model.v1.HostnameReply - 13, // 16: datadog.api.v1.AgentSecure.TaggerStreamEntities:output_type -> datadog.model.v1.StreamTagsResponse - 14, // 17: datadog.api.v1.AgentSecure.TaggerGenerateContainerIDFromOriginInfo:output_type -> datadog.model.v1.GenerateContainerIDFromOriginInfoResponse - 15, // 18: datadog.api.v1.AgentSecure.TaggerFetchEntity:output_type -> datadog.model.v1.FetchEntityResponse - 16, // 19: datadog.api.v1.AgentSecure.DogstatsdCaptureTrigger:output_type -> datadog.model.v1.CaptureTriggerResponse - 17, // 20: datadog.api.v1.AgentSecure.DogstatsdSetTaggerState:output_type -> datadog.model.v1.TaggerStateResponse - 18, // 21: datadog.api.v1.AgentSecure.ClientGetConfigs:output_type -> datadog.config.ClientGetConfigsResponse - 19, // 22: datadog.api.v1.AgentSecure.GetConfigState:output_type -> datadog.config.GetStateConfigResponse - 18, // 23: datadog.api.v1.AgentSecure.ClientGetConfigsHA:output_type -> datadog.config.ClientGetConfigsResponse - 19, // 24: datadog.api.v1.AgentSecure.GetConfigStateHA:output_type -> datadog.config.GetStateConfigResponse - 20, // 25: datadog.api.v1.AgentSecure.WorkloadmetaStreamEntities:output_type -> datadog.workloadmeta.WorkloadmetaStreamResponse - 21, // 26: datadog.api.v1.AgentSecure.RegisterRemoteAgent:output_type -> datadog.remoteagent.RegisterRemoteAgentResponse - 22, // 27: datadog.api.v1.AgentSecure.AutodiscoveryStreamConfig:output_type -> 
datadog.autodiscovery.AutodiscoveryStreamResponse - 23, // 28: datadog.api.v1.RemoteAgent.GetStatusDetails:output_type -> datadog.remoteagent.GetStatusDetailsResponse - 24, // 29: datadog.api.v1.RemoteAgent.GetFlareFiles:output_type -> datadog.remoteagent.GetFlareFilesResponse - 15, // [15:30] is the sub-list for method output_type - 0, // [0:15] is the sub-list for method input_type + 10, // 13: datadog.api.v1.AgentSecure.GetHostTags:input_type -> datadog.model.v1.HostTagRequest + 11, // 14: datadog.api.v1.RemoteAgent.GetStatusDetails:input_type -> datadog.remoteagent.GetStatusDetailsRequest + 12, // 15: datadog.api.v1.RemoteAgent.GetFlareFiles:input_type -> datadog.remoteagent.GetFlareFilesRequest + 13, // 16: datadog.api.v1.Agent.GetHostname:output_type -> datadog.model.v1.HostnameReply + 14, // 17: datadog.api.v1.AgentSecure.TaggerStreamEntities:output_type -> datadog.model.v1.StreamTagsResponse + 15, // 18: datadog.api.v1.AgentSecure.TaggerGenerateContainerIDFromOriginInfo:output_type -> datadog.model.v1.GenerateContainerIDFromOriginInfoResponse + 16, // 19: datadog.api.v1.AgentSecure.TaggerFetchEntity:output_type -> datadog.model.v1.FetchEntityResponse + 17, // 20: datadog.api.v1.AgentSecure.DogstatsdCaptureTrigger:output_type -> datadog.model.v1.CaptureTriggerResponse + 18, // 21: datadog.api.v1.AgentSecure.DogstatsdSetTaggerState:output_type -> datadog.model.v1.TaggerStateResponse + 19, // 22: datadog.api.v1.AgentSecure.ClientGetConfigs:output_type -> datadog.config.ClientGetConfigsResponse + 20, // 23: datadog.api.v1.AgentSecure.GetConfigState:output_type -> datadog.config.GetStateConfigResponse + 19, // 24: datadog.api.v1.AgentSecure.ClientGetConfigsHA:output_type -> datadog.config.ClientGetConfigsResponse + 20, // 25: datadog.api.v1.AgentSecure.GetConfigStateHA:output_type -> datadog.config.GetStateConfigResponse + 21, // 26: datadog.api.v1.AgentSecure.WorkloadmetaStreamEntities:output_type -> datadog.workloadmeta.WorkloadmetaStreamResponse + 22, // 27: datadog.api.v1.AgentSecure.RegisterRemoteAgent:output_type -> datadog.remoteagent.RegisterRemoteAgentResponse + 23, // 28: datadog.api.v1.AgentSecure.AutodiscoveryStreamConfig:output_type -> datadog.autodiscovery.AutodiscoveryStreamResponse + 24, // 29: datadog.api.v1.AgentSecure.GetHostTags:output_type -> datadog.model.v1.HostTagReply + 25, // 30: datadog.api.v1.RemoteAgent.GetStatusDetails:output_type -> datadog.remoteagent.GetStatusDetailsResponse + 26, // 31: datadog.api.v1.RemoteAgent.GetFlareFiles:output_type -> datadog.remoteagent.GetFlareFilesResponse + 16, // [16:32] is the sub-list for method output_type + 0, // [0:16] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name @@ -464,6 +475,8 @@ type AgentSecureClient interface { RegisterRemoteAgent(ctx context.Context, in *RegisterRemoteAgentRequest, opts ...grpc.CallOption) (*RegisterRemoteAgentResponse, error) // Subscribes to autodiscovery config updates AutodiscoveryStreamConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (AgentSecure_AutodiscoveryStreamConfigClient, error) + // Get the host tags + GetHostTags(ctx context.Context, in *HostTagRequest, opts ...grpc.CallOption) (*HostTagReply, error) } type agentSecureClient struct { @@ -651,6 +664,15 @@ func (x *agentSecureAutodiscoveryStreamConfigClient) Recv() (*AutodiscoveryStrea return m, nil } +func (c *agentSecureClient) GetHostTags(ctx context.Context, in 
*HostTagRequest, opts ...grpc.CallOption) (*HostTagReply, error) { + out := new(HostTagReply) + err := c.cc.Invoke(ctx, "/datadog.api.v1.AgentSecure/GetHostTags", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // AgentSecureServer is the server API for AgentSecure service. type AgentSecureServer interface { // subscribes to added, removed, or changed entities in the Tagger @@ -746,6 +768,8 @@ type AgentSecureServer interface { RegisterRemoteAgent(context.Context, *RegisterRemoteAgentRequest) (*RegisterRemoteAgentResponse, error) // Subscribes to autodiscovery config updates AutodiscoveryStreamConfig(*empty.Empty, AgentSecure_AutodiscoveryStreamConfigServer) error + // Get the host tags + GetHostTags(context.Context, *HostTagRequest) (*HostTagReply, error) } // UnimplementedAgentSecureServer can be embedded to have forward compatible implementations. @@ -788,6 +812,9 @@ func (*UnimplementedAgentSecureServer) RegisterRemoteAgent(context.Context, *Reg func (*UnimplementedAgentSecureServer) AutodiscoveryStreamConfig(*empty.Empty, AgentSecure_AutodiscoveryStreamConfigServer) error { return status.Errorf(codes.Unimplemented, "method AutodiscoveryStreamConfig not implemented") } +func (*UnimplementedAgentSecureServer) GetHostTags(context.Context, *HostTagRequest) (*HostTagReply, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHostTags not implemented") +} func RegisterAgentSecureServer(s *grpc.Server, srv AgentSecureServer) { s.RegisterService(&_AgentSecure_serviceDesc, srv) @@ -1018,6 +1045,24 @@ func (x *agentSecureAutodiscoveryStreamConfigServer) Send(m *AutodiscoveryStream return x.ServerStream.SendMsg(m) } +func _AgentSecure_GetHostTags_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HostTagRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AgentSecureServer).GetHostTags(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/datadog.api.v1.AgentSecure/GetHostTags", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AgentSecureServer).GetHostTags(ctx, req.(*HostTagRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _AgentSecure_serviceDesc = grpc.ServiceDesc{ ServiceName: "datadog.api.v1.AgentSecure", HandlerType: (*AgentSecureServer)(nil), @@ -1058,6 +1103,10 @@ var _AgentSecure_serviceDesc = grpc.ServiceDesc{ MethodName: "RegisterRemoteAgent", Handler: _AgentSecure_RegisterRemoteAgent_Handler, }, + { + MethodName: "GetHostTags", + Handler: _AgentSecure_GetHostTags_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/pkg/proto/pbgo/core/api.pb.gw.go b/pkg/proto/pbgo/core/api.pb.gw.go index 7be078c4eb534..f28d630456bca 100644 --- a/pkg/proto/pbgo/core/api.pb.gw.go +++ b/pkg/proto/pbgo/core/api.pb.gw.go @@ -10,60 +10,58 @@ package core import ( "context" + "errors" "io" "net/http" - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" "github.com/golang/protobuf/ptypes/empty" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/metadata" "google.golang.org/grpc/status" + 
"google.golang.org/protobuf/proto" ) // Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join +var ( + _ codes.Code + _ io.Reader + _ status.Status + _ = errors.New + _ = runtime.String + _ = utilities.NewDoubleArray + _ = metadata.Join +) func request_Agent_GetHostname_0(ctx context.Context, marshaler runtime.Marshaler, client AgentClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HostnameRequest - var metadata runtime.ServerMetadata - + var ( + protoReq HostnameRequest + metadata runtime.ServerMetadata + ) msg, err := client.GetHostname(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_Agent_GetHostname_0(ctx context.Context, marshaler runtime.Marshaler, server AgentServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq HostnameRequest - var metadata runtime.ServerMetadata - + var ( + protoReq HostnameRequest + metadata runtime.ServerMetadata + ) msg, err := server.GetHostname(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_TaggerStreamEntities_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (AgentSecure_TaggerStreamEntitiesClient, runtime.ServerMetadata, error) { - var protoReq StreamTagsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq StreamTagsRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - stream, err := client.TaggerStreamEntities(ctx, &protoReq) if err != nil { return nil, metadata, err @@ -74,293 +72,208 @@ func request_AgentSecure_TaggerStreamEntities_0(ctx context.Context, marshaler r } metadata.HeaderMD = header return stream, metadata, nil - } func request_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GenerateContainerIDFromOriginInfoRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq GenerateContainerIDFromOriginInfoRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.TaggerGenerateContainerIDFromOriginInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(ctx context.Context, marshaler runtime.Marshaler, 
server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq GenerateContainerIDFromOriginInfoRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq GenerateContainerIDFromOriginInfoRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.TaggerGenerateContainerIDFromOriginInfo(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_TaggerFetchEntity_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FetchEntityRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq FetchEntityRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.TaggerFetchEntity(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_AgentSecure_TaggerFetchEntity_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq FetchEntityRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq FetchEntityRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.TaggerFetchEntity(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_DogstatsdCaptureTrigger_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CaptureTriggerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq CaptureTriggerRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.DogstatsdCaptureTrigger(ctx, &protoReq, 
grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_AgentSecure_DogstatsdCaptureTrigger_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq CaptureTriggerRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq CaptureTriggerRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.DogstatsdCaptureTrigger(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_DogstatsdSetTaggerState_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq TaggerState - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq TaggerState + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.DogstatsdSetTaggerState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_AgentSecure_DogstatsdSetTaggerState_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq TaggerState - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq TaggerState + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.DogstatsdSetTaggerState(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_ClientGetConfigs_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ClientGetConfigsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq ClientGetConfigsRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return 
nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.ClientGetConfigs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_AgentSecure_ClientGetConfigs_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ClientGetConfigsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq ClientGetConfigsRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.ClientGetConfigs(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_GetConfigState_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq empty.Empty + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetConfigState(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_AgentSecure_GetConfigState_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq empty.Empty + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetConfigState(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_ClientGetConfigsHA_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ClientGetConfigsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq ClientGetConfigsRequest + metadata runtime.ServerMetadata + ) + if err := 
marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.ClientGetConfigsHA(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_AgentSecure_ClientGetConfigsHA_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ClientGetConfigsRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq ClientGetConfigsRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.ClientGetConfigsHA(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_GetConfigStateHA_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq empty.Empty + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.GetConfigStateHA(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_AgentSecure_GetConfigStateHA_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq empty.Empty + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.GetConfigStateHA(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_WorkloadmetaStreamEntities_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (AgentSecure_WorkloadmetaStreamEntitiesClient, runtime.ServerMetadata, error) { - var protoReq WorkloadmetaStreamRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := 
marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq WorkloadmetaStreamRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - stream, err := client.WorkloadmetaStreamEntities(ctx, &protoReq) if err != nil { return nil, metadata, err @@ -371,55 +284,40 @@ func request_AgentSecure_WorkloadmetaStreamEntities_0(ctx context.Context, marsh } metadata.HeaderMD = header return stream, metadata, nil - } func request_AgentSecure_RegisterRemoteAgent_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RegisterRemoteAgentRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq RegisterRemoteAgentRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.RegisterRemoteAgent(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err - } func local_request_AgentSecure_RegisterRemoteAgent_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq RegisterRemoteAgentRequest - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq RegisterRemoteAgentRequest + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.RegisterRemoteAgent(ctx, &protoReq) return msg, metadata, err - } func request_AgentSecure_AutodiscoveryStreamConfig_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (AgentSecure_AutodiscoveryStreamConfigClient, runtime.ServerMetadata, error) { - var protoReq empty.Empty - var metadata runtime.ServerMetadata - - newReader, berr := utilities.IOReaderFactory(req.Body) - if berr != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) - } - if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + var ( + protoReq empty.Empty + metadata runtime.ServerMetadata + ) + if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) { return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - stream, err := client.AutodiscoveryStreamConfig(ctx, &protoReq) if err != nil { return nil, metadata, err @@ -430,36 +328,51 @@ func request_AgentSecure_AutodiscoveryStreamConfig_0(ctx context.Context, marsha } metadata.HeaderMD = header return stream, 
metadata, nil +} + +func request_AgentSecure_GetHostTags_0(ctx context.Context, marshaler runtime.Marshaler, client AgentSecureClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq HostTagRequest + metadata runtime.ServerMetadata + ) + msg, err := client.GetHostTags(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} +func local_request_AgentSecure_GetHostTags_0(ctx context.Context, marshaler runtime.Marshaler, server AgentSecureServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq HostTagRequest + metadata runtime.ServerMetadata + ) + msg, err := server.GetHostTags(ctx, &protoReq) + return msg, metadata, err } // RegisterAgentHandlerServer registers the http handlers for service Agent to "mux". // UnaryRPC :call AgentServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAgentHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. func RegisterAgentHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AgentServer) error { - - mux.Handle("GET", pattern_Agent_GetHostname_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_Agent_GetHostname_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.Agent/GetHostname", runtime.WithHTTPPathPattern("/v1/grpc/host")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Agent_GetHostname_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Agent_GetHostname_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_Agent_GetHostname_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_Agent_GetHostname_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) return nil @@ -469,235 +382,228 @@ func RegisterAgentHandlerServer(ctx context.Context, mux *runtime.ServeMux, serv // UnaryRPC :call AgentSecureServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. // Note that using this registration option will cause many gRPC library features to stop working. 
Consider using RegisterAgentSecureHandlerFromEndpoint instead. +// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call. func RegisterAgentSecureHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AgentSecureServer) error { - - mux.Handle("POST", pattern_AgentSecure_TaggerStreamEntities_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_TaggerStreamEntities_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return }) - - mux.Handle("POST", pattern_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/TaggerGenerateContainerIDFromOriginInfo", runtime.WithHTTPPathPattern("/v1/grpc/tagger/generate_container_id_from_origin_info")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("POST", pattern_AgentSecure_TaggerFetchEntity_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_TaggerFetchEntity_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/TaggerFetchEntity", runtime.WithHTTPPathPattern("/v1/grpc/tagger/fetch_entity")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_AgentSecure_TaggerFetchEntity_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_AgentSecure_TaggerFetchEntity_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_TaggerFetchEntity_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_TaggerFetchEntity_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_DogstatsdCaptureTrigger_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_DogstatsdCaptureTrigger_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/DogstatsdCaptureTrigger", runtime.WithHTTPPathPattern("/v1/grpc/dogstatsd/capture/trigger")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_AgentSecure_DogstatsdCaptureTrigger_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_AgentSecure_DogstatsdCaptureTrigger_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_DogstatsdCaptureTrigger_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- + forward_AgentSecure_DogstatsdCaptureTrigger_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_DogstatsdSetTaggerState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_DogstatsdSetTaggerState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/DogstatsdSetTaggerState", runtime.WithHTTPPathPattern("/v1/grpc/dogstatsd/capture/state")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_AgentSecure_DogstatsdSetTaggerState_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_AgentSecure_DogstatsdSetTaggerState_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_DogstatsdSetTaggerState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_DogstatsdSetTaggerState_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("POST", pattern_AgentSecure_ClientGetConfigs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_ClientGetConfigs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/ClientGetConfigs", runtime.WithHTTPPathPattern("/v1/grpc/remoteconfig/configs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_AgentSecure_ClientGetConfigs_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_AgentSecure_ClientGetConfigs_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_ClientGetConfigs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_ClientGetConfigs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_GetConfigState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_GetConfigState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/GetConfigState", runtime.WithHTTPPathPattern("/v1/grpc/remoteconfig/state")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_AgentSecure_GetConfigState_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_AgentSecure_GetConfigState_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_GetConfigState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_GetConfigState_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("POST", pattern_AgentSecure_ClientGetConfigsHA_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_ClientGetConfigsHA_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/ClientGetConfigsHA", runtime.WithHTTPPathPattern("/v1/grpc/remoteconfig/configs_ha")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_AgentSecure_ClientGetConfigsHA_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_AgentSecure_ClientGetConfigsHA_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_ClientGetConfigsHA_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_ClientGetConfigsHA_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_GetConfigStateHA_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_GetConfigStateHA_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/GetConfigStateHA", runtime.WithHTTPPathPattern("/v1/grpc/remoteconfig/state_ha")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_AgentSecure_GetConfigStateHA_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_AgentSecure_GetConfigStateHA_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_GetConfigStateHA_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- + forward_AgentSecure_GetConfigStateHA_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - mux.Handle("POST", pattern_AgentSecure_WorkloadmetaStreamEntities_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_WorkloadmetaStreamEntities_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return }) - - mux.Handle("POST", pattern_AgentSecure_RegisterRemoteAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_RegisterRemoteAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/RegisterRemoteAgent", runtime.WithHTTPPathPattern("/v1/grpc/remoteagent/register_remote_agent")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_AgentSecure_RegisterRemoteAgent_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_AgentSecure_RegisterRemoteAgent_0(annotatedContext, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_RegisterRemoteAgent_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_RegisterRemoteAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - mux.Handle("POST", pattern_AgentSecure_AutodiscoveryStreamConfig_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_AutodiscoveryStreamConfig_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { err := status.Error(codes.Unimplemented, "streaming calls are not yet supported in the in-process transport") _, outboundMarshaler := runtime.MarshalerForRequest(mux, req) runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return }) + mux.Handle(http.MethodGet, pattern_AgentSecure_GetHostTags_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/GetHostTags", runtime.WithHTTPPathPattern("/v1/grpc/host_tags")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_AgentSecure_GetHostTags_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_AgentSecure_GetHostTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + }) return nil } @@ -705,25 +611,24 @@ func RegisterAgentSecureHandlerServer(ctx context.Context, mux *runtime.ServeMux // RegisterAgentHandlerFromEndpoint is same as RegisterAgentHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterAgentHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() - return RegisterAgentHandler(ctx, mux, conn) } @@ -737,34 +642,30 @@ func RegisterAgentHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AgentClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AgentClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "AgentClient" to call the correct interceptors. +// "AgentClient" to call the correct interceptors. This client ignores the HTTP middlewares. 
func RegisterAgentHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AgentClient) error { - - mux.Handle("GET", pattern_Agent_GetHostname_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodGet, pattern_Agent_GetHostname_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.Agent/GetHostname", runtime.WithHTTPPathPattern("/v1/grpc/host")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Agent_GetHostname_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_Agent_GetHostname_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_Agent_GetHostname_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_Agent_GetHostname_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - return nil } var ( - pattern_Agent_GetHostname_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "grpc", "host"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_Agent_GetHostname_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "grpc", "host"}, "")) ) var ( @@ -774,25 +675,24 @@ var ( // RegisterAgentSecureHandlerFromEndpoint is same as RegisterAgentSecureHandler but // automatically dials to "endpoint" and closes the connection when "ctx" gets done. func RegisterAgentSecureHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) + conn, err := grpc.NewClient(endpoint, opts...) if err != nil { return err } defer func() { if err != nil { if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } return } go func() { <-ctx.Done() if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr) } }() }() - return RegisterAgentSecureHandler(ctx, mux, conn) } @@ -806,300 +706,260 @@ func RegisterAgentSecureHandler(ctx context.Context, mux *runtime.ServeMux, conn // to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AgentSecureClient". // Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AgentSecureClient" // doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "AgentSecureClient" to call the correct interceptors. +// "AgentSecureClient" to call the correct interceptors. This client ignores the HTTP middlewares. 
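
For reference, a minimal sketch of exposing the service over HTTP with the grpc-gateway v2 runtime imported above, using the RegisterAgentSecureHandlerFromEndpoint helper from this file; the listen address, gRPC endpoint, and insecure dial option are placeholders for the Agent's actual wiring. Once the mux is registered, GET /v1/grpc/host_tags is forwarded to AgentSecure.GetHostTags through the generated pattern.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	core "github.com/DataDog/datadog-agent/pkg/proto/pbgo/core"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Placeholder gRPC endpoint and dial options; the Agent itself dials with TLS credentials.
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}

	gwmux := runtime.NewServeMux()
	if err := core.RegisterAgentSecureHandlerFromEndpoint(ctx, gwmux, "127.0.0.1:5001", opts); err != nil {
		log.Fatal(err)
	}

	// Requests to GET /v1/grpc/host_tags are now proxied to AgentSecure.GetHostTags.
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", gwmux))
}
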
func RegisterAgentSecureHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AgentSecureClient) error { - - mux.Handle("POST", pattern_AgentSecure_TaggerStreamEntities_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_TaggerStreamEntities_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/TaggerStreamEntities", runtime.WithHTTPPathPattern("/v1/grpc/tagger/stream_entities")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_TaggerStreamEntities_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_TaggerStreamEntities_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_TaggerStreamEntities_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_TaggerStreamEntities_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/TaggerGenerateContainerIDFromOriginInfo", runtime.WithHTTPPathPattern("/v1/grpc/tagger/generate_container_id_from_origin_info")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("POST", pattern_AgentSecure_TaggerFetchEntity_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_TaggerFetchEntity_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/TaggerFetchEntity", runtime.WithHTTPPathPattern("/v1/grpc/tagger/fetch_entity")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_TaggerFetchEntity_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_TaggerFetchEntity_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_TaggerFetchEntity_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_TaggerFetchEntity_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_DogstatsdCaptureTrigger_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_DogstatsdCaptureTrigger_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/DogstatsdCaptureTrigger", runtime.WithHTTPPathPattern("/v1/grpc/dogstatsd/capture/trigger")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_DogstatsdCaptureTrigger_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_DogstatsdCaptureTrigger_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_DogstatsdCaptureTrigger_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_DogstatsdCaptureTrigger_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("POST", pattern_AgentSecure_DogstatsdSetTaggerState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_DogstatsdSetTaggerState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/DogstatsdSetTaggerState", runtime.WithHTTPPathPattern("/v1/grpc/dogstatsd/capture/state")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_DogstatsdSetTaggerState_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_DogstatsdSetTaggerState_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_DogstatsdSetTaggerState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_DogstatsdSetTaggerState_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_ClientGetConfigs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_ClientGetConfigs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/ClientGetConfigs", runtime.WithHTTPPathPattern("/v1/grpc/remoteconfig/configs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_ClientGetConfigs_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_ClientGetConfigs_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_ClientGetConfigs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_ClientGetConfigs_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("POST", pattern_AgentSecure_GetConfigState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_GetConfigState_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/GetConfigState", runtime.WithHTTPPathPattern("/v1/grpc/remoteconfig/state")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_GetConfigState_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_GetConfigState_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_GetConfigState_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_GetConfigState_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_ClientGetConfigsHA_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_ClientGetConfigsHA_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/ClientGetConfigsHA", runtime.WithHTTPPathPattern("/v1/grpc/remoteconfig/configs_ha")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_ClientGetConfigsHA_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_ClientGetConfigsHA_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_ClientGetConfigsHA_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_ClientGetConfigsHA_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("POST", pattern_AgentSecure_GetConfigStateHA_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_GetConfigStateHA_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/GetConfigStateHA", runtime.WithHTTPPathPattern("/v1/grpc/remoteconfig/state_ha")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_GetConfigStateHA_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_GetConfigStateHA_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_GetConfigStateHA_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_GetConfigStateHA_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_WorkloadmetaStreamEntities_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_WorkloadmetaStreamEntities_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/WorkloadmetaStreamEntities", runtime.WithHTTPPathPattern("/v1/grpc/workloadmeta/stream_entities")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_WorkloadmetaStreamEntities_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_WorkloadmetaStreamEntities_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_WorkloadmetaStreamEntities_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_WorkloadmetaStreamEntities_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
}) - - mux.Handle("POST", pattern_AgentSecure_RegisterRemoteAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_RegisterRemoteAgent_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/RegisterRemoteAgent", runtime.WithHTTPPathPattern("/v1/grpc/remoteagent/register_remote_agent")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_RegisterRemoteAgent_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_RegisterRemoteAgent_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_RegisterRemoteAgent_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - + forward_AgentSecure_RegisterRemoteAgent_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - - mux.Handle("POST", pattern_AgentSecure_AutodiscoveryStreamConfig_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle(http.MethodPost, pattern_AgentSecure_AutodiscoveryStreamConfig_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/AutodiscoveryStreamConfig", runtime.WithHTTPPathPattern("/v1/grpc/autodiscovery/stream_configs")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_AgentSecure_AutodiscoveryStreamConfig_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) + resp, md, err := request_AgentSecure_AutodiscoveryStreamConfig_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_AgentSecure_AutodiscoveryStreamConfig_0(annotatedContext, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) 
+ }) + mux.Handle(http.MethodGet, pattern_AgentSecure_GetHostTags_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/datadog.api.v1.AgentSecure/GetHostTags", runtime.WithHTTPPathPattern("/v1/grpc/host_tags")) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - - forward_AgentSecure_AutodiscoveryStreamConfig_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) - + resp, md, err := request_AgentSecure_GetHostTags_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + forward_AgentSecure_GetHostTags_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) }) - return nil } var ( - pattern_AgentSecure_TaggerStreamEntities_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "tagger", "stream_entities"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "tagger", "generate_container_id_from_origin_info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_TaggerFetchEntity_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "tagger", "fetch_entity"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_DogstatsdCaptureTrigger_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"v1", "grpc", "dogstatsd", "capture", "trigger"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_DogstatsdSetTaggerState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"v1", "grpc", "dogstatsd", "capture", "state"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_ClientGetConfigs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteconfig", "configs"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_GetConfigState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteconfig", "state"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_ClientGetConfigsHA_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteconfig", "configs_ha"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_GetConfigStateHA_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteconfig", "state_ha"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_WorkloadmetaStreamEntities_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "workloadmeta", "stream_entities"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_RegisterRemoteAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteagent", "register_remote_agent"}, "", 
runtime.AssumeColonVerbOpt(true))) - - pattern_AgentSecure_AutodiscoveryStreamConfig_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "autodiscovery", "stream_configs"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_AgentSecure_TaggerStreamEntities_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "tagger", "stream_entities"}, "")) + pattern_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "tagger", "generate_container_id_from_origin_info"}, "")) + pattern_AgentSecure_TaggerFetchEntity_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "tagger", "fetch_entity"}, "")) + pattern_AgentSecure_DogstatsdCaptureTrigger_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"v1", "grpc", "dogstatsd", "capture", "trigger"}, "")) + pattern_AgentSecure_DogstatsdSetTaggerState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"v1", "grpc", "dogstatsd", "capture", "state"}, "")) + pattern_AgentSecure_ClientGetConfigs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteconfig", "configs"}, "")) + pattern_AgentSecure_GetConfigState_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteconfig", "state"}, "")) + pattern_AgentSecure_ClientGetConfigsHA_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteconfig", "configs_ha"}, "")) + pattern_AgentSecure_GetConfigStateHA_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteconfig", "state_ha"}, "")) + pattern_AgentSecure_WorkloadmetaStreamEntities_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "workloadmeta", "stream_entities"}, "")) + pattern_AgentSecure_RegisterRemoteAgent_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "remoteagent", "register_remote_agent"}, "")) + pattern_AgentSecure_AutodiscoveryStreamConfig_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "grpc", "autodiscovery", "stream_configs"}, "")) + pattern_AgentSecure_GetHostTags_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "grpc", "host_tags"}, "")) ) var ( - forward_AgentSecure_TaggerStreamEntities_0 = runtime.ForwardResponseStream - + forward_AgentSecure_TaggerStreamEntities_0 = runtime.ForwardResponseStream forward_AgentSecure_TaggerGenerateContainerIDFromOriginInfo_0 = runtime.ForwardResponseMessage - - forward_AgentSecure_TaggerFetchEntity_0 = runtime.ForwardResponseMessage - - forward_AgentSecure_DogstatsdCaptureTrigger_0 = runtime.ForwardResponseMessage - - forward_AgentSecure_DogstatsdSetTaggerState_0 = runtime.ForwardResponseMessage - - forward_AgentSecure_ClientGetConfigs_0 = runtime.ForwardResponseMessage - - forward_AgentSecure_GetConfigState_0 = runtime.ForwardResponseMessage - - forward_AgentSecure_ClientGetConfigsHA_0 = runtime.ForwardResponseMessage - - forward_AgentSecure_GetConfigStateHA_0 = runtime.ForwardResponseMessage - - forward_AgentSecure_WorkloadmetaStreamEntities_0 = runtime.ForwardResponseStream - - forward_AgentSecure_RegisterRemoteAgent_0 
= runtime.ForwardResponseMessage - - forward_AgentSecure_AutodiscoveryStreamConfig_0 = runtime.ForwardResponseStream + forward_AgentSecure_TaggerFetchEntity_0 = runtime.ForwardResponseMessage + forward_AgentSecure_DogstatsdCaptureTrigger_0 = runtime.ForwardResponseMessage + forward_AgentSecure_DogstatsdSetTaggerState_0 = runtime.ForwardResponseMessage + forward_AgentSecure_ClientGetConfigs_0 = runtime.ForwardResponseMessage + forward_AgentSecure_GetConfigState_0 = runtime.ForwardResponseMessage + forward_AgentSecure_ClientGetConfigsHA_0 = runtime.ForwardResponseMessage + forward_AgentSecure_GetConfigStateHA_0 = runtime.ForwardResponseMessage + forward_AgentSecure_WorkloadmetaStreamEntities_0 = runtime.ForwardResponseStream + forward_AgentSecure_RegisterRemoteAgent_0 = runtime.ForwardResponseMessage + forward_AgentSecure_AutodiscoveryStreamConfig_0 = runtime.ForwardResponseStream + forward_AgentSecure_GetHostTags_0 = runtime.ForwardResponseMessage ) diff --git a/pkg/proto/pbgo/core/autodiscovery.pb.go b/pkg/proto/pbgo/core/autodiscovery.pb.go index 898df3ccfe4af..711d000f0328a 100644 --- a/pkg/proto/pbgo/core/autodiscovery.pb.go +++ b/pkg/proto/pbgo/core/autodiscovery.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/autodiscovery/autodiscovery.proto package core @@ -67,12 +67,11 @@ func (ConfigEventType) EnumDescriptor() ([]byte, []int) { } type KubeNamespacedName struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + sizeCache protoimpl.SizeCache } func (x *KubeNamespacedName) Reset() { @@ -120,12 +119,11 @@ func (x *KubeNamespacedName) GetNamespace() string { } type AdvancedADIdentifier struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + KubeService *KubeNamespacedName `protobuf:"bytes,1,opt,name=kubeService,proto3" json:"kubeService,omitempty"` + KubeEndpoints *KubeNamespacedName `protobuf:"bytes,2,opt,name=kubeEndpoints,proto3" json:"kubeEndpoints,omitempty"` unknownFields protoimpl.UnknownFields - - KubeService *KubeNamespacedName `protobuf:"bytes,1,opt,name=kubeService,proto3" json:"kubeService,omitempty"` - KubeEndpoints *KubeNamespacedName `protobuf:"bytes,2,opt,name=kubeEndpoints,proto3" json:"kubeEndpoints,omitempty"` + sizeCache protoimpl.SizeCache } func (x *AdvancedADIdentifier) Reset() { @@ -173,10 +171,7 @@ func (x *AdvancedADIdentifier) GetKubeEndpoints() *KubeNamespacedName { } type Config struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Instances [][]byte `protobuf:"bytes,2,rep,name=instances,proto3" json:"instances,omitempty"` InitConfig []byte `protobuf:"bytes,3,opt,name=initConfig,proto3" json:"initConfig,omitempty"` @@ -194,6 +189,8 @@ type Config struct { MetricsExcluded bool 
`protobuf:"varint,15,opt,name=metricsExcluded,proto3" json:"metricsExcluded,omitempty"` LogsExcluded bool `protobuf:"varint,16,opt,name=logsExcluded,proto3" json:"logsExcluded,omitempty"` EventType ConfigEventType `protobuf:"varint,17,opt,name=eventType,proto3,enum=datadog.autodiscovery.ConfigEventType" json:"eventType,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Config) Reset() { @@ -346,11 +343,10 @@ func (x *Config) GetEventType() ConfigEventType { } type AutodiscoveryStreamResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Configs []*Config `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"` unknownFields protoimpl.UnknownFields - - Configs []*Config `protobuf:"bytes,1,rep,name=configs,proto3" json:"configs,omitempty"` + sizeCache protoimpl.SizeCache } func (x *AutodiscoveryStreamResponse) Reset() { diff --git a/pkg/proto/pbgo/core/model.pb.go b/pkg/proto/pbgo/core/model.pb.go index 9262b4dc43a5a..2053e3233c594 100644 --- a/pkg/proto/pbgo/core/model.pb.go +++ b/pkg/proto/pbgo/core/model.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/model/v1/model.proto package core @@ -119,9 +119,9 @@ func (TagCardinality) EnumDescriptor() ([]byte, []int) { } type HostnameRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *HostnameRequest) Reset() { @@ -156,11 +156,10 @@ func (*HostnameRequest) Descriptor() ([]byte, []int) { // The response message containing the requested hostname type HostnameReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` unknownFields protoimpl.UnknownFields - - Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + sizeCache protoimpl.SizeCache } func (x *HostnameReply) Reset() { @@ -200,20 +199,107 @@ func (x *HostnameReply) GetHostname() string { return "" } +type HostTagRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HostTagRequest) Reset() { + *x = HostTagRequest{} + mi := &file_datadog_model_v1_model_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HostTagRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostTagRequest) ProtoMessage() {} + +func (x *HostTagRequest) ProtoReflect() protoreflect.Message { + mi := &file_datadog_model_v1_model_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostTagRequest.ProtoReflect.Descriptor instead. 
+func (*HostTagRequest) Descriptor() ([]byte, []int) { + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{2} +} + +type HostTagReply struct { + state protoimpl.MessageState `protogen:"open.v1"` + System []string `protobuf:"bytes,1,rep,name=system,proto3" json:"system,omitempty"` + GoogleCloudPlatform []string `protobuf:"bytes,2,rep,name=googleCloudPlatform,proto3" json:"googleCloudPlatform,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HostTagReply) Reset() { + *x = HostTagReply{} + mi := &file_datadog_model_v1_model_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HostTagReply) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HostTagReply) ProtoMessage() {} + +func (x *HostTagReply) ProtoReflect() protoreflect.Message { + mi := &file_datadog_model_v1_model_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HostTagReply.ProtoReflect.Descriptor instead. +func (*HostTagReply) Descriptor() ([]byte, []int) { + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{3} +} + +func (x *HostTagReply) GetSystem() []string { + if x != nil { + return x.System + } + return nil +} + +func (x *HostTagReply) GetGoogleCloudPlatform() []string { + if x != nil { + return x.GoogleCloudPlatform + } + return nil +} + // Dogstatsd capture types type CaptureTriggerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Duration string `protobuf:"bytes,1,opt,name=duration,proto3" json:"duration,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` + Compressed bool `protobuf:"varint,3,opt,name=compressed,proto3" json:"compressed,omitempty"` unknownFields protoimpl.UnknownFields - - Duration string `protobuf:"bytes,1,opt,name=duration,proto3" json:"duration,omitempty"` - Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` - Compressed bool `protobuf:"varint,3,opt,name=compressed,proto3" json:"compressed,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CaptureTriggerRequest) Reset() { *x = CaptureTriggerRequest{} - mi := &file_datadog_model_v1_model_proto_msgTypes[2] + mi := &file_datadog_model_v1_model_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -225,7 +311,7 @@ func (x *CaptureTriggerRequest) String() string { func (*CaptureTriggerRequest) ProtoMessage() {} func (x *CaptureTriggerRequest) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[2] + mi := &file_datadog_model_v1_model_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -238,7 +324,7 @@ func (x *CaptureTriggerRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CaptureTriggerRequest.ProtoReflect.Descriptor instead. 
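// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the generated file: consuming the new
// host-tags API added in this revision. HostTagRequest has no fields,
// HostTagReply carries two repeated string fields, and the gateway maps the
// call to GET /v1/grpc/host_tags. The method name GetHostTags on
// AgentSecureClient is inferred from the generated
// request_AgentSecure_GetHostTags_0 handler; the snippet assumes the standard
// "context" and "fmt" imports and a client built as in the earlier sketch.

func printHostTags(ctx context.Context, client AgentSecureClient) error {
	reply, err := client.GetHostTags(ctx, &HostTagRequest{})
	if err != nil {
		return err
	}
	// The generated getters are nil-safe, so the reply can be read directly.
	fmt.Println("system host tags:", reply.GetSystem())
	fmt.Println("gcp host tags:", reply.GetGoogleCloudPlatform())
	return nil
}
// ---------------------------------------------------------------------------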
func (*CaptureTriggerRequest) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{2} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{4} } func (x *CaptureTriggerRequest) GetDuration() string { @@ -263,16 +349,15 @@ func (x *CaptureTriggerRequest) GetCompressed() bool { } type CaptureTriggerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + sizeCache protoimpl.SizeCache } func (x *CaptureTriggerResponse) Reset() { *x = CaptureTriggerResponse{} - mi := &file_datadog_model_v1_model_proto_msgTypes[3] + mi := &file_datadog_model_v1_model_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -284,7 +369,7 @@ func (x *CaptureTriggerResponse) String() string { func (*CaptureTriggerResponse) ProtoMessage() {} func (x *CaptureTriggerResponse) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[3] + mi := &file_datadog_model_v1_model_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -297,7 +382,7 @@ func (x *CaptureTriggerResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CaptureTriggerResponse.ProtoReflect.Descriptor instead. func (*CaptureTriggerResponse) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{3} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{5} } func (x *CaptureTriggerResponse) GetPath() string { @@ -308,20 +393,19 @@ func (x *CaptureTriggerResponse) GetPath() string { } type StreamTagsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Cardinality TagCardinality `protobuf:"varint,1,opt,name=cardinality,proto3,enum=datadog.model.v1.TagCardinality" json:"cardinality,omitempty"` + IncludeFilter *DeprecatedFilter `protobuf:"bytes,2,opt,name=includeFilter,proto3" json:"includeFilter,omitempty"` + ExcludeFilter *DeprecatedFilter `protobuf:"bytes,3,opt,name=excludeFilter,proto3" json:"excludeFilter,omitempty"` + Prefixes []string `protobuf:"bytes,4,rep,name=prefixes,proto3" json:"prefixes,omitempty"` + StreamingID string `protobuf:"bytes,5,opt,name=streamingID,proto3" json:"streamingID,omitempty"` unknownFields protoimpl.UnknownFields - - Cardinality TagCardinality `protobuf:"varint,1,opt,name=cardinality,proto3,enum=datadog.model.v1.TagCardinality" json:"cardinality,omitempty"` - IncludeFilter *DeprecatedFilter `protobuf:"bytes,2,opt,name=includeFilter,proto3" json:"includeFilter,omitempty"` - ExcludeFilter *DeprecatedFilter `protobuf:"bytes,3,opt,name=excludeFilter,proto3" json:"excludeFilter,omitempty"` - Prefixes []string `protobuf:"bytes,4,rep,name=prefixes,proto3" json:"prefixes,omitempty"` - StreamingID string `protobuf:"bytes,5,opt,name=streamingID,proto3" json:"streamingID,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StreamTagsRequest) Reset() { *x = StreamTagsRequest{} - mi := &file_datadog_model_v1_model_proto_msgTypes[4] + mi := &file_datadog_model_v1_model_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -333,7 +417,7 @@ func (x *StreamTagsRequest) String() string { func 
(*StreamTagsRequest) ProtoMessage() {} func (x *StreamTagsRequest) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[4] + mi := &file_datadog_model_v1_model_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -346,7 +430,7 @@ func (x *StreamTagsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamTagsRequest.ProtoReflect.Descriptor instead. func (*StreamTagsRequest) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{4} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{6} } func (x *StreamTagsRequest) GetCardinality() TagCardinality { @@ -385,16 +469,15 @@ func (x *StreamTagsRequest) GetStreamingID() string { } type StreamTagsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Events []*StreamTagsEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` unknownFields protoimpl.UnknownFields - - Events []*StreamTagsEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StreamTagsResponse) Reset() { *x = StreamTagsResponse{} - mi := &file_datadog_model_v1_model_proto_msgTypes[5] + mi := &file_datadog_model_v1_model_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -406,7 +489,7 @@ func (x *StreamTagsResponse) String() string { func (*StreamTagsResponse) ProtoMessage() {} func (x *StreamTagsResponse) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[5] + mi := &file_datadog_model_v1_model_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -419,7 +502,7 @@ func (x *StreamTagsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamTagsResponse.ProtoReflect.Descriptor instead. 
func (*StreamTagsResponse) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{5} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{7} } func (x *StreamTagsResponse) GetEvents() []*StreamTagsEvent { @@ -430,17 +513,16 @@ func (x *StreamTagsResponse) GetEvents() []*StreamTagsEvent { } type StreamTagsEvent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Type EventType `protobuf:"varint,1,opt,name=type,proto3,enum=datadog.model.v1.EventType" json:"type,omitempty"` + Entity *Entity `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` unknownFields protoimpl.UnknownFields - - Type EventType `protobuf:"varint,1,opt,name=type,proto3,enum=datadog.model.v1.EventType" json:"type,omitempty"` - Entity *Entity `protobuf:"bytes,2,opt,name=entity,proto3" json:"entity,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StreamTagsEvent) Reset() { *x = StreamTagsEvent{} - mi := &file_datadog_model_v1_model_proto_msgTypes[6] + mi := &file_datadog_model_v1_model_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -452,7 +534,7 @@ func (x *StreamTagsEvent) String() string { func (*StreamTagsEvent) ProtoMessage() {} func (x *StreamTagsEvent) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[6] + mi := &file_datadog_model_v1_model_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -465,7 +547,7 @@ func (x *StreamTagsEvent) ProtoReflect() protoreflect.Message { // Deprecated: Use StreamTagsEvent.ProtoReflect.Descriptor instead. func (*StreamTagsEvent) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{6} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{8} } func (x *StreamTagsEvent) GetType() EventType { @@ -483,18 +565,17 @@ func (x *StreamTagsEvent) GetEntity() *Entity { } type DeprecatedFilter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + KubeNamespace string `protobuf:"bytes,1,opt,name=kubeNamespace,proto3" json:"kubeNamespace,omitempty"` + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + ContainerName string `protobuf:"bytes,3,opt,name=containerName,proto3" json:"containerName,omitempty"` unknownFields protoimpl.UnknownFields - - KubeNamespace string `protobuf:"bytes,1,opt,name=kubeNamespace,proto3" json:"kubeNamespace,omitempty"` - Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` - ContainerName string `protobuf:"bytes,3,opt,name=containerName,proto3" json:"containerName,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DeprecatedFilter) Reset() { *x = DeprecatedFilter{} - mi := &file_datadog_model_v1_model_proto_msgTypes[7] + mi := &file_datadog_model_v1_model_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -506,7 +587,7 @@ func (x *DeprecatedFilter) String() string { func (*DeprecatedFilter) ProtoMessage() {} func (x *DeprecatedFilter) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[7] + mi := &file_datadog_model_v1_model_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -519,7 +600,7 @@ func (x *DeprecatedFilter) ProtoReflect() 
protoreflect.Message { // Deprecated: Use DeprecatedFilter.ProtoReflect.Descriptor instead. func (*DeprecatedFilter) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{7} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{9} } func (x *DeprecatedFilter) GetKubeNamespace() string { @@ -544,21 +625,20 @@ func (x *DeprecatedFilter) GetContainerName() string { } type Entity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id *EntityId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` - HighCardinalityTags []string `protobuf:"bytes,3,rep,name=highCardinalityTags,proto3" json:"highCardinalityTags,omitempty"` - OrchestratorCardinalityTags []string `protobuf:"bytes,4,rep,name=orchestratorCardinalityTags,proto3" json:"orchestratorCardinalityTags,omitempty"` - LowCardinalityTags []string `protobuf:"bytes,5,rep,name=lowCardinalityTags,proto3" json:"lowCardinalityTags,omitempty"` - StandardTags []string `protobuf:"bytes,6,rep,name=standardTags,proto3" json:"standardTags,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Id *EntityId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + HighCardinalityTags []string `protobuf:"bytes,3,rep,name=highCardinalityTags,proto3" json:"highCardinalityTags,omitempty"` + OrchestratorCardinalityTags []string `protobuf:"bytes,4,rep,name=orchestratorCardinalityTags,proto3" json:"orchestratorCardinalityTags,omitempty"` + LowCardinalityTags []string `protobuf:"bytes,5,rep,name=lowCardinalityTags,proto3" json:"lowCardinalityTags,omitempty"` + StandardTags []string `protobuf:"bytes,6,rep,name=standardTags,proto3" json:"standardTags,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Entity) Reset() { *x = Entity{} - mi := &file_datadog_model_v1_model_proto_msgTypes[8] + mi := &file_datadog_model_v1_model_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -570,7 +650,7 @@ func (x *Entity) String() string { func (*Entity) ProtoMessage() {} func (x *Entity) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[8] + mi := &file_datadog_model_v1_model_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -583,7 +663,7 @@ func (x *Entity) ProtoReflect() protoreflect.Message { // Deprecated: Use Entity.ProtoReflect.Descriptor instead. func (*Entity) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{8} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{10} } func (x *Entity) GetId() *EntityId { @@ -629,17 +709,16 @@ func (x *Entity) GetStandardTags() []string { } type GenerateContainerIDFromOriginInfoRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + LocalData *GenerateContainerIDFromOriginInfoRequest_LocalData `protobuf:"bytes,1,opt,name=localData,proto3,oneof" json:"localData,omitempty"` // Local data for the container, generated by the client. 
+ ExternalData *GenerateContainerIDFromOriginInfoRequest_ExternalData `protobuf:"bytes,2,opt,name=externalData,proto3,oneof" json:"externalData,omitempty"` // External data for the container, generated by the Admission Controller. unknownFields protoimpl.UnknownFields - - LocalData *GenerateContainerIDFromOriginInfoRequest_LocalData `protobuf:"bytes,1,opt,name=localData,proto3,oneof" json:"localData,omitempty"` // Local data for the container, generated by the client. - ExternalData *GenerateContainerIDFromOriginInfoRequest_ExternalData `protobuf:"bytes,2,opt,name=externalData,proto3,oneof" json:"externalData,omitempty"` // External data for the container, generated by the Admission Controller. + sizeCache protoimpl.SizeCache } func (x *GenerateContainerIDFromOriginInfoRequest) Reset() { *x = GenerateContainerIDFromOriginInfoRequest{} - mi := &file_datadog_model_v1_model_proto_msgTypes[9] + mi := &file_datadog_model_v1_model_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -651,7 +730,7 @@ func (x *GenerateContainerIDFromOriginInfoRequest) String() string { func (*GenerateContainerIDFromOriginInfoRequest) ProtoMessage() {} func (x *GenerateContainerIDFromOriginInfoRequest) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[9] + mi := &file_datadog_model_v1_model_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -664,7 +743,7 @@ func (x *GenerateContainerIDFromOriginInfoRequest) ProtoReflect() protoreflect.M // Deprecated: Use GenerateContainerIDFromOriginInfoRequest.ProtoReflect.Descriptor instead. func (*GenerateContainerIDFromOriginInfoRequest) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{9} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{11} } func (x *GenerateContainerIDFromOriginInfoRequest) GetLocalData() *GenerateContainerIDFromOriginInfoRequest_LocalData { @@ -682,16 +761,15 @@ func (x *GenerateContainerIDFromOriginInfoRequest) GetExternalData() *GenerateCo } type GenerateContainerIDFromOriginInfoResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"containerID,omitempty"` unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"containerID,omitempty"` + sizeCache protoimpl.SizeCache } func (x *GenerateContainerIDFromOriginInfoResponse) Reset() { *x = GenerateContainerIDFromOriginInfoResponse{} - mi := &file_datadog_model_v1_model_proto_msgTypes[10] + mi := &file_datadog_model_v1_model_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -703,7 +781,7 @@ func (x *GenerateContainerIDFromOriginInfoResponse) String() string { func (*GenerateContainerIDFromOriginInfoResponse) ProtoMessage() {} func (x *GenerateContainerIDFromOriginInfoResponse) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[10] + mi := &file_datadog_model_v1_model_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -716,7 +794,7 @@ func (x *GenerateContainerIDFromOriginInfoResponse) ProtoReflect() protoreflect. // Deprecated: Use GenerateContainerIDFromOriginInfoResponse.ProtoReflect.Descriptor instead. 
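// ---------------------------------------------------------------------------
// Note (illustrative, not part of the generated file): the msgTypes indices
// and rawDescGZIP message indices in this file shift by two because
// HostTagRequest and HostTagReply were inserted ahead of the existing
// messages, and protoc-gen-go v1.36 also reorders the internal
// state/sizeCache fields and tags state with `protogen:"open.v1"`. The
// exported fields and nil-safe getters are unchanged, so existing callers
// keep working, e.g.:
//
//	resp := &GenerateContainerIDFromOriginInfoResponse{ContainerID: "abc123"} // hypothetical value
//	id := resp.GetContainerID() // still "abc123" after regeneration
// ---------------------------------------------------------------------------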
func (*GenerateContainerIDFromOriginInfoResponse) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{10} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{12} } func (x *GenerateContainerIDFromOriginInfoResponse) GetContainerID() string { @@ -727,17 +805,16 @@ func (x *GenerateContainerIDFromOriginInfoResponse) GetContainerID() string { } type FetchEntityRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id *EntityId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Cardinality TagCardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=datadog.model.v1.TagCardinality" json:"cardinality,omitempty"` unknownFields protoimpl.UnknownFields - - Id *EntityId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Cardinality TagCardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=datadog.model.v1.TagCardinality" json:"cardinality,omitempty"` + sizeCache protoimpl.SizeCache } func (x *FetchEntityRequest) Reset() { *x = FetchEntityRequest{} - mi := &file_datadog_model_v1_model_proto_msgTypes[11] + mi := &file_datadog_model_v1_model_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -749,7 +826,7 @@ func (x *FetchEntityRequest) String() string { func (*FetchEntityRequest) ProtoMessage() {} func (x *FetchEntityRequest) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[11] + mi := &file_datadog_model_v1_model_proto_msgTypes[13] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -762,7 +839,7 @@ func (x *FetchEntityRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchEntityRequest.ProtoReflect.Descriptor instead. 
func (*FetchEntityRequest) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{11} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{13} } func (x *FetchEntityRequest) GetId() *EntityId { @@ -780,18 +857,17 @@ func (x *FetchEntityRequest) GetCardinality() TagCardinality { } type FetchEntityResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id *EntityId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Cardinality TagCardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=datadog.model.v1.TagCardinality" json:"cardinality,omitempty"` + Tags []string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty"` unknownFields protoimpl.UnknownFields - - Id *EntityId `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Cardinality TagCardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=datadog.model.v1.TagCardinality" json:"cardinality,omitempty"` - Tags []string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty"` + sizeCache protoimpl.SizeCache } func (x *FetchEntityResponse) Reset() { *x = FetchEntityResponse{} - mi := &file_datadog_model_v1_model_proto_msgTypes[12] + mi := &file_datadog_model_v1_model_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -803,7 +879,7 @@ func (x *FetchEntityResponse) String() string { func (*FetchEntityResponse) ProtoMessage() {} func (x *FetchEntityResponse) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[12] + mi := &file_datadog_model_v1_model_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -816,7 +892,7 @@ func (x *FetchEntityResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use FetchEntityResponse.ProtoReflect.Descriptor instead. 
func (*FetchEntityResponse) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{12} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{14} } func (x *FetchEntityResponse) GetId() *EntityId { @@ -841,17 +917,16 @@ func (x *FetchEntityResponse) GetTags() []string { } type EntityId struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` + Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"` unknownFields protoimpl.UnknownFields - - Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` - Uid string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"` + sizeCache protoimpl.SizeCache } func (x *EntityId) Reset() { *x = EntityId{} - mi := &file_datadog_model_v1_model_proto_msgTypes[13] + mi := &file_datadog_model_v1_model_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -863,7 +938,7 @@ func (x *EntityId) String() string { func (*EntityId) ProtoMessage() {} func (x *EntityId) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[13] + mi := &file_datadog_model_v1_model_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -876,7 +951,7 @@ func (x *EntityId) ProtoReflect() protoreflect.Message { // Deprecated: Use EntityId.ProtoReflect.Descriptor instead. func (*EntityId) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{13} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{15} } func (x *EntityId) GetPrefix() string { @@ -896,21 +971,20 @@ func (x *EntityId) GetUid() string { // UDS Capture // The message contains the payload and the ancillary info type UnixDogstatsdMsg struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + PayloadSize int32 `protobuf:"varint,2,opt,name=payloadSize,proto3" json:"payloadSize,omitempty"` + Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` + Pid int32 `protobuf:"varint,4,opt,name=pid,proto3" json:"pid,omitempty"` + AncillarySize int32 `protobuf:"varint,5,opt,name=ancillarySize,proto3" json:"ancillarySize,omitempty"` + Ancillary []byte `protobuf:"bytes,6,opt,name=ancillary,proto3" json:"ancillary,omitempty"` unknownFields protoimpl.UnknownFields - - Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - PayloadSize int32 `protobuf:"varint,2,opt,name=payloadSize,proto3" json:"payloadSize,omitempty"` - Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"` - Pid int32 `protobuf:"varint,4,opt,name=pid,proto3" json:"pid,omitempty"` - AncillarySize int32 `protobuf:"varint,5,opt,name=ancillarySize,proto3" json:"ancillarySize,omitempty"` - Ancillary []byte `protobuf:"bytes,6,opt,name=ancillary,proto3" json:"ancillary,omitempty"` + sizeCache protoimpl.SizeCache } func (x *UnixDogstatsdMsg) Reset() { *x = UnixDogstatsdMsg{} - mi := &file_datadog_model_v1_model_proto_msgTypes[14] + mi := &file_datadog_model_v1_model_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -922,7 +996,7 @@ func 
(x *UnixDogstatsdMsg) String() string { func (*UnixDogstatsdMsg) ProtoMessage() {} func (x *UnixDogstatsdMsg) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[14] + mi := &file_datadog_model_v1_model_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -935,7 +1009,7 @@ func (x *UnixDogstatsdMsg) ProtoReflect() protoreflect.Message { // Deprecated: Use UnixDogstatsdMsg.ProtoReflect.Descriptor instead. func (*UnixDogstatsdMsg) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{14} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{16} } func (x *UnixDogstatsdMsg) GetTimestamp() int64 { @@ -981,17 +1055,16 @@ func (x *UnixDogstatsdMsg) GetAncillary() []byte { } type TaggerState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + State map[string]*Entity `protobuf:"bytes,1,rep,name=state,proto3" json:"state,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + PidMap map[int32]string `protobuf:"bytes,2,rep,name=pidMap,proto3" json:"pidMap,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - State map[string]*Entity `protobuf:"bytes,1,rep,name=state,proto3" json:"state,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - PidMap map[int32]string `protobuf:"bytes,2,rep,name=pidMap,proto3" json:"pidMap,omitempty" protobuf_key:"varint,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *TaggerState) Reset() { *x = TaggerState{} - mi := &file_datadog_model_v1_model_proto_msgTypes[15] + mi := &file_datadog_model_v1_model_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1003,7 +1076,7 @@ func (x *TaggerState) String() string { func (*TaggerState) ProtoMessage() {} func (x *TaggerState) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[15] + mi := &file_datadog_model_v1_model_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1016,7 +1089,7 @@ func (x *TaggerState) ProtoReflect() protoreflect.Message { // Deprecated: Use TaggerState.ProtoReflect.Descriptor instead. 
func (*TaggerState) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{15} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{17} } func (x *TaggerState) GetState() map[string]*Entity { @@ -1034,16 +1107,15 @@ func (x *TaggerState) GetPidMap() map[int32]string { } type TaggerStateResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Loaded bool `protobuf:"varint,1,opt,name=loaded,proto3" json:"loaded,omitempty"` unknownFields protoimpl.UnknownFields - - Loaded bool `protobuf:"varint,1,opt,name=loaded,proto3" json:"loaded,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TaggerStateResponse) Reset() { *x = TaggerStateResponse{} - mi := &file_datadog_model_v1_model_proto_msgTypes[16] + mi := &file_datadog_model_v1_model_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1055,7 +1127,7 @@ func (x *TaggerStateResponse) String() string { func (*TaggerStateResponse) ProtoMessage() {} func (x *TaggerStateResponse) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[16] + mi := &file_datadog_model_v1_model_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1068,7 +1140,7 @@ func (x *TaggerStateResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use TaggerStateResponse.ProtoReflect.Descriptor instead. func (*TaggerStateResponse) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{16} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{18} } func (x *TaggerStateResponse) GetLoaded() bool { @@ -1080,19 +1152,18 @@ func (x *TaggerStateResponse) GetLoaded() bool { // Nested message for the local data type GenerateContainerIDFromOriginInfoRequest_LocalData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ProcessID *uint32 `protobuf:"varint,1,opt,name=processID,proto3,oneof" json:"processID,omitempty"` // Process ID of the container process on the host. + ContainerID *string `protobuf:"bytes,2,opt,name=containerID,proto3,oneof" json:"containerID,omitempty"` // Container ID send from the client. + Inode *uint64 `protobuf:"varint,3,opt,name=inode,proto3,oneof" json:"inode,omitempty"` // Cgroup inode of the container. + PodUID *string `protobuf:"bytes,4,opt,name=podUID,proto3,oneof" json:"podUID,omitempty"` // Pod UID send from the client. unknownFields protoimpl.UnknownFields - - ProcessID *uint32 `protobuf:"varint,1,opt,name=processID,proto3,oneof" json:"processID,omitempty"` // Process ID of the container process on the host. - ContainerID *string `protobuf:"bytes,2,opt,name=containerID,proto3,oneof" json:"containerID,omitempty"` // Container ID send from the client. - Inode *uint64 `protobuf:"varint,3,opt,name=inode,proto3,oneof" json:"inode,omitempty"` // Cgroup inode of the container. - PodUID *string `protobuf:"bytes,4,opt,name=podUID,proto3,oneof" json:"podUID,omitempty"` // Pod UID send from the client. 
+ sizeCache protoimpl.SizeCache } func (x *GenerateContainerIDFromOriginInfoRequest_LocalData) Reset() { *x = GenerateContainerIDFromOriginInfoRequest_LocalData{} - mi := &file_datadog_model_v1_model_proto_msgTypes[17] + mi := &file_datadog_model_v1_model_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1104,7 +1175,7 @@ func (x *GenerateContainerIDFromOriginInfoRequest_LocalData) String() string { func (*GenerateContainerIDFromOriginInfoRequest_LocalData) ProtoMessage() {} func (x *GenerateContainerIDFromOriginInfoRequest_LocalData) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[17] + mi := &file_datadog_model_v1_model_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1117,7 +1188,7 @@ func (x *GenerateContainerIDFromOriginInfoRequest_LocalData) ProtoReflect() prot // Deprecated: Use GenerateContainerIDFromOriginInfoRequest_LocalData.ProtoReflect.Descriptor instead. func (*GenerateContainerIDFromOriginInfoRequest_LocalData) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{9, 0} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{11, 0} } func (x *GenerateContainerIDFromOriginInfoRequest_LocalData) GetProcessID() uint32 { @@ -1150,18 +1221,17 @@ func (x *GenerateContainerIDFromOriginInfoRequest_LocalData) GetPodUID() string // Nested message for the external data type GenerateContainerIDFromOriginInfoRequest_ExternalData struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Init *bool `protobuf:"varint,1,opt,name=init,proto3,oneof" json:"init,omitempty"` // Init is true if the container is an init container. + ContainerName *string `protobuf:"bytes,2,opt,name=containerName,proto3,oneof" json:"containerName,omitempty"` // Container name in the Kubernetes Pod spec. + PodUID *string `protobuf:"bytes,3,opt,name=podUID,proto3,oneof" json:"podUID,omitempty"` // Pod UID in the Kubernetes Pod spec. unknownFields protoimpl.UnknownFields - - Init *bool `protobuf:"varint,1,opt,name=init,proto3,oneof" json:"init,omitempty"` // Init is true if the container is an init container. - ContainerName *string `protobuf:"bytes,2,opt,name=containerName,proto3,oneof" json:"containerName,omitempty"` // Container name in the Kubernetes Pod spec. - PodUID *string `protobuf:"bytes,3,opt,name=podUID,proto3,oneof" json:"podUID,omitempty"` // Pod UID in the Kubernetes Pod spec. 
+ sizeCache protoimpl.SizeCache } func (x *GenerateContainerIDFromOriginInfoRequest_ExternalData) Reset() { *x = GenerateContainerIDFromOriginInfoRequest_ExternalData{} - mi := &file_datadog_model_v1_model_proto_msgTypes[18] + mi := &file_datadog_model_v1_model_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1173,7 +1243,7 @@ func (x *GenerateContainerIDFromOriginInfoRequest_ExternalData) String() string func (*GenerateContainerIDFromOriginInfoRequest_ExternalData) ProtoMessage() {} func (x *GenerateContainerIDFromOriginInfoRequest_ExternalData) ProtoReflect() protoreflect.Message { - mi := &file_datadog_model_v1_model_proto_msgTypes[18] + mi := &file_datadog_model_v1_model_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1186,7 +1256,7 @@ func (x *GenerateContainerIDFromOriginInfoRequest_ExternalData) ProtoReflect() p // Deprecated: Use GenerateContainerIDFromOriginInfoRequest_ExternalData.ProtoReflect.Descriptor instead. func (*GenerateContainerIDFromOriginInfoRequest_ExternalData) Descriptor() ([]byte, []int) { - return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{9, 1} + return file_datadog_model_v1_model_proto_rawDescGZIP(), []int{11, 1} } func (x *GenerateContainerIDFromOriginInfoRequest_ExternalData) GetInit() bool { @@ -1220,180 +1290,187 @@ var file_datadog_model_v1_model_proto_rawDesc = []byte{ 0x65, 0x73, 0x74, 0x22, 0x2b, 0x0a, 0x0d, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, - 0x22, 0x67, 0x0a, 0x15, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x54, 0x72, 0x69, 0x67, 0x67, - 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, - 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0x2c, 0x0a, 0x16, 0x43, 0x61, 0x70, - 0x74, 0x75, 0x72, 0x65, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0xa9, 0x02, 0x0a, 0x11, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, - 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, - 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, - 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x12, 0x48, 0x0a, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, - 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, - 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 
0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0d, 0x69, 0x6e, - 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x0d, 0x65, - 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, - 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, - 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x44, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, - 0x67, 0x49, 0x44, 0x22, 0x4f, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x67, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x65, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, - 0x65, 0x61, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, - 0x65, 0x6e, 0x74, 0x73, 0x22, 0x74, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, - 0x67, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, - 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, - 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, - 0x74, 0x79, 0x52, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, 0x74, 0x0a, 0x10, 0x44, 0x65, - 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x24, - 0x0a, 0x0d, 0x6b, 0x75, 0x62, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6b, 0x75, 0x62, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, - 0x70, 0x61, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, - 0x22, 0x90, 0x02, 0x0a, 0x06, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, - 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x49, 0x64, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, 0x13, 0x68, - 0x69, 0x67, 0x68, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, - 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 
0x13, 0x68, 0x69, 0x67, 0x68, 0x43, 0x61, - 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, 0x73, 0x12, 0x40, 0x0a, - 0x1b, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x61, 0x72, - 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x1b, 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, - 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, 0x73, 0x12, - 0x2e, 0x0a, 0x12, 0x6c, 0x6f, 0x77, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x54, 0x61, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x6f, 0x77, - 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, 0x73, 0x12, - 0x22, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x54, 0x61, 0x67, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x54, - 0x61, 0x67, 0x73, 0x22, 0xff, 0x04, 0x0a, 0x28, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, - 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x46, 0x72, 0x6f, 0x6d, 0x4f, - 0x72, 0x69, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x67, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, - 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x72, - 0x69, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x6f, 0x63, - 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x70, 0x0a, 0x0c, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x47, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, + 0x22, 0x10, 0x0a, 0x0e, 0x48, 0x6f, 0x73, 0x74, 0x54, 0x61, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x22, 0x58, 0x0a, 0x0c, 0x48, 0x6f, 0x73, 0x74, 0x54, 0x61, 0x67, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x12, 0x30, 0x0a, 0x13, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x22, 0x67, 0x0a, 0x15, + 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1e, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x72, + 0x65, 0x73, 0x73, 0x65, 0x64, 0x22, 0x2c, 0x0a, 0x16, 0x43, 0x61, 0x70, 0x74, 
0x75, 0x72, 0x65, + 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x22, 0xa9, 0x02, 0x0a, 0x11, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, + 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x0b, 0x63, 0x61, 0x72, + 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x61, 0x67, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x52, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x48, 0x0a, + 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, + 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x0d, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, + 0x31, 0x2e, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x52, 0x0d, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x65, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x49, 0x44, 0x22, + 0x4f, 0x0a, 0x12, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, + 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, + 0x61, 0x67, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x22, 0x74, 0x0a, 0x0f, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x67, 0x73, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x1b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, + 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x30, 0x0a, 0x06, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, + 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x06, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x22, 0x74, 0x0a, 0x10, 0x44, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x6b, 0x75, + 0x62, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 
0x52, 0x0d, 0x6b, 0x75, 0x62, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x90, 0x02, 0x0a, + 0x06, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x2a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, + 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x30, 0x0a, 0x13, 0x68, 0x69, 0x67, 0x68, 0x43, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x68, 0x69, 0x67, 0x68, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, 0x73, 0x12, 0x40, 0x0a, 0x1b, 0x6f, 0x72, 0x63, + 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x1b, + 0x6f, 0x72, 0x63, 0x68, 0x65, 0x73, 0x74, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x43, 0x61, 0x72, 0x64, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x6c, + 0x6f, 0x77, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, + 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, 0x6c, 0x6f, 0x77, 0x43, 0x61, 0x72, 0x64, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x54, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x73, + 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x54, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x61, 0x72, 0x64, 0x54, 0x61, 0x67, 0x73, 0x22, + 0xff, 0x04, 0x0a, 0x28, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x67, 0x0a, 0x09, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x44, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, - 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x1a, 0xc0, 0x01, 0x0a, 0x09, - 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x72, 0x6f, - 0x63, 0x65, 0x73, 0x73, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x09, - 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0b, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x48, 0x01, 0x52, 0x0b, 0x63, 
0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, - 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x04, 0x48, 0x02, 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1b, - 0x0a, 0x06, 0x70, 0x6f, 0x64, 0x55, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, - 0x52, 0x06, 0x70, 0x6f, 0x64, 0x55, 0x49, 0x44, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, - 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, 0x44, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, - 0x6f, 0x64, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x6f, 0x64, 0x55, 0x49, 0x44, 0x1a, 0x95, - 0x01, 0x0a, 0x0c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, - 0x17, 0x0a, 0x04, 0x69, 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, - 0x04, 0x69, 0x6e, 0x69, 0x74, 0x88, 0x01, 0x01, 0x12, 0x29, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, - 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, - 0x01, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, - 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x6f, 0x64, 0x55, 0x49, 0x44, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x48, 0x02, 0x52, 0x06, 0x70, 0x6f, 0x64, 0x55, 0x49, 0x44, 0x88, 0x01, 0x01, - 0x42, 0x07, 0x0a, 0x05, 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, - 0x70, 0x6f, 0x64, 0x55, 0x49, 0x44, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, - 0x44, 0x61, 0x74, 0x61, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x44, 0x61, 0x74, 0x61, 0x22, 0x4d, 0x0a, 0x29, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x46, 0x72, 0x6f, 0x6d, - 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x49, 0x44, 0x22, 0x84, 0x01, 0x0a, 0x12, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x6e, - 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, - 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x49, 0x64, 0x52, 0x02, 0x69, 0x64, 0x12, 0x42, 0x0a, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, - 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x64, - 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, - 0x54, 0x61, 0x67, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0b, - 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0x99, 0x01, 0x0a, 0x13, - 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, - 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 
0x64, 0x52, 0x02, 0x69, 0x64, 0x12, - 0x42, 0x0a, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, - 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x43, 0x61, 0x72, 0x64, 0x69, - 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, - 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x6e, 0x74, 0x69, 0x74, - 0x79, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x75, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0xc2, 0x01, - 0x0a, 0x10, 0x55, 0x6e, 0x69, 0x78, 0x44, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x4d, - 0x73, 0x67, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, - 0x7a, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x10, 0x0a, 0x03, - 0x70, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x24, - 0x0a, 0x0d, 0x61, 0x6e, 0x63, 0x69, 0x6c, 0x6c, 0x61, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x61, 0x6e, 0x63, 0x69, 0x6c, 0x6c, 0x61, 0x72, 0x79, - 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x69, 0x6c, 0x6c, 0x61, 0x72, - 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x61, 0x6e, 0x63, 0x69, 0x6c, 0x6c, 0x61, - 0x72, 0x79, 0x22, 0x9f, 0x02, 0x0a, 0x0b, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x3e, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, - 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x70, 0x69, 0x64, 0x4d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, - 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x2e, 0x50, 0x69, 0x64, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, - 0x69, 0x64, 0x4d, 0x61, 0x70, 0x1a, 0x52, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, - 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 
0x69, 0x64, - 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x2d, 0x0a, 0x13, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, - 0x6f, 0x61, 0x64, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x6f, 0x61, - 0x64, 0x65, 0x64, 0x2a, 0x31, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x09, 0x0a, 0x05, 0x41, 0x44, 0x44, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, - 0x4f, 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, - 0x45, 0x54, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x35, 0x0a, 0x0e, 0x54, 0x61, 0x67, 0x43, 0x61, 0x72, - 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x57, 0x10, - 0x00, 0x12, 0x10, 0x0a, 0x0c, 0x4f, 0x52, 0x43, 0x48, 0x45, 0x53, 0x54, 0x52, 0x41, 0x54, 0x4f, - 0x52, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, 0x47, 0x48, 0x10, 0x02, 0x42, 0x15, 0x5a, - 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, - 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x44, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, + 0x74, 0x61, 0x88, 0x01, 0x01, 0x12, 0x70, 0x0a, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x44, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x47, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x61, 0x74, 0x61, 0x48, 0x01, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x1a, 0xc0, 0x01, 0x0a, 0x09, 0x4c, 0x6f, 0x63, 0x61, + 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, + 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x09, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, + 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x88, 0x01, 0x01, 0x12, + 0x19, 0x0a, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, + 0x52, 0x05, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x70, 0x6f, + 0x64, 0x55, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x03, 0x52, 0x06, 0x70, 0x6f, + 0x64, 0x55, 0x49, 0x44, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x70, 0x72, 0x6f, 0x63, + 0x65, 0x73, 0x73, 0x49, 0x44, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x44, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x69, 0x6e, 0x6f, 0x64, 0x65, 0x42, + 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x6f, 
0x64, 0x55, 0x49, 0x44, 0x1a, 0x95, 0x01, 0x0a, 0x0c, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, 0x12, 0x17, 0x0a, 0x04, 0x69, + 0x6e, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x04, 0x69, 0x6e, 0x69, + 0x74, 0x88, 0x01, 0x01, 0x12, 0x29, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x0d, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, + 0x1b, 0x0a, 0x06, 0x70, 0x6f, 0x64, 0x55, 0x49, 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x02, 0x52, 0x06, 0x70, 0x6f, 0x64, 0x55, 0x49, 0x44, 0x88, 0x01, 0x01, 0x42, 0x07, 0x0a, 0x05, + 0x5f, 0x69, 0x6e, 0x69, 0x74, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x70, 0x6f, 0x64, 0x55, + 0x49, 0x44, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x44, 0x61, 0x74, 0x61, + 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x61, 0x74, + 0x61, 0x22, 0x4d, 0x0a, 0x29, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x46, 0x72, 0x6f, 0x6d, 0x4f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, + 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, + 0x22, 0x84, 0x01, 0x0a, 0x12, 0x46, 0x65, 0x74, 0x63, 0x68, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, + 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x42, 0x0a, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, + 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x43, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x52, 0x0b, 0x63, 0x61, 0x72, 0x64, + 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x22, 0x99, 0x01, 0x0a, 0x13, 0x46, 0x65, 0x74, 0x63, + 0x68, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x2a, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, 0x2e, 0x45, + 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x52, 0x02, 0x69, 0x64, 0x12, 0x42, 0x0a, 0x0b, 0x63, + 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x61, 0x67, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, + 0x74, 0x79, 0x52, 0x0b, 0x63, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x61, 0x67, 0x73, 0x22, 0x34, 0x0a, 0x08, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x12, + 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x69, 0x64, 0x22, 0xc2, 0x01, 0x0a, 0x10, 0x55, 0x6e, + 0x69, 0x78, 0x44, 0x6f, 0x67, 0x73, 0x74, 0x61, 0x74, 0x73, 0x64, 0x4d, 0x73, 0x67, 0x12, 0x1c, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x20, 0x0a, 0x0b, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0b, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x61, 0x6e, + 0x63, 0x69, 0x6c, 0x6c, 0x61, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0d, 0x61, 0x6e, 0x63, 0x69, 0x6c, 0x6c, 0x61, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6e, 0x63, 0x69, 0x6c, 0x6c, 0x61, 0x72, 0x79, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x09, 0x61, 0x6e, 0x63, 0x69, 0x6c, 0x6c, 0x61, 0x72, 0x79, 0x22, 0x9f, + 0x02, 0x0a, 0x0b, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3e, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x41, + 0x0a, 0x06, 0x70, 0x69, 0x64, 0x4d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x76, + 0x31, 0x2e, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x50, 0x69, + 0x64, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x70, 0x69, 0x64, 0x4d, 0x61, + 0x70, 0x1a, 0x52, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x2e, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, + 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x39, 0x0a, 0x0b, 0x50, 0x69, 0x64, 0x4d, 0x61, 0x70, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0x2d, 0x0a, 0x13, 0x54, 0x61, 0x67, 0x67, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x6f, 0x61, 0x64, 0x65, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x6c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x2a, + 0x31, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 
0x0a, 0x05, + 0x41, 0x44, 0x44, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x4f, 0x44, 0x49, 0x46, + 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, + 0x10, 0x02, 0x2a, 0x35, 0x0a, 0x0e, 0x54, 0x61, 0x67, 0x43, 0x61, 0x72, 0x64, 0x69, 0x6e, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x12, 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x57, 0x10, 0x00, 0x12, 0x10, 0x0a, + 0x0c, 0x4f, 0x52, 0x43, 0x48, 0x45, 0x53, 0x54, 0x52, 0x41, 0x54, 0x4f, 0x52, 0x10, 0x01, 0x12, + 0x08, 0x0a, 0x04, 0x48, 0x49, 0x47, 0x48, 0x10, 0x02, 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1409,49 +1486,51 @@ func file_datadog_model_v1_model_proto_rawDescGZIP() []byte { } var file_datadog_model_v1_model_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_datadog_model_v1_model_proto_msgTypes = make([]protoimpl.MessageInfo, 21) +var file_datadog_model_v1_model_proto_msgTypes = make([]protoimpl.MessageInfo, 23) var file_datadog_model_v1_model_proto_goTypes = []any{ (EventType)(0), // 0: datadog.model.v1.EventType (TagCardinality)(0), // 1: datadog.model.v1.TagCardinality (*HostnameRequest)(nil), // 2: datadog.model.v1.HostnameRequest (*HostnameReply)(nil), // 3: datadog.model.v1.HostnameReply - (*CaptureTriggerRequest)(nil), // 4: datadog.model.v1.CaptureTriggerRequest - (*CaptureTriggerResponse)(nil), // 5: datadog.model.v1.CaptureTriggerResponse - (*StreamTagsRequest)(nil), // 6: datadog.model.v1.StreamTagsRequest - (*StreamTagsResponse)(nil), // 7: datadog.model.v1.StreamTagsResponse - (*StreamTagsEvent)(nil), // 8: datadog.model.v1.StreamTagsEvent - (*DeprecatedFilter)(nil), // 9: datadog.model.v1.DeprecatedFilter - (*Entity)(nil), // 10: datadog.model.v1.Entity - (*GenerateContainerIDFromOriginInfoRequest)(nil), // 11: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest - (*GenerateContainerIDFromOriginInfoResponse)(nil), // 12: datadog.model.v1.GenerateContainerIDFromOriginInfoResponse - (*FetchEntityRequest)(nil), // 13: datadog.model.v1.FetchEntityRequest - (*FetchEntityResponse)(nil), // 14: datadog.model.v1.FetchEntityResponse - (*EntityId)(nil), // 15: datadog.model.v1.EntityId - (*UnixDogstatsdMsg)(nil), // 16: datadog.model.v1.UnixDogstatsdMsg - (*TaggerState)(nil), // 17: datadog.model.v1.TaggerState - (*TaggerStateResponse)(nil), // 18: datadog.model.v1.TaggerStateResponse - (*GenerateContainerIDFromOriginInfoRequest_LocalData)(nil), // 19: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.LocalData - (*GenerateContainerIDFromOriginInfoRequest_ExternalData)(nil), // 20: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.ExternalData - nil, // 21: datadog.model.v1.TaggerState.StateEntry - nil, // 22: datadog.model.v1.TaggerState.PidMapEntry + (*HostTagRequest)(nil), // 4: datadog.model.v1.HostTagRequest + (*HostTagReply)(nil), // 5: datadog.model.v1.HostTagReply + (*CaptureTriggerRequest)(nil), // 6: datadog.model.v1.CaptureTriggerRequest + (*CaptureTriggerResponse)(nil), // 7: datadog.model.v1.CaptureTriggerResponse + (*StreamTagsRequest)(nil), // 8: datadog.model.v1.StreamTagsRequest + (*StreamTagsResponse)(nil), // 9: datadog.model.v1.StreamTagsResponse + (*StreamTagsEvent)(nil), // 10: datadog.model.v1.StreamTagsEvent + (*DeprecatedFilter)(nil), // 11: datadog.model.v1.DeprecatedFilter + (*Entity)(nil), // 12: datadog.model.v1.Entity + (*GenerateContainerIDFromOriginInfoRequest)(nil), // 
13: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest + (*GenerateContainerIDFromOriginInfoResponse)(nil), // 14: datadog.model.v1.GenerateContainerIDFromOriginInfoResponse + (*FetchEntityRequest)(nil), // 15: datadog.model.v1.FetchEntityRequest + (*FetchEntityResponse)(nil), // 16: datadog.model.v1.FetchEntityResponse + (*EntityId)(nil), // 17: datadog.model.v1.EntityId + (*UnixDogstatsdMsg)(nil), // 18: datadog.model.v1.UnixDogstatsdMsg + (*TaggerState)(nil), // 19: datadog.model.v1.TaggerState + (*TaggerStateResponse)(nil), // 20: datadog.model.v1.TaggerStateResponse + (*GenerateContainerIDFromOriginInfoRequest_LocalData)(nil), // 21: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.LocalData + (*GenerateContainerIDFromOriginInfoRequest_ExternalData)(nil), // 22: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.ExternalData + nil, // 23: datadog.model.v1.TaggerState.StateEntry + nil, // 24: datadog.model.v1.TaggerState.PidMapEntry } var file_datadog_model_v1_model_proto_depIdxs = []int32{ 1, // 0: datadog.model.v1.StreamTagsRequest.cardinality:type_name -> datadog.model.v1.TagCardinality - 9, // 1: datadog.model.v1.StreamTagsRequest.includeFilter:type_name -> datadog.model.v1.DeprecatedFilter - 9, // 2: datadog.model.v1.StreamTagsRequest.excludeFilter:type_name -> datadog.model.v1.DeprecatedFilter - 8, // 3: datadog.model.v1.StreamTagsResponse.events:type_name -> datadog.model.v1.StreamTagsEvent + 11, // 1: datadog.model.v1.StreamTagsRequest.includeFilter:type_name -> datadog.model.v1.DeprecatedFilter + 11, // 2: datadog.model.v1.StreamTagsRequest.excludeFilter:type_name -> datadog.model.v1.DeprecatedFilter + 10, // 3: datadog.model.v1.StreamTagsResponse.events:type_name -> datadog.model.v1.StreamTagsEvent 0, // 4: datadog.model.v1.StreamTagsEvent.type:type_name -> datadog.model.v1.EventType - 10, // 5: datadog.model.v1.StreamTagsEvent.entity:type_name -> datadog.model.v1.Entity - 15, // 6: datadog.model.v1.Entity.id:type_name -> datadog.model.v1.EntityId - 19, // 7: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.localData:type_name -> datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.LocalData - 20, // 8: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.externalData:type_name -> datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.ExternalData - 15, // 9: datadog.model.v1.FetchEntityRequest.id:type_name -> datadog.model.v1.EntityId + 12, // 5: datadog.model.v1.StreamTagsEvent.entity:type_name -> datadog.model.v1.Entity + 17, // 6: datadog.model.v1.Entity.id:type_name -> datadog.model.v1.EntityId + 21, // 7: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.localData:type_name -> datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.LocalData + 22, // 8: datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.externalData:type_name -> datadog.model.v1.GenerateContainerIDFromOriginInfoRequest.ExternalData + 17, // 9: datadog.model.v1.FetchEntityRequest.id:type_name -> datadog.model.v1.EntityId 1, // 10: datadog.model.v1.FetchEntityRequest.cardinality:type_name -> datadog.model.v1.TagCardinality - 15, // 11: datadog.model.v1.FetchEntityResponse.id:type_name -> datadog.model.v1.EntityId + 17, // 11: datadog.model.v1.FetchEntityResponse.id:type_name -> datadog.model.v1.EntityId 1, // 12: datadog.model.v1.FetchEntityResponse.cardinality:type_name -> datadog.model.v1.TagCardinality - 21, // 13: datadog.model.v1.TaggerState.state:type_name -> datadog.model.v1.TaggerState.StateEntry - 22, // 14: 
datadog.model.v1.TaggerState.pidMap:type_name -> datadog.model.v1.TaggerState.PidMapEntry - 10, // 15: datadog.model.v1.TaggerState.StateEntry.value:type_name -> datadog.model.v1.Entity + 23, // 13: datadog.model.v1.TaggerState.state:type_name -> datadog.model.v1.TaggerState.StateEntry + 24, // 14: datadog.model.v1.TaggerState.pidMap:type_name -> datadog.model.v1.TaggerState.PidMapEntry + 12, // 15: datadog.model.v1.TaggerState.StateEntry.value:type_name -> datadog.model.v1.Entity 16, // [16:16] is the sub-list for method output_type 16, // [16:16] is the sub-list for method input_type 16, // [16:16] is the sub-list for extension type_name @@ -1464,16 +1543,16 @@ func file_datadog_model_v1_model_proto_init() { if File_datadog_model_v1_model_proto != nil { return } - file_datadog_model_v1_model_proto_msgTypes[9].OneofWrappers = []any{} - file_datadog_model_v1_model_proto_msgTypes[17].OneofWrappers = []any{} - file_datadog_model_v1_model_proto_msgTypes[18].OneofWrappers = []any{} + file_datadog_model_v1_model_proto_msgTypes[11].OneofWrappers = []any{} + file_datadog_model_v1_model_proto_msgTypes[19].OneofWrappers = []any{} + file_datadog_model_v1_model_proto_msgTypes[20].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_datadog_model_v1_model_proto_rawDesc, NumEnums: 2, - NumMessages: 21, + NumMessages: 23, NumExtensions: 0, NumServices: 0, }, diff --git a/pkg/proto/pbgo/core/remoteagent.pb.go b/pkg/proto/pbgo/core/remoteagent.pb.go index 4e048d389b713..1d75a47970fd5 100644 --- a/pkg/proto/pbgo/core/remoteagent.pb.go +++ b/pkg/proto/pbgo/core/remoteagent.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/remoteagent/remoteagent.proto package core @@ -21,11 +21,10 @@ const ( ) type StatusSection struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Fields map[string]string `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - Fields map[string]string `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *StatusSection) Reset() { @@ -66,10 +65,7 @@ func (x *StatusSection) GetFields() map[string]string { } type RegisterRemoteAgentRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Unique ID of the remote agent. // // SHOULD be semi-human-readable, with a unique component, such as the process name followed by a UUID: @@ -91,7 +87,9 @@ type RegisterRemoteAgentRequest struct { // requests made to the endpoint. If the token is not provided, the remote agent SHOULD reject the request. // // SHOULD be a unique string value that is generated randomly before a remote agent registers itself for the first time. 
- AuthToken string `protobuf:"bytes,4,opt,name=auth_token,json=authToken,proto3" json:"auth_token,omitempty"` + AuthToken string `protobuf:"bytes,4,opt,name=auth_token,json=authToken,proto3" json:"auth_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RegisterRemoteAgentRequest) Reset() { @@ -153,10 +151,7 @@ func (x *RegisterRemoteAgentRequest) GetAuthToken() string { } type RegisterRemoteAgentResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Recommended refresh interval for the remote agent. // // This is the interval at which the remote agent should call the RegisterRemoteAgent RPC in order to assert that the @@ -164,6 +159,8 @@ type RegisterRemoteAgentResponse struct { // // The remote agent SHOULD refresh its status every `recommended_refresh_interval_secs` seconds. RecommendedRefreshIntervalSecs uint32 `protobuf:"varint,1,opt,name=recommended_refresh_interval_secs,json=recommendedRefreshIntervalSecs,proto3" json:"recommended_refresh_interval_secs,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RegisterRemoteAgentResponse) Reset() { @@ -204,9 +201,9 @@ func (x *RegisterRemoteAgentResponse) GetRecommendedRefreshIntervalSecs() uint32 } type GetStatusDetailsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetStatusDetailsRequest) Reset() { @@ -240,10 +237,7 @@ func (*GetStatusDetailsRequest) Descriptor() ([]byte, []int) { } type GetStatusDetailsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Main status detail section. // // Generally reserved for high-level details such as version, uptime, configuration flags, etc. @@ -251,7 +245,9 @@ type GetStatusDetailsResponse struct { // Named status detail sections. // // Generally reserved for specific (sub)component details, such as the status of a specific feature or integration, etc. - NamedSections map[string]*StatusSection `protobuf:"bytes,2,rep,name=named_sections,json=namedSections,proto3" json:"named_sections,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamedSections map[string]*StatusSection `protobuf:"bytes,2,rep,name=named_sections,json=namedSections,proto3" json:"named_sections,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetStatusDetailsResponse) Reset() { @@ -299,9 +295,9 @@ func (x *GetStatusDetailsResponse) GetNamedSections() map[string]*StatusSection } type GetFlareFilesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetFlareFilesRequest) Reset() { @@ -335,10 +331,7 @@ func (*GetFlareFilesRequest) Descriptor() ([]byte, []int) { } type GetFlareFilesResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Set of files to add to the flare. 
// // The key is the name of the file, and the value is the contents of the file. @@ -346,7 +339,9 @@ type GetFlareFilesResponse struct { // The key SHOULD be an ASCII string with no path separators (`/`), and will be sanitized as necessary to ensure it can be // used as a valid filename. The key SHOULD have a file extension that is applicable to the file contents, such as // `.yaml` for YAML data. - Files map[string][]byte `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Files map[string][]byte `protobuf:"bytes,1,rep,name=files,proto3" json:"files,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetFlareFilesResponse) Reset() { diff --git a/pkg/proto/pbgo/core/remoteconfig.pb.go b/pkg/proto/pbgo/core/remoteconfig.pb.go index cfd551d59b690..cbba14756f2cd 100644 --- a/pkg/proto/pbgo/core/remoteconfig.pb.go +++ b/pkg/proto/pbgo/core/remoteconfig.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/remoteconfig/remoteconfig.proto package core @@ -76,15 +76,14 @@ func (TaskState) EnumDescriptor() ([]byte, []int) { } type ConfigMetas struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Roots []*TopMeta `protobuf:"bytes,1,rep,name=roots,proto3" json:"roots,omitempty"` - Timestamp *TopMeta `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Snapshot *TopMeta `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` - TopTargets *TopMeta `protobuf:"bytes,4,opt,name=topTargets,proto3" json:"topTargets,omitempty"` - DelegatedTargets []*DelegatedMeta `protobuf:"bytes,5,rep,name=delegatedTargets,proto3" json:"delegatedTargets,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Roots []*TopMeta `protobuf:"bytes,1,rep,name=roots,proto3" json:"roots,omitempty"` + Timestamp *TopMeta `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Snapshot *TopMeta `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + TopTargets *TopMeta `protobuf:"bytes,4,opt,name=topTargets,proto3" json:"topTargets,omitempty"` + DelegatedTargets []*DelegatedMeta `protobuf:"bytes,5,rep,name=delegatedTargets,proto3" json:"delegatedTargets,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ConfigMetas) Reset() { @@ -153,14 +152,13 @@ func (x *ConfigMetas) GetDelegatedTargets() []*DelegatedMeta { } type DirectorMetas struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Roots []*TopMeta `protobuf:"bytes,1,rep,name=roots,proto3" json:"roots,omitempty"` + Timestamp *TopMeta `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Snapshot *TopMeta `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + Targets *TopMeta `protobuf:"bytes,4,opt,name=targets,proto3" json:"targets,omitempty"` unknownFields protoimpl.UnknownFields - - Roots []*TopMeta `protobuf:"bytes,1,rep,name=roots,proto3" json:"roots,omitempty"` - Timestamp *TopMeta `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - Snapshot *TopMeta `protobuf:"bytes,3,opt,name=snapshot,proto3" 
json:"snapshot,omitempty"` - Targets *TopMeta `protobuf:"bytes,4,opt,name=targets,proto3" json:"targets,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DirectorMetas) Reset() { @@ -222,13 +220,12 @@ func (x *DirectorMetas) GetTargets() *TopMeta { } type DelegatedMeta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` + Raw []byte `protobuf:"bytes,3,opt,name=raw,proto3" json:"raw,omitempty"` unknownFields protoimpl.UnknownFields - - Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - Role string `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"` - Raw []byte `protobuf:"bytes,3,opt,name=raw,proto3" json:"raw,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DelegatedMeta) Reset() { @@ -283,12 +280,11 @@ func (x *DelegatedMeta) GetRaw() []byte { } type TopMeta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Raw []byte `protobuf:"bytes,2,opt,name=raw,proto3" json:"raw,omitempty"` unknownFields protoimpl.UnknownFields - - Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - Raw []byte `protobuf:"bytes,2,opt,name=raw,proto3" json:"raw,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TopMeta) Reset() { @@ -336,12 +332,11 @@ func (x *TopMeta) GetRaw() []byte { } type File struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Raw []byte `protobuf:"bytes,2,opt,name=raw,proto3" json:"raw,omitempty"` unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Raw []byte `protobuf:"bytes,2,opt,name=raw,proto3" json:"raw,omitempty"` + sizeCache protoimpl.SizeCache } func (x *File) Reset() { @@ -389,12 +384,9 @@ func (x *File) GetRaw() []byte { } type LatestConfigsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` - AgentVersion string `protobuf:"bytes,2,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` + AgentVersion string `protobuf:"bytes,2,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` // timestamp and snapshot versions move in tandem so they are the same. 
CurrentConfigSnapshotVersion uint64 `protobuf:"varint,3,opt,name=current_config_snapshot_version,json=currentConfigSnapshotVersion,proto3" json:"current_config_snapshot_version,omitempty"` CurrentConfigRootVersion uint64 `protobuf:"varint,9,opt,name=current_config_root_version,json=currentConfigRootVersion,proto3" json:"current_config_root_version,omitempty"` @@ -409,6 +401,8 @@ type LatestConfigsRequest struct { OrgUuid string `protobuf:"bytes,14,opt,name=org_uuid,json=orgUuid,proto3" json:"org_uuid,omitempty"` Tags []string `protobuf:"bytes,15,rep,name=tags,proto3" json:"tags,omitempty"` AgentUuid string `protobuf:"bytes,16,opt,name=agent_uuid,json=agentUuid,proto3" json:"agent_uuid,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *LatestConfigsRequest) Reset() { @@ -547,13 +541,12 @@ func (x *LatestConfigsRequest) GetAgentUuid() string { } type LatestConfigsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ConfigMetas *ConfigMetas `protobuf:"bytes,1,opt,name=config_metas,json=configMetas,proto3" json:"config_metas,omitempty"` + DirectorMetas *DirectorMetas `protobuf:"bytes,2,opt,name=director_metas,json=directorMetas,proto3" json:"director_metas,omitempty"` + TargetFiles []*File `protobuf:"bytes,3,rep,name=target_files,json=targetFiles,proto3" json:"target_files,omitempty"` unknownFields protoimpl.UnknownFields - - ConfigMetas *ConfigMetas `protobuf:"bytes,1,opt,name=config_metas,json=configMetas,proto3" json:"config_metas,omitempty"` - DirectorMetas *DirectorMetas `protobuf:"bytes,2,opt,name=director_metas,json=directorMetas,proto3" json:"director_metas,omitempty"` - TargetFiles []*File `protobuf:"bytes,3,rep,name=target_files,json=targetFiles,proto3" json:"target_files,omitempty"` + sizeCache protoimpl.SizeCache } func (x *LatestConfigsResponse) Reset() { @@ -608,11 +601,10 @@ func (x *LatestConfigsResponse) GetTargetFiles() []*File { } type OrgDataResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` unknownFields protoimpl.UnknownFields - - Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` + sizeCache protoimpl.SizeCache } func (x *OrgDataResponse) Reset() { @@ -653,12 +645,11 @@ func (x *OrgDataResponse) GetUuid() string { } type OrgStatusResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + Authorized bool `protobuf:"varint,2,opt,name=authorized,proto3" json:"authorized,omitempty"` unknownFields protoimpl.UnknownFields - - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - Authorized bool `protobuf:"varint,2,opt,name=authorized,proto3" json:"authorized,omitempty"` + sizeCache protoimpl.SizeCache } func (x *OrgStatusResponse) Reset() { @@ -706,21 +697,20 @@ func (x *OrgStatusResponse) GetAuthorized() bool { } type Client struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + State *ClientState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + Products []string `protobuf:"bytes,3,rep,name=products,proto3" 
json:"products,omitempty"` + IsTracer bool `protobuf:"varint,6,opt,name=is_tracer,json=isTracer,proto3" json:"is_tracer,omitempty"` + ClientTracer *ClientTracer `protobuf:"bytes,7,opt,name=client_tracer,json=clientTracer,proto3" json:"client_tracer,omitempty"` + IsAgent bool `protobuf:"varint,8,opt,name=is_agent,json=isAgent,proto3" json:"is_agent,omitempty"` + ClientAgent *ClientAgent `protobuf:"bytes,9,opt,name=client_agent,json=clientAgent,proto3" json:"client_agent,omitempty"` + LastSeen uint64 `protobuf:"varint,10,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"` + Capabilities []byte `protobuf:"bytes,11,opt,name=capabilities,proto3" json:"capabilities,omitempty"` + IsUpdater bool `protobuf:"varint,14,opt,name=is_updater,json=isUpdater,proto3" json:"is_updater,omitempty"` + ClientUpdater *ClientUpdater `protobuf:"bytes,15,opt,name=client_updater,json=clientUpdater,proto3" json:"client_updater,omitempty"` unknownFields protoimpl.UnknownFields - - State *ClientState `protobuf:"bytes,1,opt,name=state,proto3" json:"state,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` - Products []string `protobuf:"bytes,3,rep,name=products,proto3" json:"products,omitempty"` - IsTracer bool `protobuf:"varint,6,opt,name=is_tracer,json=isTracer,proto3" json:"is_tracer,omitempty"` - ClientTracer *ClientTracer `protobuf:"bytes,7,opt,name=client_tracer,json=clientTracer,proto3" json:"client_tracer,omitempty"` - IsAgent bool `protobuf:"varint,8,opt,name=is_agent,json=isAgent,proto3" json:"is_agent,omitempty"` - ClientAgent *ClientAgent `protobuf:"bytes,9,opt,name=client_agent,json=clientAgent,proto3" json:"client_agent,omitempty"` - LastSeen uint64 `protobuf:"varint,10,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"` - Capabilities []byte `protobuf:"bytes,11,opt,name=capabilities,proto3" json:"capabilities,omitempty"` - IsUpdater bool `protobuf:"varint,14,opt,name=is_updater,json=isUpdater,proto3" json:"is_updater,omitempty"` - ClientUpdater *ClientUpdater `protobuf:"bytes,15,opt,name=client_updater,json=clientUpdater,proto3" json:"client_updater,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Client) Reset() { @@ -831,18 +821,17 @@ func (x *Client) GetClientUpdater() *ClientUpdater { } type ClientTracer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + RuntimeId string `protobuf:"bytes,1,opt,name=runtime_id,json=runtimeId,proto3" json:"runtime_id,omitempty"` + Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"` + TracerVersion string `protobuf:"bytes,3,opt,name=tracer_version,json=tracerVersion,proto3" json:"tracer_version,omitempty"` + Service string `protobuf:"bytes,4,opt,name=service,proto3" json:"service,omitempty"` + ExtraServices []string `protobuf:"bytes,8,rep,name=extra_services,json=extraServices,proto3" json:"extra_services,omitempty"` + Env string `protobuf:"bytes,5,opt,name=env,proto3" json:"env,omitempty"` + AppVersion string `protobuf:"bytes,6,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` + Tags []string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags,omitempty"` unknownFields protoimpl.UnknownFields - - RuntimeId string `protobuf:"bytes,1,opt,name=runtime_id,json=runtimeId,proto3" json:"runtime_id,omitempty"` - Language string `protobuf:"bytes,2,opt,name=language,proto3" json:"language,omitempty"` - TracerVersion string 
`protobuf:"bytes,3,opt,name=tracer_version,json=tracerVersion,proto3" json:"tracer_version,omitempty"` - Service string `protobuf:"bytes,4,opt,name=service,proto3" json:"service,omitempty"` - ExtraServices []string `protobuf:"bytes,8,rep,name=extra_services,json=extraServices,proto3" json:"extra_services,omitempty"` - Env string `protobuf:"bytes,5,opt,name=env,proto3" json:"env,omitempty"` - AppVersion string `protobuf:"bytes,6,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` - Tags []string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ClientTracer) Reset() { @@ -932,15 +921,14 @@ func (x *ClientTracer) GetTags() []string { } type ClientAgent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` + ClusterId string `protobuf:"bytes,4,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + CwsWorkloads []string `protobuf:"bytes,5,rep,name=cws_workloads,json=cwsWorkloads,proto3" json:"cws_workloads,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"` - ClusterId string `protobuf:"bytes,4,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` - CwsWorkloads []string `protobuf:"bytes,5,rep,name=cws_workloads,json=cwsWorkloads,proto3" json:"cws_workloads,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ClientAgent) Reset() { @@ -1009,13 +997,12 @@ func (x *ClientAgent) GetCwsWorkloads() []string { } type ClientUpdater struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` - Packages []*PackageState `protobuf:"bytes,2,rep,name=packages,proto3" json:"packages,omitempty"` - AvailableDiskSpace uint64 `protobuf:"varint,3,opt,name=available_disk_space,json=availableDiskSpace,proto3" json:"available_disk_space,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + Packages []*PackageState `protobuf:"bytes,2,rep,name=packages,proto3" json:"packages,omitempty"` + AvailableDiskSpace uint64 `protobuf:"varint,3,opt,name=available_disk_space,json=availableDiskSpace,proto3" json:"available_disk_space,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientUpdater) Reset() { @@ -1070,17 +1057,15 @@ func (x *ClientUpdater) GetAvailableDiskSpace() uint64 { } type PackageState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Package string `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"` - StableVersion string `protobuf:"bytes,2,opt,name=stable_version,json=stableVersion,proto3" json:"stable_version,omitempty"` - ExperimentVersion string 
`protobuf:"bytes,3,opt,name=experiment_version,json=experimentVersion,proto3" json:"experiment_version,omitempty"` - Task *PackageStateTask `protobuf:"bytes,4,opt,name=task,proto3" json:"task,omitempty"` - StableConfigState *PoliciesState `protobuf:"bytes,8,opt,name=stable_config_state,json=stableConfigState,proto3" json:"stable_config_state,omitempty"` - ExperimentConfigState *PoliciesState `protobuf:"bytes,9,opt,name=experiment_config_state,json=experimentConfigState,proto3" json:"experiment_config_state,omitempty"` - RemoteConfigState *PoliciesState `protobuf:"bytes,10,opt,name=remote_config_state,json=remoteConfigState,proto3" json:"remote_config_state,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Package string `protobuf:"bytes,1,opt,name=package,proto3" json:"package,omitempty"` + StableVersion string `protobuf:"bytes,2,opt,name=stable_version,json=stableVersion,proto3" json:"stable_version,omitempty"` + ExperimentVersion string `protobuf:"bytes,3,opt,name=experiment_version,json=experimentVersion,proto3" json:"experiment_version,omitempty"` + Task *PackageStateTask `protobuf:"bytes,4,opt,name=task,proto3" json:"task,omitempty"` + StableConfigVersion string `protobuf:"bytes,11,opt,name=stable_config_version,json=stableConfigVersion,proto3" json:"stable_config_version,omitempty"` + ExperimentConfigVersion string `protobuf:"bytes,12,opt,name=experiment_config_version,json=experimentConfigVersion,proto3" json:"experiment_config_version,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PackageState) Reset() { @@ -1141,93 +1126,32 @@ func (x *PackageState) GetTask() *PackageStateTask { return nil } -func (x *PackageState) GetStableConfigState() *PoliciesState { - if x != nil { - return x.StableConfigState - } - return nil -} - -func (x *PackageState) GetExperimentConfigState() *PoliciesState { +func (x *PackageState) GetStableConfigVersion() string { if x != nil { - return x.ExperimentConfigState - } - return nil -} - -func (x *PackageState) GetRemoteConfigState() *PoliciesState { - if x != nil { - return x.RemoteConfigState - } - return nil -} - -type PoliciesState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - MatchedPolicies []string `protobuf:"bytes,2,rep,name=matched_policies,json=matchedPolicies,proto3" json:"matched_policies,omitempty"` -} - -func (x *PoliciesState) Reset() { - *x = PoliciesState{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PoliciesState) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PoliciesState) ProtoMessage() {} - -func (x *PoliciesState) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PoliciesState.ProtoReflect.Descriptor instead. 
-func (*PoliciesState) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{14} -} - -func (x *PoliciesState) GetVersion() string { - if x != nil { - return x.Version + return x.StableConfigVersion } return "" } -func (x *PoliciesState) GetMatchedPolicies() []string { +func (x *PackageState) GetExperimentConfigVersion() string { if x != nil { - return x.MatchedPolicies + return x.ExperimentConfigVersion } - return nil + return "" } type PackageStateTask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + State TaskState `protobuf:"varint,2,opt,name=state,proto3,enum=datadog.config.TaskState" json:"state,omitempty"` + Error *TaskError `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - State TaskState `protobuf:"varint,2,opt,name=state,proto3,enum=datadog.config.TaskState" json:"state,omitempty"` - Error *TaskError `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PackageStateTask) Reset() { *x = PackageStateTask{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[15] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1239,7 +1163,7 @@ func (x *PackageStateTask) String() string { func (*PackageStateTask) ProtoMessage() {} func (x *PackageStateTask) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[15] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[14] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1252,7 +1176,7 @@ func (x *PackageStateTask) ProtoReflect() protoreflect.Message { // Deprecated: Use PackageStateTask.ProtoReflect.Descriptor instead. 
func (*PackageStateTask) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{15} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{14} } func (x *PackageStateTask) GetId() string { @@ -1277,17 +1201,16 @@ func (x *PackageStateTask) GetError() *TaskError { } type TaskError struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Code uint64 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` unknownFields protoimpl.UnknownFields - - Code uint64 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TaskError) Reset() { *x = TaskError{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[16] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1299,7 +1222,7 @@ func (x *TaskError) String() string { func (*TaskError) ProtoMessage() {} func (x *TaskError) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[16] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[15] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1312,7 +1235,7 @@ func (x *TaskError) ProtoReflect() protoreflect.Message { // Deprecated: Use TaskError.ProtoReflect.Descriptor instead. func (*TaskError) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{16} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{15} } func (x *TaskError) GetCode() uint64 { @@ -1330,20 +1253,19 @@ func (x *TaskError) GetMessage() string { } type ConfigState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Version uint64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` + Product string `protobuf:"bytes,3,opt,name=product,proto3" json:"product,omitempty"` + ApplyState uint64 `protobuf:"varint,4,opt,name=apply_state,json=applyState,proto3" json:"apply_state,omitempty"` + ApplyError string `protobuf:"bytes,5,opt,name=apply_error,json=applyError,proto3" json:"apply_error,omitempty"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Version uint64 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"` - Product string `protobuf:"bytes,3,opt,name=product,proto3" json:"product,omitempty"` - ApplyState uint64 `protobuf:"varint,4,opt,name=apply_state,json=applyState,proto3" json:"apply_state,omitempty"` - ApplyError string `protobuf:"bytes,5,opt,name=apply_error,json=applyError,proto3" json:"apply_error,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ConfigState) Reset() { *x = ConfigState{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[17] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1355,7 +1277,7 @@ func (x *ConfigState) String() string { func (*ConfigState) ProtoMessage() {} func (x *ConfigState) 
ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[17] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[16] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1368,7 +1290,7 @@ func (x *ConfigState) ProtoReflect() protoreflect.Message { // Deprecated: Use ConfigState.ProtoReflect.Descriptor instead. func (*ConfigState) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{17} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{16} } func (x *ConfigState) GetId() string { @@ -1407,21 +1329,20 @@ func (x *ConfigState) GetApplyError() string { } type ClientState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RootVersion uint64 `protobuf:"varint,1,opt,name=root_version,json=rootVersion,proto3" json:"root_version,omitempty"` - TargetsVersion uint64 `protobuf:"varint,2,opt,name=targets_version,json=targetsVersion,proto3" json:"targets_version,omitempty"` - ConfigStates []*ConfigState `protobuf:"bytes,3,rep,name=config_states,json=configStates,proto3" json:"config_states,omitempty"` - HasError bool `protobuf:"varint,4,opt,name=has_error,json=hasError,proto3" json:"has_error,omitempty"` - Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` - BackendClientState []byte `protobuf:"bytes,6,opt,name=backend_client_state,json=backendClientState,proto3" json:"backend_client_state,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + RootVersion uint64 `protobuf:"varint,1,opt,name=root_version,json=rootVersion,proto3" json:"root_version,omitempty"` + TargetsVersion uint64 `protobuf:"varint,2,opt,name=targets_version,json=targetsVersion,proto3" json:"targets_version,omitempty"` + ConfigStates []*ConfigState `protobuf:"bytes,3,rep,name=config_states,json=configStates,proto3" json:"config_states,omitempty"` + HasError bool `protobuf:"varint,4,opt,name=has_error,json=hasError,proto3" json:"has_error,omitempty"` + Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + BackendClientState []byte `protobuf:"bytes,6,opt,name=backend_client_state,json=backendClientState,proto3" json:"backend_client_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientState) Reset() { *x = ClientState{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[18] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1433,7 +1354,7 @@ func (x *ClientState) String() string { func (*ClientState) ProtoMessage() {} func (x *ClientState) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[18] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[17] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1446,7 +1367,7 @@ func (x *ClientState) ProtoReflect() protoreflect.Message { // Deprecated: Use ClientState.ProtoReflect.Descriptor instead. 
func (*ClientState) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{18} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{17} } func (x *ClientState) GetRootVersion() uint64 { @@ -1492,17 +1413,16 @@ func (x *ClientState) GetBackendClientState() []byte { } type TargetFileHash struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Algorithm string `protobuf:"bytes,1,opt,name=algorithm,proto3" json:"algorithm,omitempty"` + Hash string `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"` unknownFields protoimpl.UnknownFields - - Algorithm string `protobuf:"bytes,1,opt,name=algorithm,proto3" json:"algorithm,omitempty"` - Hash string `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TargetFileHash) Reset() { *x = TargetFileHash{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[19] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1514,7 +1434,7 @@ func (x *TargetFileHash) String() string { func (*TargetFileHash) ProtoMessage() {} func (x *TargetFileHash) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[19] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[18] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1527,7 +1447,7 @@ func (x *TargetFileHash) ProtoReflect() protoreflect.Message { // Deprecated: Use TargetFileHash.ProtoReflect.Descriptor instead. func (*TargetFileHash) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{19} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{18} } func (x *TargetFileHash) GetAlgorithm() string { @@ -1545,18 +1465,17 @@ func (x *TargetFileHash) GetHash() string { } type TargetFileMeta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Length int64 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` + Hashes []*TargetFileHash `protobuf:"bytes,3,rep,name=hashes,proto3" json:"hashes,omitempty"` unknownFields protoimpl.UnknownFields - - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Length int64 `protobuf:"varint,2,opt,name=length,proto3" json:"length,omitempty"` - Hashes []*TargetFileHash `protobuf:"bytes,3,rep,name=hashes,proto3" json:"hashes,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TargetFileMeta) Reset() { *x = TargetFileMeta{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[20] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1568,7 +1487,7 @@ func (x *TargetFileMeta) String() string { func (*TargetFileMeta) ProtoMessage() {} func (x *TargetFileMeta) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[20] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[19] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1581,7 +1500,7 @@ func (x *TargetFileMeta) ProtoReflect() protoreflect.Message 
{ // Deprecated: Use TargetFileMeta.ProtoReflect.Descriptor instead. func (*TargetFileMeta) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{20} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{19} } func (x *TargetFileMeta) GetPath() string { @@ -1606,17 +1525,16 @@ func (x *TargetFileMeta) GetHashes() []*TargetFileHash { } type ClientGetConfigsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Client *Client `protobuf:"bytes,1,opt,name=client,proto3" json:"client,omitempty"` - CachedTargetFiles []*TargetFileMeta `protobuf:"bytes,2,rep,name=cached_target_files,json=cachedTargetFiles,proto3" json:"cached_target_files,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Client *Client `protobuf:"bytes,1,opt,name=client,proto3" json:"client,omitempty"` + CachedTargetFiles []*TargetFileMeta `protobuf:"bytes,2,rep,name=cached_target_files,json=cachedTargetFiles,proto3" json:"cached_target_files,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientGetConfigsRequest) Reset() { *x = ClientGetConfigsRequest{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[21] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1628,7 +1546,7 @@ func (x *ClientGetConfigsRequest) String() string { func (*ClientGetConfigsRequest) ProtoMessage() {} func (x *ClientGetConfigsRequest) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[21] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[20] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1641,7 +1559,7 @@ func (x *ClientGetConfigsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ClientGetConfigsRequest.ProtoReflect.Descriptor instead. 
func (*ClientGetConfigsRequest) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{21} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{20} } func (x *ClientGetConfigsRequest) GetClient() *Client { @@ -1659,19 +1577,18 @@ func (x *ClientGetConfigsRequest) GetCachedTargetFiles() []*TargetFileMeta { } type ClientGetConfigsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Roots [][]byte `protobuf:"bytes,1,rep,name=roots,proto3" json:"roots,omitempty"` + Targets []byte `protobuf:"bytes,2,opt,name=targets,proto3" json:"targets,omitempty"` + TargetFiles []*File `protobuf:"bytes,3,rep,name=target_files,json=targetFiles,proto3" json:"target_files,omitempty"` + ClientConfigs []string `protobuf:"bytes,4,rep,name=client_configs,json=clientConfigs,proto3" json:"client_configs,omitempty"` unknownFields protoimpl.UnknownFields - - Roots [][]byte `protobuf:"bytes,1,rep,name=roots,proto3" json:"roots,omitempty"` - Targets []byte `protobuf:"bytes,2,opt,name=targets,proto3" json:"targets,omitempty"` - TargetFiles []*File `protobuf:"bytes,3,rep,name=target_files,json=targetFiles,proto3" json:"target_files,omitempty"` - ClientConfigs []string `protobuf:"bytes,4,rep,name=client_configs,json=clientConfigs,proto3" json:"client_configs,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ClientGetConfigsResponse) Reset() { *x = ClientGetConfigsResponse{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[22] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1683,7 +1600,7 @@ func (x *ClientGetConfigsResponse) String() string { func (*ClientGetConfigsResponse) ProtoMessage() {} func (x *ClientGetConfigsResponse) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[22] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[21] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1696,7 +1613,7 @@ func (x *ClientGetConfigsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ClientGetConfigsResponse.ProtoReflect.Descriptor instead. 
func (*ClientGetConfigsResponse) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{22} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{21} } func (x *ClientGetConfigsResponse) GetRoots() [][]byte { @@ -1728,17 +1645,16 @@ func (x *ClientGetConfigsResponse) GetClientConfigs() []string { } type FileMetaState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` unknownFields protoimpl.UnknownFields - - Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` - Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` + sizeCache protoimpl.SizeCache } func (x *FileMetaState) Reset() { *x = FileMetaState{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[23] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1750,7 +1666,7 @@ func (x *FileMetaState) String() string { func (*FileMetaState) ProtoMessage() {} func (x *FileMetaState) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[23] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[22] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1763,7 +1679,7 @@ func (x *FileMetaState) ProtoReflect() protoreflect.Message { // Deprecated: Use FileMetaState.ProtoReflect.Descriptor instead. func (*FileMetaState) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{23} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{22} } func (x *FileMetaState) GetVersion() uint64 { @@ -1781,19 +1697,18 @@ func (x *FileMetaState) GetHash() string { } type GetStateConfigResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ConfigState map[string]*FileMetaState `protobuf:"bytes,1,rep,name=config_state,json=configState,proto3" json:"config_state,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - DirectorState map[string]*FileMetaState `protobuf:"bytes,2,rep,name=director_state,json=directorState,proto3" json:"director_state,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - TargetFilenames map[string]string `protobuf:"bytes,3,rep,name=target_filenames,json=targetFilenames,proto3" json:"target_filenames,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + state protoimpl.MessageState `protogen:"open.v1"` + ConfigState map[string]*FileMetaState `protobuf:"bytes,1,rep,name=config_state,json=configState,proto3" json:"config_state,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + DirectorState map[string]*FileMetaState `protobuf:"bytes,2,rep,name=director_state,json=directorState,proto3" json:"director_state,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + TargetFilenames map[string]string `protobuf:"bytes,3,rep,name=target_filenames,json=targetFilenames,proto3" json:"target_filenames,omitempty" 
protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` ActiveClients []*Client `protobuf:"bytes,4,rep,name=active_clients,json=activeClients,proto3" json:"active_clients,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetStateConfigResponse) Reset() { *x = GetStateConfigResponse{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[24] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1805,7 +1720,7 @@ func (x *GetStateConfigResponse) String() string { func (*GetStateConfigResponse) ProtoMessage() {} func (x *GetStateConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[24] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1818,7 +1733,7 @@ func (x *GetStateConfigResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetStateConfigResponse.ProtoReflect.Descriptor instead. func (*GetStateConfigResponse) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{24} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{23} } func (x *GetStateConfigResponse) GetConfigState() map[string]*FileMetaState { @@ -1850,22 +1765,21 @@ func (x *GetStateConfigResponse) GetActiveClients() []*Client { } type TracerPredicateV1 struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ClientID string `protobuf:"bytes,1,opt,name=clientID,proto3" json:"clientID,omitempty"` + Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` + Environment string `protobuf:"bytes,3,opt,name=environment,proto3" json:"environment,omitempty"` + AppVersion string `protobuf:"bytes,4,opt,name=appVersion,proto3" json:"appVersion,omitempty"` + TracerVersion string `protobuf:"bytes,5,opt,name=tracerVersion,proto3" json:"tracerVersion,omitempty"` + Language string `protobuf:"bytes,6,opt,name=language,proto3" json:"language,omitempty"` + RuntimeID string `protobuf:"bytes,7,opt,name=runtimeID,proto3" json:"runtimeID,omitempty"` unknownFields protoimpl.UnknownFields - - ClientID string `protobuf:"bytes,1,opt,name=clientID,proto3" json:"clientID,omitempty"` - Service string `protobuf:"bytes,2,opt,name=service,proto3" json:"service,omitempty"` - Environment string `protobuf:"bytes,3,opt,name=environment,proto3" json:"environment,omitempty"` - AppVersion string `protobuf:"bytes,4,opt,name=appVersion,proto3" json:"appVersion,omitempty"` - TracerVersion string `protobuf:"bytes,5,opt,name=tracerVersion,proto3" json:"tracerVersion,omitempty"` - Language string `protobuf:"bytes,6,opt,name=language,proto3" json:"language,omitempty"` - RuntimeID string `protobuf:"bytes,7,opt,name=runtimeID,proto3" json:"runtimeID,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TracerPredicateV1) Reset() { *x = TracerPredicateV1{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[25] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1877,7 +1791,7 @@ func (x *TracerPredicateV1) String() string { func (*TracerPredicateV1) ProtoMessage() {} func (x *TracerPredicateV1) ProtoReflect() protoreflect.Message { - mi 
:= &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[25] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[24] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1890,7 +1804,7 @@ func (x *TracerPredicateV1) ProtoReflect() protoreflect.Message { // Deprecated: Use TracerPredicateV1.ProtoReflect.Descriptor instead. func (*TracerPredicateV1) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{25} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{24} } func (x *TracerPredicateV1) GetClientID() string { @@ -1943,16 +1857,15 @@ func (x *TracerPredicateV1) GetRuntimeID() string { } type TracerPredicates struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - TracerPredicatesV1 []*TracerPredicateV1 `protobuf:"bytes,1,rep,name=tracer_predicates_v1,json=tracerPredicatesV1,proto3" json:"tracer_predicates_v1,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + TracerPredicatesV1 []*TracerPredicateV1 `protobuf:"bytes,1,rep,name=tracer_predicates_v1,json=tracerPredicatesV1,proto3" json:"tracer_predicates_v1,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TracerPredicates) Reset() { *x = TracerPredicates{} - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[26] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1964,7 +1877,7 @@ func (x *TracerPredicates) String() string { func (*TracerPredicates) ProtoMessage() {} func (x *TracerPredicates) ProtoReflect() protoreflect.Message { - mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[26] + mi := &file_datadog_remoteconfig_remoteconfig_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1977,7 +1890,7 @@ func (x *TracerPredicates) ProtoReflect() protoreflect.Message { // Deprecated: Use TracerPredicates.ProtoReflect.Descriptor instead. 
func (*TracerPredicates) Descriptor() ([]byte, []int) { - return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{26} + return file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP(), []int{25} } func (x *TracerPredicates) GetTracerPredicatesV1() []*TracerPredicateV1 { @@ -2163,7 +2076,7 @@ var file_datadog_remoteconfig_remoteconfig_proto_rawDesc = []byte{ 0x65, 0x52, 0x08, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x6b, 0x5f, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x12, 0x61, 0x76, 0x61, 0x69, 0x6c, - 0x61, 0x62, 0x6c, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x70, 0x61, 0x63, 0x65, 0x22, 0xbb, 0x03, + 0x61, 0x62, 0x6c, 0x65, 0x44, 0x69, 0x73, 0x6b, 0x53, 0x70, 0x61, 0x63, 0x65, 0x22, 0xc8, 0x02, 0x0a, 0x0c, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x61, 0x62, @@ -2175,171 +2088,159 @@ var file_datadog_remoteconfig_remoteconfig_proto_rawDesc = []byte{ 0x0a, 0x04, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x04, - 0x74, 0x61, 0x73, 0x6b, 0x12, 0x4d, 0x0a, 0x13, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x74, 0x61, 0x73, 0x6b, 0x12, 0x32, 0x0a, 0x15, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x19, 0x65, 0x78, 0x70, 0x65, + 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x65, 0x78, 0x70, + 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x06, 0x10, 0x07, + 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x4a, 0x04, 0x08, 0x08, 0x10, 0x09, 0x4a, 0x04, 0x08, 0x09, + 0x10, 0x0a, 0x4a, 0x04, 0x08, 0x0a, 0x10, 0x0b, 0x22, 0x84, 0x01, 0x0a, 0x10, 0x50, 0x61, 0x63, + 0x6b, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2f, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, + 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, + 0x61, 0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, + 0x39, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x72, 0x72, 
0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x0b, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x12, 0x1f, + 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x22, 0x80, 0x02, 0x0a, 0x0b, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x6f, 0x6f, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x72, 0x6f, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x5f, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x73, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0d, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1b, + 0x0a, 0x09, 0x68, 0x61, 0x73, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x08, 0x68, 0x61, 0x73, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x12, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x22, 0x48, 0x0a, 0x0e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, + 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1c, 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, + 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, + 0x74, 0x68, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x74, 0x0a, + 0x0e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, + 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, + 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 
0x02, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x36, 0x0a, 0x06, 0x68, + 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x68, 0x61, 0x73, + 0x68, 0x65, 0x73, 0x22, 0x99, 0x01, 0x0a, 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, + 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x2e, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, + 0x4e, 0x0a, 0x13, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x11, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x64, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, + 0xaa, 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x72, 0x6f, 0x6f, + 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x0c, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0d, + 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x22, 0x81, 0x05, 0x0a, 0x16, + 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, + 0x74, 0x65, 
0x12, 0x60, 0x0a, 0x0e, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x66, 0x0a, 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, + 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, + 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, + 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5d, 0x0a, 0x10, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x5f, 0x0a, 0x12, 0x44, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x11, 0x73, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x17, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, - 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x15, 0x65, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x4d, 0x0a, 0x13, 0x72, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, - 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x11, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, - 0x04, 0x08, 0x06, 0x10, 0x07, 0x4a, 0x04, 0x08, 0x07, 0x10, 0x08, 0x22, 0x54, 0x0a, 0x0d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x10, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, - 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0f, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, - 0x73, 0x22, 0x84, 0x01, 0x0a, 0x10, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x2f, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, - 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x39, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x93, 0x01, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x6c, 0x79, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0a, 0x61, 0x70, - 0x70, 0x6c, 0x79, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x70, 0x70, 0x6c, - 0x79, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, - 0x70, 0x70, 0x6c, 0x79, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x80, 0x02, 0x0a, 0x0b, 0x43, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x6f, 0x6f, - 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0b, 0x72, 0x6f, 0x6f, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x74, 0x61, 0x72, 
0x67, 0x65, 0x74, 0x73, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x64, - 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x68, 0x61, 0x73, 0x5f, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x68, 0x61, 0x73, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x61, - 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, - 0x64, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x22, 0x48, 0x0a, 0x0e, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x61, 0x73, 0x68, 0x12, 0x1c, - 0x0a, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x61, 0x6c, 0x67, 0x6f, 0x72, 0x69, 0x74, 0x68, 0x6d, 0x12, 0x12, 0x0a, 0x04, - 0x68, 0x61, 0x73, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, - 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x74, 0x0a, 0x0e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, - 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x65, - 0x6e, 0x67, 0x74, 0x68, 0x12, 0x36, 0x0a, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, - 0x48, 0x61, 0x73, 0x68, 0x52, 0x06, 0x68, 0x61, 0x73, 0x68, 0x65, 0x73, 0x22, 0x99, 0x01, 0x0a, - 0x17, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x06, 0x63, 0x6c, 0x69, 0x65, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, - 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, - 0x52, 0x06, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x13, 0x63, 0x61, 0x63, 0x68, - 0x65, 0x64, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, - 0x65, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x11, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x54, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x22, 0xaa, 0x01, 0x0a, 0x18, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0c, 0x52, 0x05, 0x72, 0x6f, 0x6f, 0x74, 0x73, 0x12, 0x18, 0x0a, 
0x07, 0x74, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x37, 0x0a, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, - 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x64, 0x61, - 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, - 0x65, 0x52, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x25, - 0x0a, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x73, 0x22, 0x3d, 0x0a, 0x0d, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x12, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x68, 0x61, 0x73, 0x68, 0x22, 0x81, 0x05, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x5a, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x60, 0x0a, 0x0e, 0x64, - 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x69, 0x72, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, - 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x66, 0x0a, - 0x10, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, - 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0e, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, - 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, - 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x43, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x0d, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x5d, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, - 0x61, 0x74, 
0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, - 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x5f, 0x0a, 0x12, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x33, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, - 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, - 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xeb, 0x01, 0x0a, 0x11, 0x54, 0x72, 0x61, - 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x31, 0x12, 0x1a, - 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, - 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, - 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x22, 0x67, 0x0a, 0x10, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x12, 0x53, 0x0a, 0x14, 0x74, 0x72, - 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x5f, - 0x76, 0x31, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, - 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, - 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x56, 0x31, 0x52, 0x12, 0x74, 0x72, 0x61, - 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 
0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x56, 0x31, 0x2a, - 0x4a, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, - 0x49, 0x44, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, - 0x47, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, - 0x0d, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0x03, - 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x42, 0x15, 0x5a, 0x13, 0x70, - 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, - 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x69, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x42, 0x0a, 0x14, 0x54, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xeb, 0x01, 0x0a, 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x56, 0x31, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, + 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, + 0x44, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x65, + 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0b, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x0a, + 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, + 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x22, 0x67, 0x0a, + 0x10, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x73, 0x12, 0x53, 0x0a, 0x14, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x72, 0x65, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x31, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x56, 0x31, 0x52, 0x12, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x73, 0x56, 0x31, 0x2a, 0x4a, 0x0a, 0x09, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x44, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 
0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x44, 0x4f, + 0x4e, 0x45, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x45, 0x10, 0x03, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, + 0x10, 0x04, 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -2355,7 +2256,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_rawDescGZIP() []byte { } var file_datadog_remoteconfig_remoteconfig_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_datadog_remoteconfig_remoteconfig_proto_msgTypes = make([]protoimpl.MessageInfo, 30) +var file_datadog_remoteconfig_remoteconfig_proto_msgTypes = make([]protoimpl.MessageInfo, 29) var file_datadog_remoteconfig_remoteconfig_proto_goTypes = []any{ (TaskState)(0), // 0: datadog.config.TaskState (*ConfigMetas)(nil), // 1: datadog.config.ConfigMetas @@ -2372,22 +2273,21 @@ var file_datadog_remoteconfig_remoteconfig_proto_goTypes = []any{ (*ClientAgent)(nil), // 12: datadog.config.ClientAgent (*ClientUpdater)(nil), // 13: datadog.config.ClientUpdater (*PackageState)(nil), // 14: datadog.config.PackageState - (*PoliciesState)(nil), // 15: datadog.config.PoliciesState - (*PackageStateTask)(nil), // 16: datadog.config.PackageStateTask - (*TaskError)(nil), // 17: datadog.config.TaskError - (*ConfigState)(nil), // 18: datadog.config.ConfigState - (*ClientState)(nil), // 19: datadog.config.ClientState - (*TargetFileHash)(nil), // 20: datadog.config.TargetFileHash - (*TargetFileMeta)(nil), // 21: datadog.config.TargetFileMeta - (*ClientGetConfigsRequest)(nil), // 22: datadog.config.ClientGetConfigsRequest - (*ClientGetConfigsResponse)(nil), // 23: datadog.config.ClientGetConfigsResponse - (*FileMetaState)(nil), // 24: datadog.config.FileMetaState - (*GetStateConfigResponse)(nil), // 25: datadog.config.GetStateConfigResponse - (*TracerPredicateV1)(nil), // 26: datadog.config.TracerPredicateV1 - (*TracerPredicates)(nil), // 27: datadog.config.TracerPredicates - nil, // 28: datadog.config.GetStateConfigResponse.ConfigStateEntry - nil, // 29: datadog.config.GetStateConfigResponse.DirectorStateEntry - nil, // 30: datadog.config.GetStateConfigResponse.TargetFilenamesEntry + (*PackageStateTask)(nil), // 15: datadog.config.PackageStateTask + (*TaskError)(nil), // 16: datadog.config.TaskError + (*ConfigState)(nil), // 17: datadog.config.ConfigState + (*ClientState)(nil), // 18: datadog.config.ClientState + (*TargetFileHash)(nil), // 19: datadog.config.TargetFileHash + (*TargetFileMeta)(nil), // 20: datadog.config.TargetFileMeta + (*ClientGetConfigsRequest)(nil), // 21: datadog.config.ClientGetConfigsRequest + (*ClientGetConfigsResponse)(nil), // 22: datadog.config.ClientGetConfigsResponse + (*FileMetaState)(nil), // 23: datadog.config.FileMetaState + (*GetStateConfigResponse)(nil), // 24: datadog.config.GetStateConfigResponse + (*TracerPredicateV1)(nil), // 25: datadog.config.TracerPredicateV1 + (*TracerPredicates)(nil), // 26: datadog.config.TracerPredicates + nil, // 27: datadog.config.GetStateConfigResponse.ConfigStateEntry + nil, // 28: datadog.config.GetStateConfigResponse.DirectorStateEntry + nil, // 29: datadog.config.GetStateConfigResponse.TargetFilenamesEntry } var file_datadog_remoteconfig_remoteconfig_proto_depIdxs = []int32{ 4, // 0: datadog.config.ConfigMetas.roots:type_name -> datadog.config.TopMeta @@ 
-2403,34 +2303,31 @@ var file_datadog_remoteconfig_remoteconfig_proto_depIdxs = []int32{ 1, // 10: datadog.config.LatestConfigsResponse.config_metas:type_name -> datadog.config.ConfigMetas 2, // 11: datadog.config.LatestConfigsResponse.director_metas:type_name -> datadog.config.DirectorMetas 5, // 12: datadog.config.LatestConfigsResponse.target_files:type_name -> datadog.config.File - 19, // 13: datadog.config.Client.state:type_name -> datadog.config.ClientState + 18, // 13: datadog.config.Client.state:type_name -> datadog.config.ClientState 11, // 14: datadog.config.Client.client_tracer:type_name -> datadog.config.ClientTracer 12, // 15: datadog.config.Client.client_agent:type_name -> datadog.config.ClientAgent 13, // 16: datadog.config.Client.client_updater:type_name -> datadog.config.ClientUpdater 14, // 17: datadog.config.ClientUpdater.packages:type_name -> datadog.config.PackageState - 16, // 18: datadog.config.PackageState.task:type_name -> datadog.config.PackageStateTask - 15, // 19: datadog.config.PackageState.stable_config_state:type_name -> datadog.config.PoliciesState - 15, // 20: datadog.config.PackageState.experiment_config_state:type_name -> datadog.config.PoliciesState - 15, // 21: datadog.config.PackageState.remote_config_state:type_name -> datadog.config.PoliciesState - 0, // 22: datadog.config.PackageStateTask.state:type_name -> datadog.config.TaskState - 17, // 23: datadog.config.PackageStateTask.error:type_name -> datadog.config.TaskError - 18, // 24: datadog.config.ClientState.config_states:type_name -> datadog.config.ConfigState - 20, // 25: datadog.config.TargetFileMeta.hashes:type_name -> datadog.config.TargetFileHash - 10, // 26: datadog.config.ClientGetConfigsRequest.client:type_name -> datadog.config.Client - 21, // 27: datadog.config.ClientGetConfigsRequest.cached_target_files:type_name -> datadog.config.TargetFileMeta - 5, // 28: datadog.config.ClientGetConfigsResponse.target_files:type_name -> datadog.config.File - 28, // 29: datadog.config.GetStateConfigResponse.config_state:type_name -> datadog.config.GetStateConfigResponse.ConfigStateEntry - 29, // 30: datadog.config.GetStateConfigResponse.director_state:type_name -> datadog.config.GetStateConfigResponse.DirectorStateEntry - 30, // 31: datadog.config.GetStateConfigResponse.target_filenames:type_name -> datadog.config.GetStateConfigResponse.TargetFilenamesEntry - 10, // 32: datadog.config.GetStateConfigResponse.active_clients:type_name -> datadog.config.Client - 26, // 33: datadog.config.TracerPredicates.tracer_predicates_v1:type_name -> datadog.config.TracerPredicateV1 - 24, // 34: datadog.config.GetStateConfigResponse.ConfigStateEntry.value:type_name -> datadog.config.FileMetaState - 24, // 35: datadog.config.GetStateConfigResponse.DirectorStateEntry.value:type_name -> datadog.config.FileMetaState - 36, // [36:36] is the sub-list for method output_type - 36, // [36:36] is the sub-list for method input_type - 36, // [36:36] is the sub-list for extension type_name - 36, // [36:36] is the sub-list for extension extendee - 0, // [0:36] is the sub-list for field type_name + 15, // 18: datadog.config.PackageState.task:type_name -> datadog.config.PackageStateTask + 0, // 19: datadog.config.PackageStateTask.state:type_name -> datadog.config.TaskState + 16, // 20: datadog.config.PackageStateTask.error:type_name -> datadog.config.TaskError + 17, // 21: datadog.config.ClientState.config_states:type_name -> datadog.config.ConfigState + 19, // 22: datadog.config.TargetFileMeta.hashes:type_name -> 
datadog.config.TargetFileHash + 10, // 23: datadog.config.ClientGetConfigsRequest.client:type_name -> datadog.config.Client + 20, // 24: datadog.config.ClientGetConfigsRequest.cached_target_files:type_name -> datadog.config.TargetFileMeta + 5, // 25: datadog.config.ClientGetConfigsResponse.target_files:type_name -> datadog.config.File + 27, // 26: datadog.config.GetStateConfigResponse.config_state:type_name -> datadog.config.GetStateConfigResponse.ConfigStateEntry + 28, // 27: datadog.config.GetStateConfigResponse.director_state:type_name -> datadog.config.GetStateConfigResponse.DirectorStateEntry + 29, // 28: datadog.config.GetStateConfigResponse.target_filenames:type_name -> datadog.config.GetStateConfigResponse.TargetFilenamesEntry + 10, // 29: datadog.config.GetStateConfigResponse.active_clients:type_name -> datadog.config.Client + 25, // 30: datadog.config.TracerPredicates.tracer_predicates_v1:type_name -> datadog.config.TracerPredicateV1 + 23, // 31: datadog.config.GetStateConfigResponse.ConfigStateEntry.value:type_name -> datadog.config.FileMetaState + 23, // 32: datadog.config.GetStateConfigResponse.DirectorStateEntry.value:type_name -> datadog.config.FileMetaState + 33, // [33:33] is the sub-list for method output_type + 33, // [33:33] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name } func init() { file_datadog_remoteconfig_remoteconfig_proto_init() } @@ -2444,7 +2341,7 @@ func file_datadog_remoteconfig_remoteconfig_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_datadog_remoteconfig_remoteconfig_proto_rawDesc, NumEnums: 1, - NumMessages: 30, + NumMessages: 29, NumExtensions: 0, NumServices: 0, }, diff --git a/pkg/proto/pbgo/core/remoteconfig_gen.go b/pkg/proto/pbgo/core/remoteconfig_gen.go index cdaaa9e7c3d0a..73545feb77358 100644 --- a/pkg/proto/pbgo/core/remoteconfig_gen.go +++ b/pkg/proto/pbgo/core/remoteconfig_gen.go @@ -3059,9 +3059,9 @@ func (z OrgStatusResponse) Msgsize() (s int) { // MarshalMsg implements msgp.Marshaler func (z *PackageState) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 7 + // map header, size 6 // string "Package" - o = append(o, 0x87, 0xa7, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65) + o = append(o, 0x86, 0xa7, 0x50, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65) o = msgp.AppendString(o, z.Package) // string "StableVersion" o = append(o, 0xad, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) @@ -3080,54 +3080,12 @@ func (z *PackageState) MarshalMsg(b []byte) (o []byte, err error) { return } } - // string "StableConfigState" - o = append(o, 0xb1, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65) - if z.StableConfigState == nil { - o = msgp.AppendNil(o) - } else { - // map header, size 2 - // string "Version" - o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.StableConfigState.Version) - // string "MatchedPolicies" - o = append(o, 0xaf, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.StableConfigState.MatchedPolicies))) - for za0001 := range z.StableConfigState.MatchedPolicies { - o = msgp.AppendString(o, z.StableConfigState.MatchedPolicies[za0001]) - } - } - // string "ExperimentConfigState" - o = append(o, 0xb5, 
0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65) - if z.ExperimentConfigState == nil { - o = msgp.AppendNil(o) - } else { - // map header, size 2 - // string "Version" - o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.ExperimentConfigState.Version) - // string "MatchedPolicies" - o = append(o, 0xaf, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.ExperimentConfigState.MatchedPolicies))) - for za0002 := range z.ExperimentConfigState.MatchedPolicies { - o = msgp.AppendString(o, z.ExperimentConfigState.MatchedPolicies[za0002]) - } - } - // string "RemoteConfigState" - o = append(o, 0xb1, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x53, 0x74, 0x61, 0x74, 0x65) - if z.RemoteConfigState == nil { - o = msgp.AppendNil(o) - } else { - // map header, size 2 - // string "Version" - o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.RemoteConfigState.Version) - // string "MatchedPolicies" - o = append(o, 0xaf, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.RemoteConfigState.MatchedPolicies))) - for za0003 := range z.RemoteConfigState.MatchedPolicies { - o = msgp.AppendString(o, z.RemoteConfigState.MatchedPolicies[za0003]) - } - } + // string "StableConfigVersion" + o = append(o, 0xb3, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.StableConfigVersion) + // string "ExperimentConfigVersion" + o = append(o, 0xb7, 0x45, 0x78, 0x70, 0x65, 0x72, 0x69, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) + o = msgp.AppendString(o, z.ExperimentConfigVersion) return } @@ -3184,182 +3142,17 @@ func (z *PackageState) UnmarshalMsg(bts []byte) (o []byte, err error) { return } } - case "StableConfigState": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.StableConfigState = nil - } else { - if z.StableConfigState == nil { - z.StableConfigState = new(PoliciesState) - } - var zb0002 uint32 - zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "StableConfigState") - return - } - for zb0002 > 0 { - zb0002-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "StableConfigState") - return - } - switch msgp.UnsafeString(field) { - case "Version": - z.StableConfigState.Version, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "StableConfigState", "Version") - return - } - case "MatchedPolicies": - var zb0003 uint32 - zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "StableConfigState", "MatchedPolicies") - return - } - if cap(z.StableConfigState.MatchedPolicies) >= int(zb0003) { - z.StableConfigState.MatchedPolicies = (z.StableConfigState.MatchedPolicies)[:zb0003] - } else { - z.StableConfigState.MatchedPolicies = make([]string, zb0003) - } - for za0001 := range z.StableConfigState.MatchedPolicies { - z.StableConfigState.MatchedPolicies[za0001], bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "StableConfigState", "MatchedPolicies", za0001) - return - } - } - default: - bts, 
err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "StableConfigState") - return - } - } - } - } - case "ExperimentConfigState": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.ExperimentConfigState = nil - } else { - if z.ExperimentConfigState == nil { - z.ExperimentConfigState = new(PoliciesState) - } - var zb0004 uint32 - zb0004, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ExperimentConfigState") - return - } - for zb0004 > 0 { - zb0004-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "ExperimentConfigState") - return - } - switch msgp.UnsafeString(field) { - case "Version": - z.ExperimentConfigState.Version, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ExperimentConfigState", "Version") - return - } - case "MatchedPolicies": - var zb0005 uint32 - zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ExperimentConfigState", "MatchedPolicies") - return - } - if cap(z.ExperimentConfigState.MatchedPolicies) >= int(zb0005) { - z.ExperimentConfigState.MatchedPolicies = (z.ExperimentConfigState.MatchedPolicies)[:zb0005] - } else { - z.ExperimentConfigState.MatchedPolicies = make([]string, zb0005) - } - for za0002 := range z.ExperimentConfigState.MatchedPolicies { - z.ExperimentConfigState.MatchedPolicies[za0002], bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "ExperimentConfigState", "MatchedPolicies", za0002) - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "ExperimentConfigState") - return - } - } - } + case "StableConfigVersion": + z.StableConfigVersion, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "StableConfigVersion") + return } - case "RemoteConfigState": - if msgp.IsNil(bts) { - bts, err = msgp.ReadNilBytes(bts) - if err != nil { - return - } - z.RemoteConfigState = nil - } else { - if z.RemoteConfigState == nil { - z.RemoteConfigState = new(PoliciesState) - } - var zb0006 uint32 - zb0006, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "RemoteConfigState") - return - } - for zb0006 > 0 { - zb0006-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err, "RemoteConfigState") - return - } - switch msgp.UnsafeString(field) { - case "Version": - z.RemoteConfigState.Version, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "RemoteConfigState", "Version") - return - } - case "MatchedPolicies": - var zb0007 uint32 - zb0007, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "RemoteConfigState", "MatchedPolicies") - return - } - if cap(z.RemoteConfigState.MatchedPolicies) >= int(zb0007) { - z.RemoteConfigState.MatchedPolicies = (z.RemoteConfigState.MatchedPolicies)[:zb0007] - } else { - z.RemoteConfigState.MatchedPolicies = make([]string, zb0007) - } - for za0003 := range z.RemoteConfigState.MatchedPolicies { - z.RemoteConfigState.MatchedPolicies[za0003], bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "RemoteConfigState", "MatchedPolicies", za0003) - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err, "RemoteConfigState") - return - } - } - } + case "ExperimentConfigVersion": + z.ExperimentConfigVersion, bts, err = 
msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ExperimentConfigVersion") + return } default: bts, err = msgp.Skip(bts) @@ -3381,33 +3174,7 @@ func (z *PackageState) Msgsize() (s int) { } else { s += z.Task.Msgsize() } - s += 18 - if z.StableConfigState == nil { - s += msgp.NilSize - } else { - s += 1 + 8 + msgp.StringPrefixSize + len(z.StableConfigState.Version) + 16 + msgp.ArrayHeaderSize - for za0001 := range z.StableConfigState.MatchedPolicies { - s += msgp.StringPrefixSize + len(z.StableConfigState.MatchedPolicies[za0001]) - } - } - s += 22 - if z.ExperimentConfigState == nil { - s += msgp.NilSize - } else { - s += 1 + 8 + msgp.StringPrefixSize + len(z.ExperimentConfigState.Version) + 16 + msgp.ArrayHeaderSize - for za0002 := range z.ExperimentConfigState.MatchedPolicies { - s += msgp.StringPrefixSize + len(z.ExperimentConfigState.MatchedPolicies[za0002]) - } - } - s += 18 - if z.RemoteConfigState == nil { - s += msgp.NilSize - } else { - s += 1 + 8 + msgp.StringPrefixSize + len(z.RemoteConfigState.Version) + 16 + msgp.ArrayHeaderSize - for za0003 := range z.RemoteConfigState.MatchedPolicies { - s += msgp.StringPrefixSize + len(z.RemoteConfigState.MatchedPolicies[za0003]) - } - } + s += 20 + msgp.StringPrefixSize + len(z.StableConfigVersion) + 24 + msgp.StringPrefixSize + len(z.ExperimentConfigVersion) return } @@ -3540,86 +3307,6 @@ func (z *PackageStateTask) Msgsize() (s int) { return } -// MarshalMsg implements msgp.Marshaler -func (z *PoliciesState) MarshalMsg(b []byte) (o []byte, err error) { - o = msgp.Require(b, z.Msgsize()) - // map header, size 2 - // string "Version" - o = append(o, 0x82, 0xa7, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e) - o = msgp.AppendString(o, z.Version) - // string "MatchedPolicies" - o = append(o, 0xaf, 0x4d, 0x61, 0x74, 0x63, 0x68, 0x65, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73) - o = msgp.AppendArrayHeader(o, uint32(len(z.MatchedPolicies))) - for za0001 := range z.MatchedPolicies { - o = msgp.AppendString(o, z.MatchedPolicies[za0001]) - } - return -} - -// UnmarshalMsg implements msgp.Unmarshaler -func (z *PoliciesState) UnmarshalMsg(bts []byte) (o []byte, err error) { - var field []byte - _ = field - var zb0001 uint32 - zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - for zb0001 > 0 { - zb0001-- - field, bts, err = msgp.ReadMapKeyZC(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - switch msgp.UnsafeString(field) { - case "Version": - z.Version, bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "Version") - return - } - case "MatchedPolicies": - var zb0002 uint32 - zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) - if err != nil { - err = msgp.WrapError(err, "MatchedPolicies") - return - } - if cap(z.MatchedPolicies) >= int(zb0002) { - z.MatchedPolicies = (z.MatchedPolicies)[:zb0002] - } else { - z.MatchedPolicies = make([]string, zb0002) - } - for za0001 := range z.MatchedPolicies { - z.MatchedPolicies[za0001], bts, err = msgp.ReadStringBytes(bts) - if err != nil { - err = msgp.WrapError(err, "MatchedPolicies", za0001) - return - } - } - default: - bts, err = msgp.Skip(bts) - if err != nil { - err = msgp.WrapError(err) - return - } - } - } - o = bts - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *PoliciesState) Msgsize() (s int) { - s = 1 + 8 + msgp.StringPrefixSize + len(z.Version) + 16 + msgp.ArrayHeaderSize - for za0001 := 
range z.MatchedPolicies { - s += msgp.StringPrefixSize + len(z.MatchedPolicies[za0001]) - } - return -} - // MarshalMsg implements msgp.Marshaler func (z TargetFileHash) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) diff --git a/pkg/proto/pbgo/core/remoteconfig_gen_test.go b/pkg/proto/pbgo/core/remoteconfig_gen_test.go index 4f572c39bff7a..8bb705b8db5b5 100644 --- a/pkg/proto/pbgo/core/remoteconfig_gen_test.go +++ b/pkg/proto/pbgo/core/remoteconfig_gen_test.go @@ -1168,64 +1168,6 @@ func BenchmarkUnmarshalPackageStateTask(b *testing.B) { } } -func TestMarshalUnmarshalPoliciesState(t *testing.T) { - v := PoliciesState{} - bts, err := v.MarshalMsg(nil) - if err != nil { - t.Fatal(err) - } - left, err := v.UnmarshalMsg(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) - } - - left, err = msgp.Skip(bts) - if err != nil { - t.Fatal(err) - } - if len(left) > 0 { - t.Errorf("%d bytes left over after Skip(): %q", len(left), left) - } -} - -func BenchmarkMarshalMsgPoliciesState(b *testing.B) { - v := PoliciesState{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalMsg(nil) - } -} - -func BenchmarkAppendMsgPoliciesState(b *testing.B) { - v := PoliciesState{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalMsg(bts[0:0]) - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalMsg(bts[0:0]) - } -} - -func BenchmarkUnmarshalPoliciesState(b *testing.B) { - v := PoliciesState{} - bts, _ := v.MarshalMsg(nil) - b.ReportAllocs() - b.SetBytes(int64(len(bts))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := v.UnmarshalMsg(bts) - if err != nil { - b.Fatal(err) - } - } -} - func TestMarshalUnmarshalTargetFileHash(t *testing.T) { v := TargetFileHash{} bts, err := v.MarshalMsg(nil) diff --git a/pkg/proto/pbgo/core/workloadmeta.pb.go b/pkg/proto/pbgo/core/workloadmeta.pb.go index a6299b85850fb..5e7347de64fe0 100644 --- a/pkg/proto/pbgo/core/workloadmeta.pb.go +++ b/pkg/proto/pbgo/core/workloadmeta.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/workloadmeta/workloadmeta.proto package core @@ -179,6 +179,7 @@ const ( Runtime_CRIO Runtime = 3 Runtime_GARDEN Runtime = 4 Runtime_ECS_FARGATE Runtime = 5 + Runtime_UNKNOWN Runtime = 6 ) // Enum value maps for Runtime. 
@@ -190,6 +191,7 @@ var ( 3: "CRIO", 4: "GARDEN", 5: "ECS_FARGATE", + 6: "UNKNOWN", } Runtime_value = map[string]int32{ "DOCKER": 0, @@ -198,6 +200,7 @@ var ( "CRIO": 3, "GARDEN": 4, "ECS_FARGATE": 5, + "UNKNOWN": 6, } ) @@ -382,13 +385,12 @@ func (ECSLaunchType) EnumDescriptor() ([]byte, []int) { } type WorkloadmetaFilter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Kinds []WorkloadmetaKind `protobuf:"varint,1,rep,packed,name=kinds,proto3,enum=datadog.workloadmeta.WorkloadmetaKind" json:"kinds,omitempty"` + Source WorkloadmetaSource `protobuf:"varint,2,opt,name=source,proto3,enum=datadog.workloadmeta.WorkloadmetaSource" json:"source,omitempty"` + EventType WorkloadmetaEventType `protobuf:"varint,3,opt,name=eventType,proto3,enum=datadog.workloadmeta.WorkloadmetaEventType" json:"eventType,omitempty"` unknownFields protoimpl.UnknownFields - - Kinds []WorkloadmetaKind `protobuf:"varint,1,rep,packed,name=kinds,proto3,enum=datadog.workloadmeta.WorkloadmetaKind" json:"kinds,omitempty"` - Source WorkloadmetaSource `protobuf:"varint,2,opt,name=source,proto3,enum=datadog.workloadmeta.WorkloadmetaSource" json:"source,omitempty"` - EventType WorkloadmetaEventType `protobuf:"varint,3,opt,name=eventType,proto3,enum=datadog.workloadmeta.WorkloadmetaEventType" json:"eventType,omitempty"` + sizeCache protoimpl.SizeCache } func (x *WorkloadmetaFilter) Reset() { @@ -443,11 +445,10 @@ func (x *WorkloadmetaFilter) GetEventType() WorkloadmetaEventType { } type WorkloadmetaStreamRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Filter *WorkloadmetaFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` unknownFields protoimpl.UnknownFields - - Filter *WorkloadmetaFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + sizeCache protoimpl.SizeCache } func (x *WorkloadmetaStreamRequest) Reset() { @@ -488,12 +489,11 @@ func (x *WorkloadmetaStreamRequest) GetFilter() *WorkloadmetaFilter { } type WorkloadmetaEntityId struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Kind WorkloadmetaKind `protobuf:"varint,1,opt,name=kind,proto3,enum=datadog.workloadmeta.WorkloadmetaKind" json:"kind,omitempty"` + Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields - - Kind WorkloadmetaKind `protobuf:"varint,1,opt,name=kind,proto3,enum=datadog.workloadmeta.WorkloadmetaKind" json:"kind,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *WorkloadmetaEntityId) Reset() { @@ -541,14 +541,13 @@ func (x *WorkloadmetaEntityId) GetId() string { } type EntityMeta struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` unknownFields protoimpl.UnknownFields - - 
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Annotations map[string]string `protobuf:"bytes,3,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + sizeCache protoimpl.SizeCache } func (x *EntityMeta) Reset() { @@ -610,16 +609,15 @@ func (x *EntityMeta) GetLabels() map[string]string { } type ContainerImage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + RawName string `protobuf:"bytes,2,opt,name=rawName,proto3" json:"rawName,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + ShortName string `protobuf:"bytes,4,opt,name=shortName,proto3" json:"shortName,omitempty"` + Tag string `protobuf:"bytes,5,opt,name=tag,proto3" json:"tag,omitempty"` + RepoDigest string `protobuf:"bytes,6,opt,name=repo_digest,json=repoDigest,proto3" json:"repo_digest,omitempty"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - RawName string `protobuf:"bytes,2,opt,name=rawName,proto3" json:"rawName,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - ShortName string `protobuf:"bytes,4,opt,name=shortName,proto3" json:"shortName,omitempty"` - Tag string `protobuf:"bytes,5,opt,name=tag,proto3" json:"tag,omitempty"` - RepoDigest string `protobuf:"bytes,6,opt,name=repo_digest,json=repoDigest,proto3" json:"repo_digest,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ContainerImage) Reset() { @@ -695,13 +693,12 @@ func (x *ContainerImage) GetRepoDigest() string { } type ContainerPort struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + Protocol string `protobuf:"bytes,3,opt,name=protocol,proto3" json:"protocol,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - Protocol string `protobuf:"bytes,3,opt,name=protocol,proto3" json:"protocol,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ContainerPort) Reset() { @@ -756,17 +753,16 @@ func (x *ContainerPort) GetProtocol() string { } type ContainerState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Running bool `protobuf:"varint,1,opt,name=running,proto3" json:"running,omitempty"` + Status ContainerStatus `protobuf:"varint,2,opt,name=status,proto3,enum=datadog.workloadmeta.ContainerStatus" json:"status,omitempty"` + Health ContainerHealth `protobuf:"varint,3,opt,name=health,proto3,enum=datadog.workloadmeta.ContainerHealth" json:"health,omitempty"` + CreatedAt int64 `protobuf:"varint,4,opt,name=createdAt,proto3" json:"createdAt,omitempty"` + StartedAt int64 `protobuf:"varint,5,opt,name=startedAt,proto3" 
json:"startedAt,omitempty"` + FinishedAt int64 `protobuf:"varint,6,opt,name=finishedAt,proto3" json:"finishedAt,omitempty"` + ExitCode int64 `protobuf:"varint,7,opt,name=exitCode,proto3" json:"exitCode,omitempty"` unknownFields protoimpl.UnknownFields - - Running bool `protobuf:"varint,1,opt,name=running,proto3" json:"running,omitempty"` - Status ContainerStatus `protobuf:"varint,2,opt,name=status,proto3,enum=datadog.workloadmeta.ContainerStatus" json:"status,omitempty"` - Health ContainerHealth `protobuf:"varint,3,opt,name=health,proto3,enum=datadog.workloadmeta.ContainerHealth" json:"health,omitempty"` - CreatedAt int64 `protobuf:"varint,4,opt,name=createdAt,proto3" json:"createdAt,omitempty"` - StartedAt int64 `protobuf:"varint,5,opt,name=startedAt,proto3" json:"startedAt,omitempty"` - FinishedAt int64 `protobuf:"varint,6,opt,name=finishedAt,proto3" json:"finishedAt,omitempty"` - ExitCode int64 `protobuf:"varint,7,opt,name=exitCode,proto3" json:"exitCode,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ContainerState) Reset() { @@ -849,22 +845,21 @@ func (x *ContainerState) GetExitCode() int64 { } type Container struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + EntityId *WorkloadmetaEntityId `protobuf:"bytes,1,opt,name=entityId,proto3" json:"entityId,omitempty"` + EntityMeta *EntityMeta `protobuf:"bytes,2,opt,name=entityMeta,proto3" json:"entityMeta,omitempty"` + EnvVars map[string]string `protobuf:"bytes,3,rep,name=envVars,proto3" json:"envVars,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Hostname string `protobuf:"bytes,4,opt,name=hostname,proto3" json:"hostname,omitempty"` + Image *ContainerImage `protobuf:"bytes,5,opt,name=image,proto3" json:"image,omitempty"` + NetworkIps map[string]string `protobuf:"bytes,6,rep,name=networkIps,proto3" json:"networkIps,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Pid int32 `protobuf:"varint,7,opt,name=pid,proto3" json:"pid,omitempty"` + Ports []*ContainerPort `protobuf:"bytes,8,rep,name=ports,proto3" json:"ports,omitempty"` + Runtime Runtime `protobuf:"varint,9,opt,name=runtime,proto3,enum=datadog.workloadmeta.Runtime" json:"runtime,omitempty"` + State *ContainerState `protobuf:"bytes,10,opt,name=state,proto3" json:"state,omitempty"` + CollectorTags []string `protobuf:"bytes,11,rep,name=collectorTags,proto3" json:"collectorTags,omitempty"` + CgroupPath string `protobuf:"bytes,12,opt,name=cgroupPath,proto3" json:"cgroupPath,omitempty"` unknownFields protoimpl.UnknownFields - - EntityId *WorkloadmetaEntityId `protobuf:"bytes,1,opt,name=entityId,proto3" json:"entityId,omitempty"` - EntityMeta *EntityMeta `protobuf:"bytes,2,opt,name=entityMeta,proto3" json:"entityMeta,omitempty"` - EnvVars map[string]string `protobuf:"bytes,3,rep,name=envVars,proto3" json:"envVars,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Hostname string `protobuf:"bytes,4,opt,name=hostname,proto3" json:"hostname,omitempty"` - Image *ContainerImage `protobuf:"bytes,5,opt,name=image,proto3" json:"image,omitempty"` - NetworkIps map[string]string `protobuf:"bytes,6,rep,name=networkIps,proto3" json:"networkIps,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Pid int32 `protobuf:"varint,7,opt,name=pid,proto3" json:"pid,omitempty"` - Ports []*ContainerPort `protobuf:"bytes,8,rep,name=ports,proto3" 
json:"ports,omitempty"` - Runtime Runtime `protobuf:"varint,9,opt,name=runtime,proto3,enum=datadog.workloadmeta.Runtime" json:"runtime,omitempty"` - State *ContainerState `protobuf:"bytes,10,opt,name=state,proto3" json:"state,omitempty"` - CollectorTags []string `protobuf:"bytes,11,rep,name=collectorTags,proto3" json:"collectorTags,omitempty"` - CgroupPath string `protobuf:"bytes,12,opt,name=cgroupPath,proto3" json:"cgroupPath,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Container) Reset() { @@ -982,13 +977,12 @@ func (x *Container) GetCgroupPath() string { } type KubernetesPodOwner struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` unknownFields protoimpl.UnknownFields - - Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` + sizeCache protoimpl.SizeCache } func (x *KubernetesPodOwner) Reset() { @@ -1043,13 +1037,12 @@ func (x *KubernetesPodOwner) GetId() string { } type OrchestratorContainer struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Image *ContainerImage `protobuf:"bytes,3,opt,name=image,proto3" json:"image,omitempty"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Image *ContainerImage `protobuf:"bytes,3,opt,name=image,proto3" json:"image,omitempty"` + sizeCache protoimpl.SizeCache } func (x *OrchestratorContainer) Reset() { @@ -1104,10 +1097,7 @@ func (x *OrchestratorContainer) GetImage() *ContainerImage { } type KubernetesPod struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` EntityId *WorkloadmetaEntityId `protobuf:"bytes,1,opt,name=entityId,proto3" json:"entityId,omitempty"` EntityMeta *EntityMeta `protobuf:"bytes,2,opt,name=entityMeta,proto3" json:"entityMeta,omitempty"` Owners []*KubernetesPodOwner `protobuf:"bytes,3,rep,name=owners,proto3" json:"owners,omitempty"` @@ -1119,9 +1109,11 @@ type KubernetesPod struct { PriorityClass string `protobuf:"bytes,9,opt,name=priorityClass,proto3" json:"priorityClass,omitempty"` QosClass string `protobuf:"bytes,10,opt,name=qosClass,proto3" json:"qosClass,omitempty"` KubeServices []string `protobuf:"bytes,11,rep,name=kubeServices,proto3" json:"kubeServices,omitempty"` - NamespaceLabels map[string]string `protobuf:"bytes,12,rep,name=namespaceLabels,proto3" json:"namespaceLabels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamespaceLabels map[string]string `protobuf:"bytes,12,rep,name=namespaceLabels,proto3" json:"namespaceLabels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` InitContainers []*OrchestratorContainer `protobuf:"bytes,13,rep,name=InitContainers,proto3" 
json:"InitContainers,omitempty"` RuntimeClass string `protobuf:"bytes,14,opt,name=runtimeClass,proto3" json:"runtimeClass,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *KubernetesPod) Reset() { @@ -1253,14 +1245,11 @@ func (x *KubernetesPod) GetRuntimeClass() string { } type ECSTask struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` EntityId *WorkloadmetaEntityId `protobuf:"bytes,1,opt,name=entityId,proto3" json:"entityId,omitempty"` EntityMeta *EntityMeta `protobuf:"bytes,2,opt,name=entityMeta,proto3" json:"entityMeta,omitempty"` - Tags map[string]string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - ContainerInstanceTags map[string]string `protobuf:"bytes,4,rep,name=containerInstanceTags,proto3" json:"containerInstanceTags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Tags map[string]string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + ContainerInstanceTags map[string]string `protobuf:"bytes,4,rep,name=containerInstanceTags,proto3" json:"containerInstanceTags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` ClusterName string `protobuf:"bytes,5,opt,name=clusterName,proto3" json:"clusterName,omitempty"` Region string `protobuf:"bytes,6,opt,name=region,proto3" json:"region,omitempty"` AvailabilityZone string `protobuf:"bytes,7,opt,name=availabilityZone,proto3" json:"availabilityZone,omitempty"` @@ -1269,6 +1258,8 @@ type ECSTask struct { LaunchType ECSLaunchType `protobuf:"varint,10,opt,name=launchType,proto3,enum=datadog.workloadmeta.ECSLaunchType" json:"launchType,omitempty"` Containers []*OrchestratorContainer `protobuf:"bytes,11,rep,name=containers,proto3" json:"containers,omitempty"` AwsAccountID int64 `protobuf:"varint,12,opt,name=awsAccountID,proto3" json:"awsAccountID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ECSTask) Reset() { @@ -1386,14 +1377,13 @@ func (x *ECSTask) GetAwsAccountID() int64 { } type WorkloadmetaEvent struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Type WorkloadmetaEventType `protobuf:"varint,1,opt,name=type,proto3,enum=datadog.workloadmeta.WorkloadmetaEventType" json:"type,omitempty"` + Container *Container `protobuf:"bytes,2,opt,name=container,proto3" json:"container,omitempty"` + KubernetesPod *KubernetesPod `protobuf:"bytes,3,opt,name=kubernetesPod,proto3" json:"kubernetesPod,omitempty"` + EcsTask *ECSTask `protobuf:"bytes,4,opt,name=ecsTask,proto3" json:"ecsTask,omitempty"` unknownFields protoimpl.UnknownFields - - Type WorkloadmetaEventType `protobuf:"varint,1,opt,name=type,proto3,enum=datadog.workloadmeta.WorkloadmetaEventType" json:"type,omitempty"` - Container *Container `protobuf:"bytes,2,opt,name=container,proto3" json:"container,omitempty"` - KubernetesPod *KubernetesPod `protobuf:"bytes,3,opt,name=kubernetesPod,proto3" json:"kubernetesPod,omitempty"` - EcsTask *ECSTask `protobuf:"bytes,4,opt,name=ecsTask,proto3" json:"ecsTask,omitempty"` + sizeCache protoimpl.SizeCache } func (x *WorkloadmetaEvent) Reset() { @@ -1455,11 +1445,10 @@ func (x *WorkloadmetaEvent) GetEcsTask() 
*ECSTask { } type WorkloadmetaStreamResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Events []*WorkloadmetaEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` unknownFields protoimpl.UnknownFields - - Events []*WorkloadmetaEvent `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` + sizeCache protoimpl.SizeCache } func (x *WorkloadmetaStreamResponse) Reset() { @@ -1787,37 +1776,38 @@ var file_datadog_workloadmeta_workloadmeta_proto_rawDesc = []byte{ 0x0a, 0x0e, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x53, 0x45, 0x54, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, - 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x02, 0x2a, 0x58, 0x0a, 0x07, + 0x54, 0x59, 0x50, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x45, 0x54, 0x10, 0x02, 0x2a, 0x65, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x4f, 0x43, 0x4b, 0x45, 0x52, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x4f, 0x44, 0x4d, 0x41, 0x4e, 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x43, 0x52, 0x49, 0x4f, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x41, 0x52, 0x44, 0x45, 0x4e, 0x10, 0x04, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x43, 0x53, 0x5f, 0x46, 0x41, 0x52, - 0x47, 0x41, 0x54, 0x45, 0x10, 0x05, 0x2a, 0xc7, 0x01, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x74, 0x61, - 0x69, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, - 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, - 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x52, 0x45, - 0x41, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, - 0x4e, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, - 0x4e, 0x47, 0x10, 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, - 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x41, 0x52, 0x54, - 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, - 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x41, 0x55, 0x53, 0x45, 0x44, - 0x10, 0x04, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, - 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x05, - 0x2a, 0x6d, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, - 0x5f, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x48, - 0x45, 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, - 0x1e, 0x0a, 0x1a, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, - 0x4c, 0x54, 0x48, 0x5f, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x2a, - 0x25, 0x0a, 0x0d, 0x45, 0x43, 0x53, 0x4c, 0x61, 
0x75, 0x6e, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x07, 0x0a, 0x03, 0x45, 0x43, 0x32, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x52, - 0x47, 0x41, 0x54, 0x45, 0x10, 0x01, 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x47, 0x41, 0x54, 0x45, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, + 0x4e, 0x10, 0x06, 0x2a, 0xc7, 0x01, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, + 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, + 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, + 0x44, 0x10, 0x01, 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, + 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, + 0x02, 0x12, 0x1f, 0x0a, 0x1b, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x53, + 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x52, 0x45, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, + 0x10, 0x03, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, + 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x41, 0x55, 0x53, 0x45, 0x44, 0x10, 0x04, 0x12, + 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x53, 0x54, 0x41, + 0x54, 0x55, 0x53, 0x5f, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x05, 0x2a, 0x6d, 0x0a, + 0x0f, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, + 0x12, 0x1c, 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x48, 0x45, + 0x41, 0x4c, 0x54, 0x48, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1c, + 0x0a, 0x18, 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x4c, + 0x54, 0x48, 0x5f, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x01, 0x12, 0x1e, 0x0a, 0x1a, + 0x43, 0x4f, 0x4e, 0x54, 0x41, 0x49, 0x4e, 0x45, 0x52, 0x5f, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, + 0x5f, 0x55, 0x4e, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x2a, 0x25, 0x0a, 0x0d, + 0x45, 0x43, 0x53, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x54, 0x79, 0x70, 0x65, 0x12, 0x07, 0x0a, + 0x03, 0x45, 0x43, 0x32, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x52, 0x47, 0x41, 0x54, + 0x45, 0x10, 0x01, 0x42, 0x15, 0x5a, 0x13, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/pkg/proto/pbgo/languagedetection/api.pb.go b/pkg/proto/pbgo/languagedetection/api.pb.go index 8b0005ef541f8..ed03ed51271fe 100644 --- a/pkg/proto/pbgo/languagedetection/api.pb.go +++ b/pkg/proto/pbgo/languagedetection/api.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/languagedetection/api.proto package languagedetection @@ -21,13 +21,12 @@ const ( ) type Process struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Pid int32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + Command string `protobuf:"bytes,2,opt,name=command,proto3" json:"command,omitempty"` + Cmdline []string `protobuf:"bytes,3,rep,name=cmdline,proto3" json:"cmdline,omitempty"` unknownFields protoimpl.UnknownFields - - Pid int32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - Command string `protobuf:"bytes,2,opt,name=command,proto3" json:"command,omitempty"` - Cmdline []string `protobuf:"bytes,3,rep,name=cmdline,proto3" json:"cmdline,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Process) Reset() { @@ -83,12 +82,11 @@ func (x *Process) GetCmdline() []string { // Should closely match `languagemodels.Language` type Language struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Language) Reset() { @@ -136,11 +134,10 @@ func (x *Language) GetVersion() string { } type DetectLanguageRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Processes []*Process `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` unknownFields protoimpl.UnknownFields - - Processes []*Process `protobuf:"bytes,1,rep,name=processes,proto3" json:"processes,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DetectLanguageRequest) Reset() { @@ -181,11 +178,10 @@ func (x *DetectLanguageRequest) GetProcesses() []*Process { } type DetectLanguageResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Languages []*Language `protobuf:"bytes,1,rep,name=languages,proto3" json:"languages,omitempty"` unknownFields protoimpl.UnknownFields - - Languages []*Language `protobuf:"bytes,1,rep,name=languages,proto3" json:"languages,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DetectLanguageResponse) Reset() { diff --git a/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go b/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go index a28d85a34a874..277e32f19ed99 100644 --- a/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go +++ b/pkg/proto/pbgo/mocks/core/api_mockgen.pb.go @@ -259,6 +259,26 @@ func (mr *MockAgentSecureClientMockRecorder) GetConfigStateHA(ctx, in interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigStateHA", reflect.TypeOf((*MockAgentSecureClient)(nil).GetConfigStateHA), varargs...) } +// GetHostTags mocks base method. +func (m *MockAgentSecureClient) GetHostTags(ctx context.Context, in *core.HostTagRequest, opts ...grpc.CallOption) (*core.HostTagReply, error) { + m.ctrl.T.Helper() + varargs := []interface{}{ctx, in} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetHostTags", varargs...) 
+ ret0, _ := ret[0].(*core.HostTagReply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHostTags indicates an expected call of GetHostTags. +func (mr *MockAgentSecureClientMockRecorder) GetHostTags(ctx, in interface{}, opts ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{ctx, in}, opts...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostTags", reflect.TypeOf((*MockAgentSecureClient)(nil).GetHostTags), varargs...) +} + // RegisterRemoteAgent mocks base method. func (m *MockAgentSecureClient) RegisterRemoteAgent(ctx context.Context, in *core.RegisterRemoteAgentRequest, opts ...grpc.CallOption) (*core.RegisterRemoteAgentResponse, error) { m.ctrl.T.Helper() @@ -855,6 +875,21 @@ func (mr *MockAgentSecureServerMockRecorder) GetConfigStateHA(arg0, arg1 interfa return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConfigStateHA", reflect.TypeOf((*MockAgentSecureServer)(nil).GetConfigStateHA), arg0, arg1) } +// GetHostTags mocks base method. +func (m *MockAgentSecureServer) GetHostTags(arg0 context.Context, arg1 *core.HostTagRequest) (*core.HostTagReply, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetHostTags", arg0, arg1) + ret0, _ := ret[0].(*core.HostTagReply) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetHostTags indicates an expected call of GetHostTags. +func (mr *MockAgentSecureServerMockRecorder) GetHostTags(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetHostTags", reflect.TypeOf((*MockAgentSecureServer)(nil).GetHostTags), arg0, arg1) +} + // RegisterRemoteAgent mocks base method. func (m *MockAgentSecureServer) RegisterRemoteAgent(arg0 context.Context, arg1 *core.RegisterRemoteAgentRequest) (*core.RegisterRemoteAgentResponse, error) { m.ctrl.T.Helper() diff --git a/pkg/proto/pbgo/process/process.pb.go b/pkg/proto/pbgo/process/process.pb.go index cd13a446613b4..d65b7578bb94e 100644 --- a/pkg/proto/pbgo/process/process.pb.go +++ b/pkg/proto/pbgo/process/process.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/process/process.proto package process @@ -22,11 +22,10 @@ const ( // ProcessStatRequest is the request to get process stats. type ProcessStatRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Pids []int32 `protobuf:"varint,1,rep,packed,name=pids,proto3" json:"pids,omitempty"` unknownFields protoimpl.UnknownFields - - Pids []int32 `protobuf:"varint,1,rep,packed,name=pids,proto3" json:"pids,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ProcessStatRequest) Reset() { diff --git a/pkg/proto/pbgo/process/workloadmeta_process.pb.go b/pkg/proto/pbgo/process/workloadmeta_process.pb.go index 3c5d2ddd71152..d08aa79b92cdc 100644 --- a/pkg/proto/pbgo/process/workloadmeta_process.pb.go +++ b/pkg/proto/pbgo/process/workloadmeta_process.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/process/workloadmeta_process.proto package process @@ -25,13 +25,12 @@ const ( ) type ProcessStreamResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + EventID int32 `protobuf:"varint,1,opt,name=eventID,proto3" json:"eventID,omitempty"` + SetEvents []*ProcessEventSet `protobuf:"bytes,2,rep,name=setEvents,proto3" json:"setEvents,omitempty"` + UnsetEvents []*ProcessEventUnset `protobuf:"bytes,3,rep,name=unsetEvents,proto3" json:"unsetEvents,omitempty"` unknownFields protoimpl.UnknownFields - - EventID int32 `protobuf:"varint,1,opt,name=eventID,proto3" json:"eventID,omitempty"` - SetEvents []*ProcessEventSet `protobuf:"bytes,2,rep,name=setEvents,proto3" json:"setEvents,omitempty"` - UnsetEvents []*ProcessEventUnset `protobuf:"bytes,3,rep,name=unsetEvents,proto3" json:"unsetEvents,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ProcessStreamResponse) Reset() { @@ -86,15 +85,14 @@ func (x *ProcessStreamResponse) GetUnsetEvents() []*ProcessEventUnset { } type ProcessEventSet struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Pid int32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + Nspid int32 `protobuf:"varint,2,opt,name=nspid,proto3" json:"nspid,omitempty"` + ContainerID string `protobuf:"bytes,3,opt,name=containerID,proto3" json:"containerID,omitempty"` + CreationTime int64 `protobuf:"varint,4,opt,name=creationTime,proto3" json:"creationTime,omitempty"` + Language *Language `protobuf:"bytes,5,opt,name=language,proto3" json:"language,omitempty"` unknownFields protoimpl.UnknownFields - - Pid int32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` - Nspid int32 `protobuf:"varint,2,opt,name=nspid,proto3" json:"nspid,omitempty"` - ContainerID string `protobuf:"bytes,3,opt,name=containerID,proto3" json:"containerID,omitempty"` - CreationTime int64 `protobuf:"varint,4,opt,name=creationTime,proto3" json:"creationTime,omitempty"` - Language *Language `protobuf:"bytes,5,opt,name=language,proto3" json:"language,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ProcessEventSet) Reset() { @@ -163,11 +161,10 @@ func (x *ProcessEventSet) GetLanguage() *Language { } type ProcessEventUnset struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Pid int32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` unknownFields protoimpl.UnknownFields - - Pid int32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ProcessEventUnset) Reset() { @@ -208,11 +205,10 @@ func (x *ProcessEventUnset) GetPid() int32 { } type Language struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + sizeCache protoimpl.SizeCache } func (x *Language) Reset() { @@ -253,9 +249,9 @@ func (x *Language) GetName() string { } type ProcessStreamEntitiesRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache } func (x *ProcessStreamEntitiesRequest) Reset() { @@ -291,11 +287,10 @@ func (*ProcessStreamEntitiesRequest) Descriptor() ([]byte, []int) { // ParentLanguageAnnotationRequest is sent from the Core-Agent to the Cluster-Agent to notify that // a language was detected for a given container type ParentLanguageAnnotationRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + PodDetails []*PodLanguageDetails `protobuf:"bytes,1,rep,name=podDetails,proto3" json:"podDetails,omitempty"` unknownFields protoimpl.UnknownFields - - PodDetails []*PodLanguageDetails `protobuf:"bytes,1,rep,name=podDetails,proto3" json:"podDetails,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ParentLanguageAnnotationRequest) Reset() { @@ -337,15 +332,14 @@ func (x *ParentLanguageAnnotationRequest) GetPodDetails() []*PodLanguageDetails // PodLanguageDetails holds the language metadata associated to a given pod type PodLanguageDetails struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` ContainerDetails []*ContainerLanguageDetails `protobuf:"bytes,3,rep,name=containerDetails,proto3" json:"containerDetails,omitempty"` Ownerref *KubeOwnerInfo `protobuf:"bytes,4,opt,name=ownerref,proto3" json:"ownerref,omitempty"` InitContainerDetails []*ContainerLanguageDetails `protobuf:"bytes,5,rep,name=initContainerDetails,proto3" json:"initContainerDetails,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *PodLanguageDetails) Reset() { @@ -415,12 +409,11 @@ func (x *PodLanguageDetails) GetInitContainerDetails() []*ContainerLanguageDetai // ContainerLanguageDetails contains the different languages used in a container type ContainerLanguageDetails struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ContainerName string `protobuf:"bytes,1,opt,name=containerName,proto3" json:"containerName,omitempty"` + Languages []*Language `protobuf:"bytes,2,rep,name=languages,proto3" json:"languages,omitempty"` unknownFields protoimpl.UnknownFields - - ContainerName string `protobuf:"bytes,1,opt,name=containerName,proto3" json:"containerName,omitempty"` - Languages []*Language `protobuf:"bytes,2,rep,name=languages,proto3" json:"languages,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ContainerLanguageDetails) Reset() { @@ -469,13 +462,12 @@ func (x *ContainerLanguageDetails) GetLanguages() []*Language { // KubeOwnerInfo holds metadata about the owner of the pod type KubeOwnerInfo struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Kind string `protobuf:"bytes,3,opt,name=kind,proto3" json:"kind,omitempty"` unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Kind string `protobuf:"bytes,3,opt,name=kind,proto3" json:"kind,omitempty"` + sizeCache protoimpl.SizeCache } func (x *KubeOwnerInfo) Reset() { 
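For orientation only, here is a minimal sketch (separate from the patch itself) of how the language-detection annotation messages defined in the hunk above might be populated by a caller; the import path is the assumed location of the generated `process` package in this repository, and all concrete values are illustrative.

// langdetect_example.go — illustrative sketch, not part of the generated code above.
package main

import (
	"fmt"

	// Assumed import path for the generated pbgo "process" package.
	pbprocess "github.com/DataDog/datadog-agent/pkg/proto/pbgo/process"
)

func main() {
	// Per the comment in the generated file, ParentLanguageAnnotationRequest is what
	// the Core Agent sends to the Cluster Agent once languages have been detected
	// for a pod's containers. Field names match the structs in the diff above;
	// the namespace, pod, owner, and container values here are made up.
	req := &pbprocess.ParentLanguageAnnotationRequest{
		PodDetails: []*pbprocess.PodLanguageDetails{
			{
				Namespace: "default",
				Name:      "web-7d4f9c6b8d-abcde",
				Ownerref: &pbprocess.KubeOwnerInfo{
					Id:   "0e1a2b3c",
					Name: "web-7d4f9c6b8d",
					Kind: "ReplicaSet",
				},
				ContainerDetails: []*pbprocess.ContainerLanguageDetails{
					{
						ContainerName: "app",
						// The process package's Language message carries only a name.
						Languages: []*pbprocess.Language{{Name: "go"}},
					},
				},
			},
		},
	}
	fmt.Println(req.String())
}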
diff --git a/pkg/proto/pbgo/trace/agent_payload.pb.go b/pkg/proto/pbgo/trace/agent_payload.pb.go index 30a8415307f40..303595432ed69 100644 --- a/pkg/proto/pbgo/trace/agent_payload.pb.go +++ b/pkg/proto/pbgo/trace/agent_payload.pb.go @@ -2,8 +2,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/trace/agent_payload.proto package trace @@ -24,10 +24,7 @@ const ( // AgentPayload represents payload the agent sends to the intake. type AgentPayload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // hostName specifies hostname of where the agent is running. HostName string `protobuf:"bytes,1,opt,name=hostName,proto3" json:"hostName,omitempty"` // env specifies `env` set in agent configuration. @@ -35,7 +32,7 @@ type AgentPayload struct { // tracerPayloads specifies list of the payloads received from tracers. TracerPayloads []*TracerPayload `protobuf:"bytes,5,rep,name=tracerPayloads,proto3" json:"tracerPayloads,omitempty"` // tags specifies tags common in all `tracerPayloads`. - Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // agentVersion specifies version of the agent. AgentVersion string `protobuf:"bytes,7,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` // targetTPS holds `TargetTPS` value in AgentConfig. @@ -44,6 +41,8 @@ type AgentPayload struct { ErrorTPS float64 `protobuf:"fixed64,9,opt,name=errorTPS,proto3" json:"errorTPS,omitempty"` // rareSamplerEnabled holds `RareSamplerEnabled` value in AgentConfig RareSamplerEnabled bool `protobuf:"varint,10,opt,name=rareSamplerEnabled,proto3" json:"rareSamplerEnabled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *AgentPayload) Reset() { diff --git a/pkg/proto/pbgo/trace/span.pb.go b/pkg/proto/pbgo/trace/span.pb.go index 1d0ad90af6351..c1ddc73b36c0d 100644 --- a/pkg/proto/pbgo/trace/span.pb.go +++ b/pkg/proto/pbgo/trace/span.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/trace/span.proto package trace @@ -20,11 +20,115 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type SpanLink struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +type AttributeAnyValue_AttributeAnyValueType int32 + +const ( + AttributeAnyValue_STRING_VALUE AttributeAnyValue_AttributeAnyValueType = 0 + AttributeAnyValue_BOOL_VALUE AttributeAnyValue_AttributeAnyValueType = 1 + AttributeAnyValue_INT_VALUE AttributeAnyValue_AttributeAnyValueType = 2 + AttributeAnyValue_DOUBLE_VALUE AttributeAnyValue_AttributeAnyValueType = 3 + AttributeAnyValue_ARRAY_VALUE AttributeAnyValue_AttributeAnyValueType = 4 +) + +// Enum value maps for AttributeAnyValue_AttributeAnyValueType. 
+var ( + AttributeAnyValue_AttributeAnyValueType_name = map[int32]string{ + 0: "STRING_VALUE", + 1: "BOOL_VALUE", + 2: "INT_VALUE", + 3: "DOUBLE_VALUE", + 4: "ARRAY_VALUE", + } + AttributeAnyValue_AttributeAnyValueType_value = map[string]int32{ + "STRING_VALUE": 0, + "BOOL_VALUE": 1, + "INT_VALUE": 2, + "DOUBLE_VALUE": 3, + "ARRAY_VALUE": 4, + } +) + +func (x AttributeAnyValue_AttributeAnyValueType) Enum() *AttributeAnyValue_AttributeAnyValueType { + p := new(AttributeAnyValue_AttributeAnyValueType) + *p = x + return p +} + +func (x AttributeAnyValue_AttributeAnyValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AttributeAnyValue_AttributeAnyValueType) Descriptor() protoreflect.EnumDescriptor { + return file_datadog_trace_span_proto_enumTypes[0].Descriptor() +} + +func (AttributeAnyValue_AttributeAnyValueType) Type() protoreflect.EnumType { + return &file_datadog_trace_span_proto_enumTypes[0] +} + +func (x AttributeAnyValue_AttributeAnyValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AttributeAnyValue_AttributeAnyValueType.Descriptor instead. +func (AttributeAnyValue_AttributeAnyValueType) EnumDescriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{2, 0} +} + +type AttributeArrayValue_AttributeArrayValueType int32 + +const ( + AttributeArrayValue_STRING_VALUE AttributeArrayValue_AttributeArrayValueType = 0 + AttributeArrayValue_BOOL_VALUE AttributeArrayValue_AttributeArrayValueType = 1 + AttributeArrayValue_INT_VALUE AttributeArrayValue_AttributeArrayValueType = 2 + AttributeArrayValue_DOUBLE_VALUE AttributeArrayValue_AttributeArrayValueType = 3 +) + +// Enum value maps for AttributeArrayValue_AttributeArrayValueType. +var ( + AttributeArrayValue_AttributeArrayValueType_name = map[int32]string{ + 0: "STRING_VALUE", + 1: "BOOL_VALUE", + 2: "INT_VALUE", + 3: "DOUBLE_VALUE", + } + AttributeArrayValue_AttributeArrayValueType_value = map[string]int32{ + "STRING_VALUE": 0, + "BOOL_VALUE": 1, + "INT_VALUE": 2, + "DOUBLE_VALUE": 3, + } +) + +func (x AttributeArrayValue_AttributeArrayValueType) Enum() *AttributeArrayValue_AttributeArrayValueType { + p := new(AttributeArrayValue_AttributeArrayValueType) + *p = x + return p +} + +func (x AttributeArrayValue_AttributeArrayValueType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AttributeArrayValue_AttributeArrayValueType) Descriptor() protoreflect.EnumDescriptor { + return file_datadog_trace_span_proto_enumTypes[1].Descriptor() +} + +func (AttributeArrayValue_AttributeArrayValueType) Type() protoreflect.EnumType { + return &file_datadog_trace_span_proto_enumTypes[1] +} + +func (x AttributeArrayValue_AttributeArrayValueType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AttributeArrayValue_AttributeArrayValueType.Descriptor instead. +func (AttributeArrayValue_AttributeArrayValueType) EnumDescriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{4, 0} +} +type SpanLink struct { + state protoimpl.MessageState `protogen:"open.v1"` // @gotags: json:"trace_id" msg:"trace_id" TraceID uint64 `protobuf:"varint,1,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"` // Required. 
// @gotags: json:"trace_id_high" msg:"trace_id_high,omitempty" @@ -32,11 +136,13 @@ type SpanLink struct { // @gotags: json:"span_id" msg:"span_id" SpanID uint64 `protobuf:"varint,3,opt,name=spanID,proto3" json:"span_id" msg:"span_id"` // Required. // @gotags: msg:"attributes,omitempty" - Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"attributes,omitempty"` // Optional. Simple mapping of keys to string values. + Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"attributes,omitempty"` // Optional. Simple mapping of keys to string values. // @gotags: msg:"tracestate,omitempty" Tracestate string `protobuf:"bytes,5,opt,name=tracestate,proto3" json:"tracestate,omitempty" msg:"tracestate,omitempty"` // Optional. W3C tracestate. // @gotags: msg:"flags,omitempty" - Flags uint32 `protobuf:"varint,6,opt,name=flags,proto3" json:"flags,omitempty" msg:"flags,omitempty"` // Optional. W3C trace flags. If set, the high bit (bit 31) must be set. + Flags uint32 `protobuf:"varint,6,opt,name=flags,proto3" json:"flags,omitempty" msg:"flags,omitempty"` // Optional. W3C trace flags. If set, the high bit (bit 31) must be set. + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SpanLink) Reset() { @@ -111,11 +217,293 @@ func (x *SpanLink) GetFlags() uint32 { return 0 } -type Span struct { - state protoimpl.MessageState +type SpanEvent struct { + state protoimpl.MessageState `protogen:"open.v1"` + // time is the number of nanoseconds between the Unix epoch and this event. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // name is this event's name. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // attributes is a mapping from attribute key string to any value. + // The order of attributes should be preserved in the key/value map. + // The supported values match the OpenTelemetry attributes specification: + // https://github.com/open-telemetry/opentelemetry-proto/blob/a8f08fc49d60538f97ffabcc7feac92f832976dd/opentelemetry/proto/common/v1/common.proto + Attributes map[string]*AttributeAnyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SpanEvent) Reset() { + *x = SpanEvent{} + mi := &file_datadog_trace_span_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SpanEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SpanEvent) ProtoMessage() {} + +func (x *SpanEvent) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SpanEvent.ProtoReflect.Descriptor instead. 
+func (*SpanEvent) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{1} +} + +func (x *SpanEvent) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (x *SpanEvent) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SpanEvent) GetAttributes() map[string]*AttributeAnyValue { + if x != nil { + return x.Attributes + } + return nil +} + +// AttributeAnyValue is used to represent any type of attribute value. AttributeAnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +type AttributeAnyValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // We implement a union manually here because Go's MessagePack generator does not support + // Protobuf `oneof` unions: https://github.com/tinylib/msgp/issues/184 + // Despite this, the format represented here is binary compatible with `oneof`, if we choose + // to migrate to that in the future. + Type AttributeAnyValue_AttributeAnyValueType `protobuf:"varint,1,opt,name=type,proto3,enum=datadog.trace.AttributeAnyValue_AttributeAnyValueType" json:"type,omitempty"` + StringValue string `protobuf:"bytes,2,opt,name=string_value,json=stringValue,proto3" json:"string_value,omitempty"` + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3" json:"bool_value,omitempty"` + IntValue int64 `protobuf:"varint,4,opt,name=int_value,json=intValue,proto3" json:"int_value,omitempty"` + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3" json:"double_value,omitempty"` + ArrayValue *AttributeArray `protobuf:"bytes,6,opt,name=array_value,json=arrayValue,proto3" json:"array_value,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AttributeAnyValue) Reset() { + *x = AttributeAnyValue{} + mi := &file_datadog_trace_span_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AttributeAnyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributeAnyValue) ProtoMessage() {} + +func (x *AttributeAnyValue) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributeAnyValue.ProtoReflect.Descriptor instead. +func (*AttributeAnyValue) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{2} +} + +func (x *AttributeAnyValue) GetType() AttributeAnyValue_AttributeAnyValueType { + if x != nil { + return x.Type + } + return AttributeAnyValue_STRING_VALUE +} + +func (x *AttributeAnyValue) GetStringValue() string { + if x != nil { + return x.StringValue + } + return "" +} + +func (x *AttributeAnyValue) GetBoolValue() bool { + if x != nil { + return x.BoolValue + } + return false +} + +func (x *AttributeAnyValue) GetIntValue() int64 { + if x != nil { + return x.IntValue + } + return 0 +} + +func (x *AttributeAnyValue) GetDoubleValue() float64 { + if x != nil { + return x.DoubleValue + } + return 0 +} + +func (x *AttributeAnyValue) GetArrayValue() *AttributeArray { + if x != nil { + return x.ArrayValue + } + return nil +} + +// AttributeArray is a list of AttributeArrayValue messages. 
We need this as a message since `oneof` in AttributeAnyValue does not allow repeated fields. +type AttributeArray struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Array of values. The array may be empty (contain 0 elements). + Values []*AttributeArrayValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache +} + +func (x *AttributeArray) Reset() { + *x = AttributeArray{} + mi := &file_datadog_trace_span_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AttributeArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributeArray) ProtoMessage() {} + +func (x *AttributeArray) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributeArray.ProtoReflect.Descriptor instead. +func (*AttributeArray) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{3} +} + +func (x *AttributeArray) GetValues() []*AttributeArrayValue { + if x != nil { + return x.Values + } + return nil +} + +// An element in the homogeneous AttributeArray. +// Compared to AttributeAnyValue, it only supports scalar values. +type AttributeArrayValue struct { + state protoimpl.MessageState `protogen:"open.v1"` + // We implement a union manually here because Go's MessagePack generator does not support + // Protobuf `oneof` unions: https://github.com/tinylib/msgp/issues/184 + // Despite this, the format represented here is binary compatible with `oneof`, if we choose + // to migrate to that in the future. + Type AttributeArrayValue_AttributeArrayValueType `protobuf:"varint,1,opt,name=type,proto3,enum=datadog.trace.AttributeArrayValue_AttributeArrayValueType" json:"type,omitempty"` + StringValue string `protobuf:"bytes,2,opt,name=string_value,json=stringValue,proto3" json:"string_value,omitempty"` + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3" json:"bool_value,omitempty"` + IntValue int64 `protobuf:"varint,4,opt,name=int_value,json=intValue,proto3" json:"int_value,omitempty"` + DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3" json:"double_value,omitempty"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AttributeArrayValue) Reset() { + *x = AttributeArrayValue{} + mi := &file_datadog_trace_span_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} +func (x *AttributeArrayValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AttributeArrayValue) ProtoMessage() {} + +func (x *AttributeArrayValue) ProtoReflect() protoreflect.Message { + mi := &file_datadog_trace_span_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AttributeArrayValue.ProtoReflect.Descriptor instead. 
+func (*AttributeArrayValue) Descriptor() ([]byte, []int) { + return file_datadog_trace_span_proto_rawDescGZIP(), []int{4} +} + +func (x *AttributeArrayValue) GetType() AttributeArrayValue_AttributeArrayValueType { + if x != nil { + return x.Type + } + return AttributeArrayValue_STRING_VALUE +} + +func (x *AttributeArrayValue) GetStringValue() string { + if x != nil { + return x.StringValue + } + return "" +} + +func (x *AttributeArrayValue) GetBoolValue() bool { + if x != nil { + return x.BoolValue + } + return false +} + +func (x *AttributeArrayValue) GetIntValue() int64 { + if x != nil { + return x.IntValue + } + return 0 +} + +func (x *AttributeArrayValue) GetDoubleValue() float64 { + if x != nil { + return x.DoubleValue + } + return 0 +} + +type Span struct { + state protoimpl.MessageState `protogen:"open.v1"` // service is the name of the service with which this span is associated. // @gotags: json:"service" msg:"service" Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service" msg:"service"` @@ -145,24 +533,29 @@ type Span struct { Error int32 `protobuf:"varint,9,opt,name=error,proto3" json:"error" msg:"error"` // meta is a mapping from tag name to tag value for string-valued tags. // @gotags: json:"meta,omitempty" msg:"meta,omitempty" - Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta,omitempty"` + Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"meta,omitempty"` // metrics is a mapping from tag name to tag value for numeric-valued tags. // @gotags: json:"metrics,omitempty" msg:"metrics,omitempty" - Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3" msg:"metrics,omitempty"` + Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"fixed64,2,opt,name=value" msg:"metrics,omitempty"` // type is the type of the service with which this span is associated. Example values: web, db, lambda. // @gotags: json:"type" msg:"type" Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type" msg:"type"` // meta_struct is a registry of structured "other" data used by, e.g., AppSec. // @gotags: json:"meta_struct,omitempty" msg:"meta_struct,omitempty" - MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta_struct,omitempty"` + MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"meta_struct,omitempty"` // span_links represents a collection of links, where each link defines a causal relationship between two spans. // @gotags: json:"span_links,omitempty" msg:"span_links,omitempty" SpanLinks []*SpanLink `protobuf:"bytes,14,rep,name=spanLinks,proto3" json:"span_links,omitempty" msg:"span_links,omitempty"` + // spanEvents represent an event at an instant in time related to this span, but not necessarily during the span. 
+ // @gotags: json:"span_events,omitempty" msg:"span_events,omitempty" + SpanEvents []*SpanEvent `protobuf:"bytes,15,rep,name=spanEvents,proto3" json:"span_events,omitempty" msg:"span_events,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Span) Reset() { *x = Span{} - mi := &file_datadog_trace_span_proto_msgTypes[1] + mi := &file_datadog_trace_span_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -174,7 +567,7 @@ func (x *Span) String() string { func (*Span) ProtoMessage() {} func (x *Span) ProtoReflect() protoreflect.Message { - mi := &file_datadog_trace_span_proto_msgTypes[1] + mi := &file_datadog_trace_span_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -187,7 +580,7 @@ func (x *Span) ProtoReflect() protoreflect.Message { // Deprecated: Use Span.ProtoReflect.Descriptor instead. func (*Span) Descriptor() ([]byte, []int) { - return file_datadog_trace_span_proto_rawDescGZIP(), []int{1} + return file_datadog_trace_span_proto_rawDescGZIP(), []int{5} } func (x *Span) GetService() string { @@ -288,6 +681,13 @@ func (x *Span) GetSpanLinks() []*SpanLink { return nil } +func (x *Span) GetSpanEvents() []*SpanEvent { + if x != nil { + return x.SpanEvents + } + return nil +} + var File_datadog_trace_span_proto protoreflect.FileDescriptor var file_datadog_trace_span_proto_rawDesc = []byte{ @@ -311,51 +711,120 @@ var file_datadog_trace_span_proto_rawDesc = []byte{ 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9a, 0x05, 0x0a, 0x04, 0x53, 0x70, - 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a, - 0x0a, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, - 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 
0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, - 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x64, 0x61, 0x74, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf0, 0x01, 0x0a, 0x09, 0x53, 0x70, + 0x61, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, 0x69, 0x6d, 0x65, 0x5f, + 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x06, 0x52, + 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x48, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x1a, 0x5f, 0x0a, 0x0f, 0x41, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x36, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x8e, 0x03, 0x0a, + 0x11, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x4a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x36, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x6e, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x21, + 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x3e, 0x0a, 0x0b, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, + 0x72, 0x72, 0x61, 0x79, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x22, 0x6b, 0x0a, 0x15, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x6e, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, + 0x49, 0x4e, 0x47, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x42, + 0x4f, 0x4f, 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, + 0x4e, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x4f, + 0x55, 0x42, 0x4c, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, + 0x41, 0x52, 0x52, 0x41, 0x59, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, 0x04, 0x22, 0x4c, 0x0a, + 0x0e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, + 0x3a, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xc5, 0x02, 0x0a, 0x13, + 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x4e, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x3a, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x41, + 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5c, 0x0a, 0x17, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x42, 0x4f, 0x4f, 0x4c, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, + 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x49, 0x4e, 0x54, 0x5f, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x10, + 
0x02, 0x12, 0x10, 0x0a, 0x0c, 0x44, 0x4f, 0x55, 0x42, 0x4c, 0x45, 0x5f, 0x56, 0x41, 0x4c, 0x55, + 0x45, 0x10, 0x03, 0x22, 0xd4, 0x05, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, + 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, + 0x12, 0x16, 0x0a, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x64, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04, + 0x6d, 0x65, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, - 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x73, - 0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, - 0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e, - 0x6b, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, + 0x3a, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x44, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x6d, 0x65, 0x74, 0x61, 0x53, + 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e, + 0x6b, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, + 0x6f, 0x67, 0x2e, 0x74, 
0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e, + 0x6b, 0x52, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x38, 0x0a, 0x0a, + 0x73, 0x70, 0x61, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0f, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x0a, 0x73, 0x70, 0x61, 0x6e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x3a, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x53, - 0x74, 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x16, 0x5a, 0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x4d, + 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x16, 0x5a, 0x14, 0x70, 0x6b, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -370,26 +839,41 @@ func file_datadog_trace_span_proto_rawDescGZIP() []byte { return file_datadog_trace_span_proto_rawDescData } -var file_datadog_trace_span_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_datadog_trace_span_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_datadog_trace_span_proto_msgTypes = make([]protoimpl.MessageInfo, 11) var file_datadog_trace_span_proto_goTypes = []any{ - (*SpanLink)(nil), // 0: datadog.trace.SpanLink - (*Span)(nil), // 1: datadog.trace.Span - nil, // 2: datadog.trace.SpanLink.AttributesEntry - nil, // 3: datadog.trace.Span.MetaEntry - nil, // 4: 
datadog.trace.Span.MetricsEntry - nil, // 5: datadog.trace.Span.MetaStructEntry + (AttributeAnyValue_AttributeAnyValueType)(0), // 0: datadog.trace.AttributeAnyValue.AttributeAnyValueType + (AttributeArrayValue_AttributeArrayValueType)(0), // 1: datadog.trace.AttributeArrayValue.AttributeArrayValueType + (*SpanLink)(nil), // 2: datadog.trace.SpanLink + (*SpanEvent)(nil), // 3: datadog.trace.SpanEvent + (*AttributeAnyValue)(nil), // 4: datadog.trace.AttributeAnyValue + (*AttributeArray)(nil), // 5: datadog.trace.AttributeArray + (*AttributeArrayValue)(nil), // 6: datadog.trace.AttributeArrayValue + (*Span)(nil), // 7: datadog.trace.Span + nil, // 8: datadog.trace.SpanLink.AttributesEntry + nil, // 9: datadog.trace.SpanEvent.AttributesEntry + nil, // 10: datadog.trace.Span.MetaEntry + nil, // 11: datadog.trace.Span.MetricsEntry + nil, // 12: datadog.trace.Span.MetaStructEntry } var file_datadog_trace_span_proto_depIdxs = []int32{ - 2, // 0: datadog.trace.SpanLink.attributes:type_name -> datadog.trace.SpanLink.AttributesEntry - 3, // 1: datadog.trace.Span.meta:type_name -> datadog.trace.Span.MetaEntry - 4, // 2: datadog.trace.Span.metrics:type_name -> datadog.trace.Span.MetricsEntry - 5, // 3: datadog.trace.Span.meta_struct:type_name -> datadog.trace.Span.MetaStructEntry - 0, // 4: datadog.trace.Span.spanLinks:type_name -> datadog.trace.SpanLink - 5, // [5:5] is the sub-list for method output_type - 5, // [5:5] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 8, // 0: datadog.trace.SpanLink.attributes:type_name -> datadog.trace.SpanLink.AttributesEntry + 9, // 1: datadog.trace.SpanEvent.attributes:type_name -> datadog.trace.SpanEvent.AttributesEntry + 0, // 2: datadog.trace.AttributeAnyValue.type:type_name -> datadog.trace.AttributeAnyValue.AttributeAnyValueType + 5, // 3: datadog.trace.AttributeAnyValue.array_value:type_name -> datadog.trace.AttributeArray + 6, // 4: datadog.trace.AttributeArray.values:type_name -> datadog.trace.AttributeArrayValue + 1, // 5: datadog.trace.AttributeArrayValue.type:type_name -> datadog.trace.AttributeArrayValue.AttributeArrayValueType + 10, // 6: datadog.trace.Span.meta:type_name -> datadog.trace.Span.MetaEntry + 11, // 7: datadog.trace.Span.metrics:type_name -> datadog.trace.Span.MetricsEntry + 12, // 8: datadog.trace.Span.meta_struct:type_name -> datadog.trace.Span.MetaStructEntry + 2, // 9: datadog.trace.Span.spanLinks:type_name -> datadog.trace.SpanLink + 3, // 10: datadog.trace.Span.spanEvents:type_name -> datadog.trace.SpanEvent + 4, // 11: datadog.trace.SpanEvent.AttributesEntry.value:type_name -> datadog.trace.AttributeAnyValue + 12, // [12:12] is the sub-list for method output_type + 12, // [12:12] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name } func init() { file_datadog_trace_span_proto_init() } @@ -402,13 +886,14 @@ func file_datadog_trace_span_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_datadog_trace_span_proto_rawDesc, - NumEnums: 0, - NumMessages: 6, + NumEnums: 2, + NumMessages: 11, NumExtensions: 0, NumServices: 0, }, GoTypes: file_datadog_trace_span_proto_goTypes, DependencyIndexes: file_datadog_trace_span_proto_depIdxs, + EnumInfos: file_datadog_trace_span_proto_enumTypes, 
MessageInfos: file_datadog_trace_span_proto_msgTypes, }.Build() File_datadog_trace_span_proto = out.File diff --git a/pkg/proto/pbgo/trace/span_gen.go b/pkg/proto/pbgo/trace/span_gen.go index 0ebaf789abc63..5f61a51c4acb0 100644 --- a/pkg/proto/pbgo/trace/span_gen.go +++ b/pkg/proto/pbgo/trace/span_gen.go @@ -6,12 +6,443 @@ import ( "github.com/tinylib/msgp/msgp" ) +// MarshalMsg implements msgp.Marshaler +func (z *AttributeAnyValue) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 6 + // string "Type" + o = append(o, 0x86, 0xa4, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendInt32(o, int32(z.Type)) + // string "StringValue" + o = append(o, 0xab, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendString(o, z.StringValue) + // string "BoolValue" + o = append(o, 0xa9, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendBool(o, z.BoolValue) + // string "IntValue" + o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendInt64(o, z.IntValue) + // string "DoubleValue" + o = append(o, 0xab, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.DoubleValue) + // string "ArrayValue" + o = append(o, 0xaa, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65) + if z.ArrayValue == nil { + o = msgp.AppendNil(o) + } else { + // map header, size 1 + // string "Values" + o = append(o, 0x81, 0xa6, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.ArrayValue.Values))) + for za0001 := range z.ArrayValue.Values { + if z.ArrayValue.Values[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.ArrayValue.Values[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values", za0001) + return + } + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeAnyValue) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 int32 + zb0002, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = AttributeAnyValue_AttributeAnyValueType(zb0002) + } + case "StringValue": + z.StringValue, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "StringValue") + return + } + case "BoolValue": + z.BoolValue, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BoolValue") + return + } + case "IntValue": + z.IntValue, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "DoubleValue": + z.DoubleValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DoubleValue") + return + } + case "ArrayValue": + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ArrayValue = nil + } else { + if z.ArrayValue == nil { + z.ArrayValue = new(AttributeArray) + } + var zb0003 uint32 + zb0003, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + for zb0003 > 0 { + zb0003-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err 
!= nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + switch msgp.UnsafeString(field) { + case "Values": + var zb0004 uint32 + zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values") + return + } + if cap(z.ArrayValue.Values) >= int(zb0004) { + z.ArrayValue.Values = (z.ArrayValue.Values)[:zb0004] + } else { + z.ArrayValue.Values = make([]*AttributeArrayValue, zb0004) + } + for za0001 := range z.ArrayValue.Values { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.ArrayValue.Values[za0001] = nil + } else { + if z.ArrayValue.Values[za0001] == nil { + z.ArrayValue.Values[za0001] = new(AttributeArrayValue) + } + bts, err = z.ArrayValue.Values[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "ArrayValue", "Values", za0001) + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err, "ArrayValue") + return + } + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AttributeAnyValue) Msgsize() (s int) { + s = 1 + 5 + msgp.Int32Size + 12 + msgp.StringPrefixSize + len(z.StringValue) + 10 + msgp.BoolSize + 9 + msgp.Int64Size + 12 + msgp.Float64Size + 11 + if z.ArrayValue == nil { + s += msgp.NilSize + } else { + s += 1 + 7 + msgp.ArrayHeaderSize + for za0001 := range z.ArrayValue.Values { + if z.ArrayValue.Values[za0001] == nil { + s += msgp.NilSize + } else { + s += z.ArrayValue.Values[za0001].Msgsize() + } + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z AttributeAnyValue_AttributeAnyValueType) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendInt32(o, int32(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeAnyValue_AttributeAnyValueType) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 int32 + zb0001, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = AttributeAnyValue_AttributeAnyValueType(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z AttributeAnyValue_AttributeAnyValueType) Msgsize() (s int) { + s = msgp.Int32Size + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *AttributeArray) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 1 + // string "Values" + o = append(o, 0x81, 0xa6, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.Values))) + for za0001 := range z.Values { + if z.Values[za0001] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.Values[za0001].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Values", za0001) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeArray) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Values": + var zb0002 uint32 + 
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Values") + return + } + if cap(z.Values) >= int(zb0002) { + z.Values = (z.Values)[:zb0002] + } else { + z.Values = make([]*AttributeArrayValue, zb0002) + } + for za0001 := range z.Values { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.Values[za0001] = nil + } else { + if z.Values[za0001] == nil { + z.Values[za0001] = new(AttributeArrayValue) + } + bts, err = z.Values[za0001].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Values", za0001) + return + } + } + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AttributeArray) Msgsize() (s int) { + s = 1 + 7 + msgp.ArrayHeaderSize + for za0001 := range z.Values { + if z.Values[za0001] == nil { + s += msgp.NilSize + } else { + s += z.Values[za0001].Msgsize() + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *AttributeArrayValue) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 5 + // string "Type" + o = append(o, 0x85, 0xa4, 0x54, 0x79, 0x70, 0x65) + o = msgp.AppendInt32(o, int32(z.Type)) + // string "StringValue" + o = append(o, 0xab, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendString(o, z.StringValue) + // string "BoolValue" + o = append(o, 0xa9, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendBool(o, z.BoolValue) + // string "IntValue" + o = append(o, 0xa8, 0x49, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendInt64(o, z.IntValue) + // string "DoubleValue" + o = append(o, 0xab, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65) + o = msgp.AppendFloat64(o, z.DoubleValue) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeArrayValue) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "Type": + { + var zb0002 int32 + zb0002, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "Type") + return + } + z.Type = AttributeArrayValue_AttributeArrayValueType(zb0002) + } + case "StringValue": + z.StringValue, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "StringValue") + return + } + case "BoolValue": + z.BoolValue, bts, err = msgp.ReadBoolBytes(bts) + if err != nil { + err = msgp.WrapError(err, "BoolValue") + return + } + case "IntValue": + z.IntValue, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "IntValue") + return + } + case "DoubleValue": + z.DoubleValue, bts, err = msgp.ReadFloat64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "DoubleValue") + return + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *AttributeArrayValue) Msgsize() (s int) { + s = 1 + 5 + msgp.Int32Size + 12 + 
msgp.StringPrefixSize + len(z.StringValue) + 10 + msgp.BoolSize + 9 + msgp.Int64Size + 12 + msgp.Float64Size + return +} + +// MarshalMsg implements msgp.Marshaler +func (z AttributeArrayValue_AttributeArrayValueType) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendInt32(o, int32(z)) + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *AttributeArrayValue_AttributeArrayValueType) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var zb0001 int32 + zb0001, bts, err = msgp.ReadInt32Bytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + (*z) = AttributeArrayValue_AttributeArrayValueType(zb0001) + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z AttributeArrayValue_AttributeArrayValueType) Msgsize() (s int) { + s = msgp.Int32Size + return +} + // MarshalMsg implements msgp.Marshaler func (z *Span) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) // check for omitted fields - zb0001Len := uint32(14) - var zb0001Mask uint16 /* 14 bits */ + zb0001Len := uint32(15) + var zb0001Mask uint16 /* 15 bits */ _ = zb0001Mask if z.Meta == nil { zb0001Len-- @@ -29,6 +460,10 @@ func (z *Span) MarshalMsg(b []byte) (o []byte, err error) { zb0001Len-- zb0001Mask |= 0x2000 } + if z.SpanEvents == nil { + zb0001Len-- + zb0001Mask |= 0x4000 + } // variable map header, size zb0001Len o = append(o, 0x80|uint8(zb0001Len)) @@ -107,6 +542,22 @@ func (z *Span) MarshalMsg(b []byte) (o []byte, err error) { } } } + if (zb0001Mask & 0x4000) == 0 { // if not omitted + // string "span_events" + o = append(o, 0xab, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73) + o = msgp.AppendArrayHeader(o, uint32(len(z.SpanEvents))) + for za0008 := range z.SpanEvents { + if z.SpanEvents[za0008] == nil { + o = msgp.AppendNil(o) + } else { + o, err = z.SpanEvents[za0008].MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "SpanEvents", za0008) + return + } + } + } + } } return } @@ -369,6 +820,36 @@ func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) { } } } + case "span_events": + var zb0006 uint32 + zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "SpanEvents") + return + } + if cap(z.SpanEvents) >= int(zb0006) { + z.SpanEvents = (z.SpanEvents)[:zb0006] + } else { + z.SpanEvents = make([]*SpanEvent, zb0006) + } + for za0008 := range z.SpanEvents { + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + z.SpanEvents[za0008] = nil + } else { + if z.SpanEvents[za0008] == nil { + z.SpanEvents[za0008] = new(SpanEvent) + } + bts, err = z.SpanEvents[za0008].UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "SpanEvents", za0008) + return + } + } + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -412,6 +893,142 @@ func (z *Span) Msgsize() (s int) { s += z.SpanLinks[za0007].Msgsize() } } + s += 12 + msgp.ArrayHeaderSize + for za0008 := range z.SpanEvents { + if z.SpanEvents[za0008] == nil { + s += msgp.NilSize + } else { + s += z.SpanEvents[za0008].Msgsize() + } + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *SpanEvent) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + // map header, size 3 + // string "TimeUnixNano" + o = append(o, 0x83, 0xac, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f) + o = msgp.AppendUint64(o, z.TimeUnixNano) + 
// string "Name" + o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = msgp.AppendString(o, z.Name) + // string "Attributes" + o = append(o, 0xaa, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73) + o = msgp.AppendMapHeader(o, uint32(len(z.Attributes))) + for za0001, za0002 := range z.Attributes { + o = msgp.AppendString(o, za0001) + if za0002 == nil { + o = msgp.AppendNil(o) + } else { + o, err = za0002.MarshalMsg(o) + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + } + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *SpanEvent) UnmarshalMsg(bts []byte) (o []byte, err error) { + var field []byte + _ = field + var zb0001 uint32 + zb0001, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + for zb0001 > 0 { + zb0001-- + field, bts, err = msgp.ReadMapKeyZC(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + switch msgp.UnsafeString(field) { + case "TimeUnixNano": + z.TimeUnixNano, bts, err = msgp.ReadUint64Bytes(bts) + if err != nil { + err = msgp.WrapError(err, "TimeUnixNano") + return + } + case "Name": + z.Name, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Name") + return + } + case "Attributes": + var zb0002 uint32 + zb0002, bts, err = msgp.ReadMapHeaderBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + if z.Attributes == nil { + z.Attributes = make(map[string]*AttributeAnyValue, zb0002) + } else if len(z.Attributes) > 0 { + for key := range z.Attributes { + delete(z.Attributes, key) + } + } + for zb0002 > 0 { + var za0001 string + var za0002 *AttributeAnyValue + zb0002-- + za0001, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + err = msgp.WrapError(err, "Attributes") + return + } + if msgp.IsNil(bts) { + bts, err = msgp.ReadNilBytes(bts) + if err != nil { + return + } + za0002 = nil + } else { + if za0002 == nil { + za0002 = new(AttributeAnyValue) + } + bts, err = za0002.UnmarshalMsg(bts) + if err != nil { + err = msgp.WrapError(err, "Attributes", za0001) + return + } + } + z.Attributes[za0001] = za0002 + } + default: + bts, err = msgp.Skip(bts) + if err != nil { + err = msgp.WrapError(err) + return + } + } + } + o = bts + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *SpanEvent) Msgsize() (s int) { + s = 1 + 13 + msgp.Uint64Size + 5 + msgp.StringPrefixSize + len(z.Name) + 11 + msgp.MapHeaderSize + if z.Attributes != nil { + for za0001, za0002 := range z.Attributes { + _ = za0002 + s += msgp.StringPrefixSize + len(za0001) + if za0002 == nil { + s += msgp.NilSize + } else { + s += za0002.Msgsize() + } + } + } return } diff --git a/pkg/proto/pbgo/trace/span_gen_test.go b/pkg/proto/pbgo/trace/span_gen_test.go index d970954c5f562..80c142420d490 100644 --- a/pkg/proto/pbgo/trace/span_gen_test.go +++ b/pkg/proto/pbgo/trace/span_gen_test.go @@ -8,6 +8,180 @@ import ( "github.com/tinylib/msgp/msgp" ) +func TestMarshalUnmarshalAttributeAnyValue(t *testing.T) { + v := AttributeAnyValue{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func 
BenchmarkMarshalMsgAttributeAnyValue(b *testing.B) { + v := AttributeAnyValue{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgAttributeAnyValue(b *testing.B) { + v := AttributeAnyValue{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalAttributeAnyValue(b *testing.B) { + v := AttributeAnyValue{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalAttributeArray(t *testing.T) { + v := AttributeArray{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgAttributeArray(b *testing.B) { + v := AttributeArray{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgAttributeArray(b *testing.B) { + v := AttributeArray{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalAttributeArray(b *testing.B) { + v := AttributeArray{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + +func TestMarshalUnmarshalAttributeArrayValue(t *testing.T) { + v := AttributeArrayValue{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + } + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgAttributeArrayValue(b *testing.B) { + v := AttributeArrayValue{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgAttributeArrayValue(b *testing.B) { + v := AttributeArrayValue{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalAttributeArrayValue(b *testing.B) { + v := AttributeArrayValue{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalSpan(t *testing.T) { v := Span{} bts, err := v.MarshalMsg(nil) @@ -66,6 +240,64 @@ func BenchmarkUnmarshalSpan(b *testing.B) { } } +func TestMarshalUnmarshalSpanEvent(t *testing.T) { + v := SpanEvent{} + bts, err := v.MarshalMsg(nil) + if err != nil { + t.Fatal(err) + 
} + left, err := v.UnmarshalMsg(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left) + } + + left, err = msgp.Skip(bts) + if err != nil { + t.Fatal(err) + } + if len(left) > 0 { + t.Errorf("%d bytes left over after Skip(): %q", len(left), left) + } +} + +func BenchmarkMarshalMsgSpanEvent(b *testing.B) { + v := SpanEvent{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalMsg(nil) + } +} + +func BenchmarkAppendMsgSpanEvent(b *testing.B) { + v := SpanEvent{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalMsg(bts[0:0]) + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalMsg(bts[0:0]) + } +} + +func BenchmarkUnmarshalSpanEvent(b *testing.B) { + v := SpanEvent{} + bts, _ := v.MarshalMsg(nil) + b.ReportAllocs() + b.SetBytes(int64(len(bts))) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := v.UnmarshalMsg(bts) + if err != nil { + b.Fatal(err) + } + } +} + func TestMarshalUnmarshalSpanLink(t *testing.T) { v := SpanLink{} bts, err := v.MarshalMsg(nil) diff --git a/pkg/proto/pbgo/trace/span_utils.go b/pkg/proto/pbgo/trace/span_utils.go index 7c4919a237a23..3594e870516c1 100644 --- a/pkg/proto/pbgo/trace/span_utils.go +++ b/pkg/proto/pbgo/trace/span_utils.go @@ -23,6 +23,7 @@ var spanCopiedFields = map[string]struct{}{ "Type": {}, "MetaStruct": {}, "SpanLinks": {}, + "SpanEvents": {}, } // ShallowCopy returns a shallow copy of the copy-able portion of a Span. These are the @@ -49,5 +50,6 @@ func (s *Span) ShallowCopy() *Span { Type: s.Type, MetaStruct: s.MetaStruct, SpanLinks: s.SpanLinks, + SpanEvents: s.SpanEvents, } } diff --git a/pkg/proto/pbgo/trace/span_vtproto.pb.go b/pkg/proto/pbgo/trace/span_vtproto.pb.go index d975f7ffae1f2..027bcdfe39c62 100644 --- a/pkg/proto/pbgo/trace/span_vtproto.pb.go +++ b/pkg/proto/pbgo/trace/span_vtproto.pb.go @@ -99,6 +99,261 @@ func (m *SpanLink) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SpanEvent) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpanEvent) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SpanEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Attributes) > 0 { + for k := range m.Attributes { + v := m.Attributes[k] + baseI := i + size, err := v.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = protohelpers.EncodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if m.TimeUnixNano != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) + i-- + dAtA[i] = 0x9 + } + return len(dAtA) - i, nil +} + +func (m *AttributeAnyValue) 
MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributeAnyValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AttributeAnyValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.ArrayValue != nil { + size, err := m.ArrayValue.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + if m.DoubleValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) + i-- + dAtA[i] = 0x29 + } + if m.IntValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x20 + } + if m.BoolValue { + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.StringValue) > 0 { + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *AttributeArray) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributeArray) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AttributeArray) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Values[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AttributeArrayValue) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributeArrayValue) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AttributeArrayValue) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.DoubleValue != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.DoubleValue)))) + i-- + dAtA[i] = 0x29 + } + if m.IntValue != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IntValue)) + i-- + dAtA[i] = 0x20 + } + if m.BoolValue { + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + 
dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.StringValue) > 0 { + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x12 + } + if m.Type != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *Span) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -129,6 +384,18 @@ func (m *Span) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.SpanEvents) > 0 { + for iNdEx := len(m.SpanEvents) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.SpanEvents[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = protohelpers.EncodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x7a + } + } if len(m.SpanLinks) > 0 { for iNdEx := len(m.SpanLinks) - 1; iNdEx >= 0; iNdEx-- { size, err := m.SpanLinks[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) @@ -292,51 +559,153 @@ func (m *SpanLink) SizeVT() (n int) { return n } -func (m *Span) SizeVT() (n int) { +func (m *SpanEvent) SizeVT() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Service) - if l > 0 { - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + if m.TimeUnixNano != 0 { + n += 9 } l = len(m.Name) if l > 0 { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } - l = len(m.Resource) + if len(m.Attributes) > 0 { + for k, v := range m.Attributes { + _ = k + _ = v + l = 0 + if v != nil { + l = v.SizeVT() + } + l += 1 + protohelpers.SizeOfVarint(uint64(l)) + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + l + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AttributeAnyValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Type)) + } + l = len(m.StringValue) if l > 0 { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } - if m.TraceID != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.TraceID)) - } - if m.SpanID != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.SpanID)) + if m.BoolValue { + n += 2 } - if m.ParentID != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.ParentID)) + if m.IntValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) } - if m.Start != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + if m.DoubleValue != 0 { + n += 9 } - if m.Duration != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.Duration)) + if m.ArrayValue != nil { + l = m.ArrayValue.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } - if m.Error != 0 { - n += 1 + protohelpers.SizeOfVarint(uint64(m.Error)) + n += len(m.unknownFields) + return n +} + +func (m *AttributeArray) SizeVT() (n int) { + if m == nil { + return 0 } - if len(m.Meta) > 0 { - for k, v := range m.Meta { - _ = k - _ = v - mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) - n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } } - if len(m.Metrics) > 0 { + n += len(m.unknownFields) + return n +} + +func (m *AttributeArrayValue) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + 
protohelpers.SizeOfVarint(uint64(m.Type)) + } + l = len(m.StringValue) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.BoolValue { + n += 2 + } + if m.IntValue != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.IntValue)) + } + if m.DoubleValue != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *Span) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + l = len(m.Resource) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.TraceID != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.TraceID)) + } + if m.SpanID != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SpanID)) + } + if m.ParentID != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.ParentID)) + } + if m.Start != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Start)) + } + if m.Duration != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Duration)) + } + if m.Error != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Error)) + } + if len(m.Meta) > 0 { + for k, v := range m.Meta { + _ = k + _ = v + mapEntrySize := 1 + len(k) + protohelpers.SizeOfVarint(uint64(len(k))) + 1 + len(v) + protohelpers.SizeOfVarint(uint64(len(v))) + n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) + } + } + if len(m.Metrics) > 0 { for k, v := range m.Metrics { _ = k _ = v @@ -357,17 +726,804 @@ func (m *Span) SizeVT() (n int) { n += mapEntrySize + 1 + protohelpers.SizeOfVarint(uint64(mapEntrySize)) } } - if len(m.SpanLinks) > 0 { - for _, e := range m.SpanLinks { - l = e.SizeVT() - n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) - } + if len(m.SpanLinks) > 0 { + for _, e := range m.SpanLinks { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + if len(m.SpanEvents) > 0 { + for _, e := range m.SpanEvents { + l = e.SizeVT() + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *SpanLink) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpanLink: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpanLink: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + } + m.TraceID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TraceID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceIDHigh", wireType) + } + m.TraceIDHigh = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TraceIDHigh |= uint64(b&0x7F) << shift + if b < 0x80 { + break 
+ } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) + } + m.SpanID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SpanID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tracestate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tracestate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Flags |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SpanEvent) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SpanEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SpanEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) + } + m.TimeUnixNano = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + m.TimeUnixNano = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attributes == nil { + m.Attributes = make(map[string]*AttributeAnyValue) + } + var mapkey string + var mapvalue *AttributeAnyValue 
+ for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return protohelpers.ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return protohelpers.ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return protohelpers.ErrInvalidLength + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &AttributeAnyValue{} + if err := mapvalue.UnmarshalVT(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Attributes[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributeAnyValue) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributeAnyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributeAnyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= AttributeAnyValue_AttributeAnyValueType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StringValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BoolValue = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) + } + m.IntValue = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntValue |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.DoubleValue = float64(math.Float64frombits(v)) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArrayValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.ArrayValue == nil { + m.ArrayValue = &AttributeArray{} + } + if err := m.ArrayValue.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributeArray) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributeArray: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributeArray: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &AttributeArrayValue{}) + if err := m.Values[len(m.Values)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF } - n += len(m.unknownFields) - return n + return nil } - -func (m *SpanLink) UnmarshalVT(dAtA []byte) error { +func (m *AttributeArrayValue) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -390,17 +1546,17 @@ func (m *SpanLink) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SpanLink: wiretype end group for non-group") + return fmt.Errorf("proto: AttributeArrayValue: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SpanLink: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AttributeArrayValue: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) } - m.TraceID = 0 + m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -410,54 +1566,16 @@ func (m *SpanLink) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TraceID |= uint64(b&0x7F) << shift + m.Type |= AttributeArrayValue_AttributeArrayValueType(b&0x7F) << shift if b < 0x80 { break } } case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceIDHigh", wireType) - } - m.TraceIDHigh = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TraceIDHigh |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanID", wireType) - } - m.SpanID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SpanID |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -467,124 +1585,29 @@ func (m *SpanLink) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return protohelpers.ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex < 0 { return protohelpers.ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Attributes == nil { - m.Attributes = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return protohelpers.ErrInvalidLength - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return protohelpers.ErrInvalidLength - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return protohelpers.ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return protohelpers.ErrInvalidLength - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return protohelpers.ErrInvalidLength - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := protohelpers.Skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return protohelpers.ErrInvalidLength - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Attributes[mapkey] = mapvalue + m.StringValue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tracestate", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -594,29 +1617,17 @@ func (m *SpanLink) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return protohelpers.ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return protohelpers.ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tracestate = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: + m.BoolValue = bool(v != 0) + case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IntValue", wireType) } - m.Flags = 0 + m.IntValue = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return protohelpers.ErrIntOverflow @@ -626,11 +1637,22 @@ func (m *SpanLink) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Flags |= uint32(b&0x7F) << shift + m.IntValue |= int64(b&0x7F) << shift if b < 0x80 { break } } + case 5: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field DoubleValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.DoubleValue = float64(math.Float64frombits(v)) default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -1319,6 +2341,40 @@ func (m *Span) UnmarshalVT(dAtA []byte) error { return err } 
iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanEvents", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SpanEvents = append(m.SpanEvents, &SpanEvent{}) + if err := m.SpanEvents[len(m.SpanEvents)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/proto/pbgo/trace/stats.pb.go b/pkg/proto/pbgo/trace/stats.pb.go index c2dc1cfbbe86c..04bbef8562bae 100644 --- a/pkg/proto/pbgo/trace/stats.pb.go +++ b/pkg/proto/pbgo/trace/stats.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/trace/stats.proto package trace @@ -121,19 +121,18 @@ func (TraceRootFlag) EnumDescriptor() ([]byte, []int) { // StatsPayload is the payload used to send stats from the agent to the backend. type StatsPayload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AgentHostname string `protobuf:"bytes,1,opt,name=agentHostname,proto3" json:"agentHostname,omitempty"` - AgentEnv string `protobuf:"bytes,2,opt,name=agentEnv,proto3" json:"agentEnv,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + AgentHostname string `protobuf:"bytes,1,opt,name=agentHostname,proto3" json:"agentHostname,omitempty"` + AgentEnv string `protobuf:"bytes,2,opt,name=agentEnv,proto3" json:"agentEnv,omitempty"` // @gotags: json:"stats,omitempty" msg:"Stats,omitempty" Stats []*ClientStatsPayload `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"` AgentVersion string `protobuf:"bytes,4,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"` ClientComputed bool `protobuf:"varint,5,opt,name=clientComputed,proto3" json:"clientComputed,omitempty"` // splitPayload indicates if the payload is actually one of several payloads split out from a larger payload. // This field can be used in the backend to signal if re-aggregation is necessary. - SplitPayload bool `protobuf:"varint,6,opt,name=splitPayload,proto3" json:"splitPayload,omitempty"` + SplitPayload bool `protobuf:"varint,6,opt,name=splitPayload,proto3" json:"splitPayload,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StatsPayload) Reset() { @@ -211,10 +210,7 @@ func (x *StatsPayload) GetSplitPayload() bool { // ClientStatsPayload is the first layer of span stats aggregation. It is also // the payload sent by tracers to the agent when stats in tracer are enabled. type ClientStatsPayload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // Hostname is the tracer hostname. It's extracted from spans with "_dd.hostname" meta // or set by tracer stats payload when hostname reporting is enabled. 
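The stats.pb.go churn around this point comes from regenerating with protoc-gen-go v1.36.3 and protoc v5.29.3: the state field is now tagged protogen:"open.v1" and the unexported unknownFields/sizeCache bookkeeping moves below the named fields. Callers are unaffected, since only exported fields and getters are part of the API. A hypothetical usage sketch, assuming only the fields and getters visible in this diff and building inside the datadog-agent module:

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
)

func main() {
	// Only exported fields are set; the runtime-managed state, sizeCache and
	// unknownFields fields behave the same regardless of where they sit.
	p := &trace.StatsPayload{
		AgentHostname:  "agent-01",
		AgentEnv:       "prod",
		AgentVersion:   "7.x",
		ClientComputed: false,
		SplitPayload:   true,
	}
	fmt.Println(p.GetAgentHostname(), p.GetSplitPayload()) // agent-01 true
}
```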
Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"` @@ -241,7 +237,9 @@ type ClientStatsPayload struct { // The git commit SHA is obtained from a trace, where it may be set through a tracer <-> source code integration. GitCommitSha string `protobuf:"bytes,13,opt,name=git_commit_sha,json=gitCommitSha,proto3" json:"git_commit_sha,omitempty"` // The image tag is obtained from a container's set of tags. - ImageTag string `protobuf:"bytes,14,opt,name=image_tag,json=imageTag,proto3" json:"image_tag,omitempty"` + ImageTag string `protobuf:"bytes,14,opt,name=image_tag,json=imageTag,proto3" json:"image_tag,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientStatsPayload) Reset() { @@ -374,17 +372,16 @@ func (x *ClientStatsPayload) GetImageTag() string { // ClientStatsBucket is a time bucket containing aggregated stats. type ClientStatsBucket struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` // bucket start in nanoseconds - Duration uint64 `protobuf:"varint,2,opt,name=duration,proto3" json:"duration,omitempty"` // bucket duration in nanoseconds + state protoimpl.MessageState `protogen:"open.v1"` + Start uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` // bucket start in nanoseconds + Duration uint64 `protobuf:"varint,2,opt,name=duration,proto3" json:"duration,omitempty"` // bucket duration in nanoseconds // @gotags: json:"stats,omitempty" msg:"Stats,omitempty" Stats []*ClientGroupedStats `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"` // AgentTimeShift is the shift applied by the agent stats aggregator on bucket start // when the received bucket start is outside of the agent aggregation window AgentTimeShift int64 `protobuf:"varint,4,opt,name=agentTimeShift,proto3" json:"agentTimeShift,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientStatsBucket) Reset() { @@ -447,28 +444,27 @@ func (x *ClientStatsBucket) GetAgentTimeShift() int64 { // ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type type ClientGroupedStats struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` - HTTPStatusCode uint32 `protobuf:"varint,4,opt,name=HTTP_status_code,json=HTTPStatusCode,proto3" json:"HTTP_status_code,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - DBType string `protobuf:"bytes,6,opt,name=DB_type,json=DBType,proto3" json:"DB_type,omitempty"` // db_type might be used in the future to help in the obfuscation step - Hits uint64 `protobuf:"varint,7,opt,name=hits,proto3" json:"hits,omitempty"` // count of all spans aggregated in the groupedstats - Errors uint64 `protobuf:"varint,8,opt,name=errors,proto3" json:"errors,omitempty"` // count of error spans aggregated in the groupedstats - Duration uint64 `protobuf:"varint,9,opt,name=duration,proto3" json:"duration,omitempty"` // total duration in nanoseconds of spans aggregated in the bucket - OkSummary []byte 
`protobuf:"bytes,10,opt,name=okSummary,proto3" json:"okSummary,omitempty"` // ddsketch summary of ok spans latencies encoded in protobuf - ErrorSummary []byte `protobuf:"bytes,11,opt,name=errorSummary,proto3" json:"errorSummary,omitempty"` // ddsketch summary of error spans latencies encoded in protobuf - Synthetics bool `protobuf:"varint,12,opt,name=synthetics,proto3" json:"synthetics,omitempty"` // set to true on spans generated by synthetics traffic - TopLevelHits uint64 `protobuf:"varint,13,opt,name=topLevelHits,proto3" json:"topLevelHits,omitempty"` // count of top level spans aggregated in the groupedstats - SpanKind string `protobuf:"bytes,15,opt,name=span_kind,json=spanKind,proto3" json:"span_kind,omitempty"` // value of the span.kind tag on the span + state protoimpl.MessageState `protogen:"open.v1"` + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + HTTPStatusCode uint32 `protobuf:"varint,4,opt,name=HTTP_status_code,json=HTTPStatusCode,proto3" json:"HTTP_status_code,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + DBType string `protobuf:"bytes,6,opt,name=DB_type,json=DBType,proto3" json:"DB_type,omitempty"` // db_type might be used in the future to help in the obfuscation step + Hits uint64 `protobuf:"varint,7,opt,name=hits,proto3" json:"hits,omitempty"` // count of all spans aggregated in the groupedstats + Errors uint64 `protobuf:"varint,8,opt,name=errors,proto3" json:"errors,omitempty"` // count of error spans aggregated in the groupedstats + Duration uint64 `protobuf:"varint,9,opt,name=duration,proto3" json:"duration,omitempty"` // total duration in nanoseconds of spans aggregated in the bucket + OkSummary []byte `protobuf:"bytes,10,opt,name=okSummary,proto3" json:"okSummary,omitempty"` // ddsketch summary of ok spans latencies encoded in protobuf + ErrorSummary []byte `protobuf:"bytes,11,opt,name=errorSummary,proto3" json:"errorSummary,omitempty"` // ddsketch summary of error spans latencies encoded in protobuf + Synthetics bool `protobuf:"varint,12,opt,name=synthetics,proto3" json:"synthetics,omitempty"` // set to true on spans generated by synthetics traffic + TopLevelHits uint64 `protobuf:"varint,13,opt,name=topLevelHits,proto3" json:"topLevelHits,omitempty"` // count of top level spans aggregated in the groupedstats + SpanKind string `protobuf:"bytes,15,opt,name=span_kind,json=spanKind,proto3" json:"span_kind,omitempty"` // value of the span.kind tag on the span // peer_tags are supplementary tags that further describe a peer entity // E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the name of peer DB - PeerTags []string `protobuf:"bytes,16,rep,name=peer_tags,json=peerTags,proto3" json:"peer_tags,omitempty"` - IsTraceRoot Trilean `protobuf:"varint,17,opt,name=is_trace_root,json=isTraceRoot,proto3,enum=datadog.trace.Trilean" json:"is_trace_root,omitempty"` // this field's value is equal to span's ParentID == 0. + PeerTags []string `protobuf:"bytes,16,rep,name=peer_tags,json=peerTags,proto3" json:"peer_tags,omitempty"` + IsTraceRoot Trilean `protobuf:"varint,17,opt,name=is_trace_root,json=isTraceRoot,proto3,enum=datadog.trace.Trilean" json:"is_trace_root,omitempty"` // this field's value is equal to span's ParentID == 0. 
+ unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ClientGroupedStats) Reset() { diff --git a/pkg/proto/pbgo/trace/tracer_payload.pb.go b/pkg/proto/pbgo/trace/tracer_payload.pb.go index 0420fc5b12ca2..3570299873aef 100644 --- a/pkg/proto/pbgo/trace/tracer_payload.pb.go +++ b/pkg/proto/pbgo/trace/tracer_payload.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.35.2 -// protoc v5.26.1 +// protoc-gen-go v1.36.3 +// protoc v5.29.3 // source: datadog/trace/tracer_payload.proto package trace @@ -22,10 +22,7 @@ const ( // TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace. type TraceChunk struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // priority specifies sampling priority of the trace. // @gotags: json:"priority" msg:"priority" Priority int32 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority" msg:"priority"` @@ -37,10 +34,12 @@ type TraceChunk struct { Spans []*Span `protobuf:"bytes,3,rep,name=spans,proto3" json:"spans" msg:"spans"` // tags specifies tags common in all `spans`. // @gotags: json:"tags" msg:"tags" - Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"` + Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"tags"` // droppedTrace specifies whether the trace was dropped by samplers or not. // @gotags: json:"dropped_trace" msg:"dropped_trace" - DroppedTrace bool `protobuf:"varint,5,opt,name=droppedTrace,proto3" json:"dropped_trace" msg:"dropped_trace"` + DroppedTrace bool `protobuf:"varint,5,opt,name=droppedTrace,proto3" json:"dropped_trace" msg:"dropped_trace"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TraceChunk) Reset() { @@ -110,10 +109,7 @@ func (x *TraceChunk) GetDroppedTrace() bool { // TracerPayload represents a payload the trace agent receives from tracers. type TracerPayload struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` // containerID specifies the ID of the container where the tracer is running on. // @gotags: json:"container_id" msg:"container_id" ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"container_id" msg:"container_id"` @@ -134,7 +130,7 @@ type TracerPayload struct { Chunks []*TraceChunk `protobuf:"bytes,6,rep,name=chunks,proto3" json:"chunks" msg:"chunks"` // tags specifies tags common in all `chunks`. // @gotags: json:"tags" msg:"tags" - Tags map[string]string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"` + Tags map[string]string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value" msg:"tags"` // env specifies `env` tag that set with the tracer. 
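The @gotags annotations above splice extra json and msg keys into the generated struct tags so the same types serialize with encoding/json and msgp as well as protobuf. A small sketch, using a stand-in struct rather than the generated TraceChunk, of how those tag keys are read back via reflection:

```go
package main

import (
	"fmt"
	"reflect"
)

// chunk is a stand-in for the generated TraceChunk: one protobuf-style field
// carrying the extra json and msg tag keys injected via @gotags.
type chunk struct {
	Priority int32 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority" msg:"priority"`
}

func main() {
	f, _ := reflect.TypeOf(chunk{}).FieldByName("Priority")
	fmt.Println(f.Tag.Get("json")) // priority
	fmt.Println(f.Tag.Get("msg"))  // priority
}
```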
// @gotags: json:"env" msg:"env" Env string `protobuf:"bytes,8,opt,name=env,proto3" json:"env" msg:"env"` @@ -143,7 +139,9 @@ type TracerPayload struct { Hostname string `protobuf:"bytes,9,opt,name=hostname,proto3" json:"hostname" msg:"hostname"` // version specifies `version` tag that set with the tracer. // @gotags: json:"app_version" msg:"app_version" - AppVersion string `protobuf:"bytes,10,opt,name=appVersion,proto3" json:"app_version" msg:"app_version"` + AppVersion string `protobuf:"bytes,10,opt,name=appVersion,proto3" json:"app_version" msg:"app_version"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TracerPayload) Reset() { diff --git a/pkg/proto/protodep.lock b/pkg/proto/protodep.lock index 858523b6de4d8..ba321ea825913 100644 --- a/pkg/proto/protodep.lock +++ b/pkg/proto/protodep.lock @@ -2,7 +2,8 @@ proto_outdir = "./protodep" [[dependencies]] target = "github.com/mwitkow/go-proto-validators" - revision = "32a686adf8b5194d3ea07d632d49b6fb344af678" + subgroup = "" + revision = "875cb952c25c7ccadf261b169dba5fd0ced18a72" branch = "master" path = "github.com/mwitkow/go-proto-validators" ignores = ["./test", "./examples"] @@ -10,20 +11,15 @@ proto_outdir = "./protodep" [[dependencies]] target = "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api" + subgroup = "" revision = "83c7c1948180f4a79579be0a13eb46e820a3ddb5" branch = "" path = "google/api" protocol = "" -[[dependencies]] - target = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" - revision = "83c7c1948180f4a79579be0a13eb46e820a3ddb5" - branch = "" - path = "protoc-gen-swagger/options" - protocol = "" - [[dependencies]] target = "github.com/protocolbuffers/protobuf/src/google/protobuf" + subgroup = "" revision = "d0bfd5221182da1a7cc280f3337b5e41a89539cf" branch = "" path = "google/protobuf" diff --git a/pkg/proto/protodep.toml b/pkg/proto/protodep.toml index 748aa7634186e..8163de096479f 100644 --- a/pkg/proto/protodep.toml +++ b/pkg/proto/protodep.toml @@ -1,23 +1,28 @@ proto_outdir = "./protodep" [[dependencies]] - target = "github.com/mwitkow/go-proto-validators" - branch = "master" - path = "github.com/mwitkow/go-proto-validators" - ignores = ["./test", "./examples"] +target = "github.com/mwitkow/go-proto-validators" +branch = "master" +path = "github.com/mwitkow/go-proto-validators" +ignores = ["./test", "./examples"] [[dependencies]] - target = "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api" - revision = "v1.14.1" - path = "google/api" +target = "github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api" +revision = "v1.14.1" +path = "google/api" -[[dependencies]] - target = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options" - revision = "v1.14.1" - path = "protoc-gen-swagger/options" [[dependencies]] - target = "github.com/protocolbuffers/protobuf/src/google/protobuf" - revision = "v3.11.4" - path = "google/protobuf" - ignores = ["./compiler", "unittest", "test_messages_", "any_test", "map_lite_unittest", "map_proto2_unittest", "util", "map_unittest"] +target = "github.com/protocolbuffers/protobuf/src/google/protobuf" +revision = "v3.11.4" +path = "google/protobuf" +ignores = [ + "./compiler", + "unittest", + "test_messages_", + "any_test", + "map_lite_unittest", + "map_proto2_unittest", + "util", + "map_unittest", +] diff --git a/pkg/proto/protodep/protoc-gen-swagger/options/annotations.proto b/pkg/proto/protodep/protoc-gen-swagger/options/annotations.proto deleted file mode 100644 
index 5151fd5a65204..0000000000000 --- a/pkg/proto/protodep/protoc-gen-swagger/options/annotations.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package grpc.gateway.protoc_gen_swagger.options; - -option go_package = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"; - -import "google/protobuf/descriptor.proto"; -import "protoc-gen-swagger/options/openapiv2.proto"; - -extend google.protobuf.FileOptions { - // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - Swagger openapiv2_swagger = 1042; -} -extend google.protobuf.MethodOptions { - // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - Operation openapiv2_operation = 1042; -} -extend google.protobuf.MessageOptions { - // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - Schema openapiv2_schema = 1042; -} -extend google.protobuf.ServiceOptions { - // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - Tag openapiv2_tag = 1042; -} -extend google.protobuf.FieldOptions { - // ID assigned by protobuf-global-extension-registry@google.com for grpc-gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - JSONSchema openapiv2_field = 1042; -} diff --git a/pkg/proto/protodep/protoc-gen-swagger/options/openapiv2.proto b/pkg/proto/protodep/protoc-gen-swagger/options/openapiv2.proto deleted file mode 100644 index 3dcac4d99ffc4..0000000000000 --- a/pkg/proto/protodep/protoc-gen-swagger/options/openapiv2.proto +++ /dev/null @@ -1,380 +0,0 @@ -syntax = "proto3"; - -package grpc.gateway.protoc_gen_swagger.options; - -option go_package = "github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options"; - -import "google/protobuf/any.proto"; -import "google/protobuf/struct.proto"; - -// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject -// -// TODO(ivucica): document fields -message Swagger { - string swagger = 1; - Info info = 2; - string host = 3; - // `base_path` is the common prefix path used on all API endpoints (ie. /api, /v1, etc.). By adding this, - // it allows you to remove this portion from the path endpoints in your Swagger file making them easier - // to read. Note that using `base_path` does not change the endpoint paths that are generated in the resulting - // Swagger file. If you wish to use `base_path` with relatively generated Swagger paths, the - // `base_path` prefix must be manually removed from your `google.api.http` paths and your code changed to - // serve the API from the `base_path`. 
- string base_path = 4; - enum SwaggerScheme { - UNKNOWN = 0; - HTTP = 1; - HTTPS = 2; - WS = 3; - WSS = 4; - } - repeated SwaggerScheme schemes = 5; - repeated string consumes = 6; - repeated string produces = 7; - // field 8 is reserved for 'paths'. - reserved 8; - // field 9 is reserved for 'definitions', which at this time are already - // exposed as and customizable as proto messages. - reserved 9; - map responses = 10; - SecurityDefinitions security_definitions = 11; - repeated SecurityRequirement security = 12; - // field 13 is reserved for 'tags', which are supposed to be exposed as and - // customizable as proto services. TODO(ivucica): add processing of proto - // service objects into OpenAPI v2 Tag objects. - reserved 13; - ExternalDocumentation external_docs = 14; - map extensions = 15; -} - -// `Operation` is a representation of OpenAPI v2 specification's Operation object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject -// -// TODO(ivucica): document fields -message Operation { - repeated string tags = 1; - string summary = 2; - string description = 3; - ExternalDocumentation external_docs = 4; - string operation_id = 5; - repeated string consumes = 6; - repeated string produces = 7; - // field 8 is reserved for 'parameters'. - reserved 8; - map responses = 9; - repeated string schemes = 10; - bool deprecated = 11; - repeated SecurityRequirement security = 12; - map extensions = 13; -} - -// `Response` is a representation of OpenAPI v2 specification's Response object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject -// -message Response { - // `Description` is a short description of the response. - // GFM syntax can be used for rich text representation. - string description = 1; - // `Schema` optionally defines the structure of the response. - // If `Schema` is not provided, it means there is no content to the response. - Schema schema = 2; - // field 3 is reserved for 'headers'. - reserved 3; - // `Examples` gives per-mimetype response examples. - // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object - map examples = 4; - map extensions = 5; -} - -// `Info` is a representation of OpenAPI v2 specification's Info object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject -// -// TODO(ivucica): document fields -message Info { - string title = 1; - string description = 2; - string terms_of_service = 3; - Contact contact = 4; - License license = 5; - string version = 6; - map extensions = 7; -} - -// `Contact` is a representation of OpenAPI v2 specification's Contact object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject -// -// TODO(ivucica): document fields -message Contact { - string name = 1; - string url = 2; - string email = 3; -} - -// `License` is a representation of OpenAPI v2 specification's License object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject -// -message License { - // Required. The license name used for the API. - string name = 1; - // A URL to the license used for the API. - string url = 2; -} - -// `ExternalDocumentation` is a representation of OpenAPI v2 specification's -// ExternalDocumentation object. 
-// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject -// -// TODO(ivucica): document fields -message ExternalDocumentation { - string description = 1; - string url = 2; -} - -// `Schema` is a representation of OpenAPI v2 specification's Schema object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject -// -// TODO(ivucica): document fields -message Schema { - JSONSchema json_schema = 1; - string discriminator = 2; - bool read_only = 3; - // field 4 is reserved for 'xml'. - reserved 4; - ExternalDocumentation external_docs = 5; - google.protobuf.Any example = 6; -} - -// `JSONSchema` represents properties from JSON Schema taken, and as used, in -// the OpenAPI v2 spec. -// -// This includes changes made by OpenAPI v2. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject -// -// See also: https://cswr.github.io/JsonSchema/spec/basic_types/, -// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json -// -// TODO(ivucica): document fields -message JSONSchema { - // field 1 is reserved for '$id', omitted from OpenAPI v2. - reserved 1; - // field 2 is reserved for '$schema', omitted from OpenAPI v2. - reserved 2; - // Ref is used to define an external reference to include in the message. - // This could be a fully qualified proto message reference, and that type must be imported - // into the protofile. If no message is identified, the Ref will be used verbatim in - // the output. - // For example: - // `ref: ".google.protobuf.Timestamp"`. - string ref = 3; - // field 4 is reserved for '$comment', omitted from OpenAPI v2. - reserved 4; - string title = 5; - string description = 6; - string default = 7; - bool read_only = 8; - // field 9 is reserved for 'examples', which is omitted from OpenAPI v2 in favor of 'example' field. - reserved 9; - double multiple_of = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - uint64 max_length = 15; - uint64 min_length = 16; - string pattern = 17; - // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2. - reserved 18; - // field 19 is reserved for 'items', but in OpenAPI-specific way. TODO(ivucica): add 'items'? - reserved 19; - uint64 max_items = 20; - uint64 min_items = 21; - bool unique_items = 22; - // field 23 is reserved for 'contains', omitted from OpenAPI v2. - reserved 23; - uint64 max_properties = 24; - uint64 min_properties = 25; - repeated string required = 26; - // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific way. TODO(ivucica): add 'additionalProperties'? - reserved 27; - // field 28 is reserved for 'definitions', omitted from OpenAPI v2. - reserved 28; - // field 29 is reserved for 'properties', but in OpenAPI-specific way. TODO(ivucica): add 'additionalProperties'? - reserved 29; - // following fields are reserved, as the properties have been omitted from OpenAPI v2: - // patternProperties, dependencies, propertyNames, const - reserved 30 to 33; - // Items in 'array' must be unique. 
- repeated string array = 34; - - enum JSONSchemaSimpleTypes { - UNKNOWN = 0; - ARRAY = 1; - BOOLEAN = 2; - INTEGER = 3; - NULL = 4; - NUMBER = 5; - OBJECT = 6; - STRING = 7; - } - - repeated JSONSchemaSimpleTypes type = 35; - // following fields are reserved, as the properties have been omitted from OpenAPI v2: - // format, contentMediaType, contentEncoding, if, then, else - reserved 36 to 41; - // field 42 is reserved for 'allOf', but in OpenAPI-specific way. TODO(ivucica): add 'allOf'? - reserved 42; - // following fields are reserved, as the properties have been omitted from OpenAPI v2: - // anyOf, oneOf, not - reserved 43 to 45; -} - -// `Tag` is a representation of OpenAPI v2 specification's Tag object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject -// -// TODO(ivucica): document fields -message Tag { - // field 1 is reserved for 'name'. In our generator, this is (to be) extracted - // from the name of proto service, and thus not exposed to the user, as - // changing tag object's name would break the link to the references to the - // tag in individual operation specifications. - // - // TODO(ivucica): Add 'name' property. Use it to allow override of the name of - // global Tag object, then use that name to reference the tag throughout the - // Swagger file. - reserved 1; - // TODO(ivucica): Description should be extracted from comments on the proto - // service object. - string description = 2; - ExternalDocumentation external_docs = 3; -} - -// `SecurityDefinitions` is a representation of OpenAPI v2 specification's -// Security Definitions object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject -// -// A declaration of the security schemes available to be used in the -// specification. This does not enforce the security schemes on the operations -// and only serves to provide the relevant details for each scheme. -message SecurityDefinitions { - // A single security scheme definition, mapping a "name" to the scheme it defines. - map security = 1; -} - -// `SecurityScheme` is a representation of OpenAPI v2 specification's -// Security Scheme object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject -// -// Allows the definition of a security scheme that can be used by the -// operations. Supported schemes are basic authentication, an API key (either as -// a header or as a query parameter) and OAuth2's common flows (implicit, -// password, application and access code). -message SecurityScheme { - // Required. The type of the security scheme. Valid values are "basic", - // "apiKey" or "oauth2". - enum Type { - TYPE_INVALID = 0; - TYPE_BASIC = 1; - TYPE_API_KEY = 2; - TYPE_OAUTH2 = 3; - } - - // Required. The location of the API key. Valid values are "query" or "header". - enum In { - IN_INVALID = 0; - IN_QUERY = 1; - IN_HEADER = 2; - } - - // Required. The flow used by the OAuth2 security scheme. Valid values are - // "implicit", "password", "application" or "accessCode". - enum Flow { - FLOW_INVALID = 0; - FLOW_IMPLICIT = 1; - FLOW_PASSWORD = 2; - FLOW_APPLICATION = 3; - FLOW_ACCESS_CODE = 4; - } - - // Required. The type of the security scheme. Valid values are "basic", - // "apiKey" or "oauth2". - Type type = 1; - // A short description for security scheme. - string description = 2; - // Required. The name of the header or query parameter to be used. - // - // Valid for apiKey. - string name = 3; - // Required. 
The location of the API key. Valid values are "query" or "header". - // - // Valid for apiKey. - In in = 4; - // Required. The flow used by the OAuth2 security scheme. Valid values are - // "implicit", "password", "application" or "accessCode". - // - // Valid for oauth2. - Flow flow = 5; - // Required. The authorization URL to be used for this flow. This SHOULD be in - // the form of a URL. - // - // Valid for oauth2/implicit and oauth2/accessCode. - string authorization_url = 6; - // Required. The token URL to be used for this flow. This SHOULD be in the - // form of a URL. - // - // Valid for oauth2/password, oauth2/application and oauth2/accessCode. - string token_url = 7; - // Required. The available scopes for the OAuth2 security scheme. - // - // Valid for oauth2. - Scopes scopes = 8; - map extensions = 9; -} - -// `SecurityRequirement` is a representation of OpenAPI v2 specification's -// Security Requirement object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject -// -// Lists the required security schemes to execute this operation. The object can -// have multiple security schemes declared in it which are all required (that -// is, there is a logical AND between the schemes). -// -// The name used for each property MUST correspond to a security scheme -// declared in the Security Definitions. -message SecurityRequirement { - // If the security scheme is of type "oauth2", then the value is a list of - // scope names required for the execution. For other security scheme types, - // the array MUST be empty. - message SecurityRequirementValue { - repeated string scope = 1; - } - // Each name must correspond to a security scheme which is declared in - // the Security Definitions. If the security scheme is of type "oauth2", - // then the value is a list of scope names required for the execution. - // For other security scheme types, the array MUST be empty. - map security_requirement = 1; -} - -// `Scopes` is a representation of OpenAPI v2 specification's Scopes object. -// -// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject -// -// Lists the available scopes for an OAuth2 security scheme. -message Scopes { - // Maps between a name of a scope to a short description of it (as the value - // of the property). 
- map scope = 1; -} diff --git a/pkg/remoteconfig/state/go.mod b/pkg/remoteconfig/state/go.mod index d9fead6f8ede8..fc5c091a508e2 100644 --- a/pkg/remoteconfig/state/go.mod +++ b/pkg/remoteconfig/state/go.mod @@ -14,6 +14,6 @@ require ( github.com/kr/pretty v0.3.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect - golang.org/x/crypto v0.31.0 // indirect + golang.org/x/crypto v0.32.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/remoteconfig/state/go.sum b/pkg/remoteconfig/state/go.sum index d86844e767b4e..0cb8bc80d1c48 100644 --- a/pkg/remoteconfig/state/go.sum +++ b/pkg/remoteconfig/state/go.sum @@ -21,10 +21,10 @@ github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbm github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/remoteconfig/state/products.go b/pkg/remoteconfig/state/products.go index be3133064b998..04938bbc6337e 100644 --- a/pkg/remoteconfig/state/products.go +++ b/pkg/remoteconfig/state/products.go @@ -6,6 +6,7 @@ package state var validProducts = map[string]struct{}{ + ProductInstallerConfig: {}, ProductUpdaterCatalogDD: {}, ProductUpdaterAgent: {}, ProductUpdaterTask: {}, @@ -36,6 +37,8 @@ var validProducts = map[string]struct{}{ } const ( + // ProductInstallerConfig is the product used to receive the installer configuration + ProductInstallerConfig = "INSTALLER_CONFIG" // ProductUpdaterCatalogDD is the product used to receive the package catalog from datadog ProductUpdaterCatalogDD = "UPDATER_CATALOG_DD" // ProductUpdaterAgent is the product used to receive defaults versions to install diff --git a/pkg/sbom/collectors/collectors.go b/pkg/sbom/collectors/collectors.go index adeec30f9f27d..13cc2b00c81d6 100644 --- a/pkg/sbom/collectors/collectors.go +++ b/pkg/sbom/collectors/collectors.go @@ -12,7 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/sbom" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) // ScanType defines the scan type of the collector @@ -40,7 +40,7 @@ type Collector interface { // CleanCache cleans the collector cache CleanCache() error // Init 
initializes the collector - Init(config.Component, optional.Option[workloadmeta.Component]) error + Init(config.Component, option.Option[workloadmeta.Component]) error // Scan performs a scan Scan(context.Context, sbom.ScanRequest) sbom.ScanResult // Channel returns the channel to send scan results diff --git a/pkg/sbom/collectors/containerd/containerd.go b/pkg/sbom/collectors/containerd/containerd.go index a5461f08dc617..5e6b087e7089c 100644 --- a/pkg/sbom/collectors/containerd/containerd.go +++ b/pkg/sbom/collectors/containerd/containerd.go @@ -21,7 +21,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/sbom/collectors" cutil "github.com/DataDog/datadog-agent/pkg/util/containerd" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/trivy" ) @@ -66,7 +66,7 @@ type Collector struct { resChan chan sbom.ScanResult opts sbom.ScanOptions containerdClient cutil.ContainerdItf - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] closed bool } @@ -77,7 +77,7 @@ func (c *Collector) CleanCache() error { } // Init initializes the collector -func (c *Collector) Init(cfg config.Component, wmeta optional.Option[workloadmeta.Component]) error { +func (c *Collector) Init(cfg config.Component, wmeta option.Option[workloadmeta.Component]) error { trivyCollector, err := trivy.GetGlobalCollector(cfg, wmeta) if err != nil { return err diff --git a/pkg/sbom/collectors/crio/crio.go b/pkg/sbom/collectors/crio/crio.go index 92b8c5eb49f88..98699b1442bb8 100644 --- a/pkg/sbom/collectors/crio/crio.go +++ b/pkg/sbom/collectors/crio/crio.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" crioUtil "github.com/DataDog/datadog-agent/pkg/util/crio" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/trivy" ) @@ -55,7 +55,7 @@ type Collector struct { resChan chan sbom.ScanResult opts sbom.ScanOptions crioClient crioUtil.Client - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] closed bool } @@ -66,7 +66,7 @@ func (c *Collector) CleanCache() error { } // Init initializes the collector with configuration and workloadmeta component -func (c *Collector) Init(cfg config.Component, wmeta optional.Option[workloadmeta.Component]) error { +func (c *Collector) Init(cfg config.Component, wmeta option.Option[workloadmeta.Component]) error { trivyCollector, err := trivy.GetGlobalCollector(cfg, wmeta) if err != nil { return err diff --git a/pkg/sbom/collectors/docker/docker.go b/pkg/sbom/collectors/docker/docker.go index 1f5759e941cc0..ae53d041bb3ce 100644 --- a/pkg/sbom/collectors/docker/docker.go +++ b/pkg/sbom/collectors/docker/docker.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/util/docker" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/trivy" "github.com/docker/docker/client" @@ -61,7 +61,7 @@ type Collector struct { resChan chan sbom.ScanResult opts sbom.ScanOptions cl client.ImageAPIClient - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] closed bool } @@ -72,7 
+72,7 @@ func (c *Collector) CleanCache() error { } // Init initializes the collector -func (c *Collector) Init(cfg config.Component, wmeta optional.Option[workloadmeta.Component]) error { +func (c *Collector) Init(cfg config.Component, wmeta option.Option[workloadmeta.Component]) error { trivyCollector, err := trivy.GetGlobalCollector(cfg, wmeta) if err != nil { return err diff --git a/pkg/sbom/collectors/host/host.go b/pkg/sbom/collectors/host/host.go index bb792b0e2f313..cd7a5d9de80b4 100644 --- a/pkg/sbom/collectors/host/host.go +++ b/pkg/sbom/collectors/host/host.go @@ -17,7 +17,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/trivy" ) @@ -36,7 +36,7 @@ func (c *Collector) CleanCache() error { } // Init initialize the host collector -func (c *Collector) Init(cfg config.Component, wmeta optional.Option[workloadmeta.Component]) error { +func (c *Collector) Init(cfg config.Component, wmeta option.Option[workloadmeta.Component]) error { trivyCollector, err := trivy.GetGlobalCollector(cfg, wmeta) if err != nil { return err @@ -54,7 +54,7 @@ func (c *Collector) Scan(ctx context.Context, request sbom.ScanRequest) sbom.Sca } log.Infof("host scan request [%v]", hostScanRequest.ID()) - report, err := c.trivyCollector.ScanFilesystem(ctx, hostScanRequest.FS, hostScanRequest.Path, c.opts) + report, err := c.trivyCollector.ScanFilesystem(ctx, hostScanRequest.Path, c.opts) return sbom.ScanResult{ Error: err, Report: report, diff --git a/pkg/sbom/collectors/host/host_wmi.go b/pkg/sbom/collectors/host/host_wmi.go index 937e2823dad90..09600b1897deb 100644 --- a/pkg/sbom/collectors/host/host_wmi.go +++ b/pkg/sbom/collectors/host/host_wmi.go @@ -16,7 +16,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/sbom" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/winutil" "github.com/DataDog/gopsutil/host" @@ -118,7 +118,7 @@ func (c *Collector) CleanCache() error { } // Init initialize the host collector -func (c *Collector) Init(_ config.Component, _ optional.Option[workloadmeta.Component]) (err error) { +func (c *Collector) Init(_ config.Component, _ option.Option[workloadmeta.Component]) (err error) { if c.version, err = winutil.GetWindowsBuildString(); err != nil { return err } diff --git a/pkg/sbom/collectors/host/request.go b/pkg/sbom/collectors/host/request.go index c30cb453ceab9..e88e54890131b 100644 --- a/pkg/sbom/collectors/host/request.go +++ b/pkg/sbom/collectors/host/request.go @@ -6,9 +6,7 @@ package host import ( - "io/fs" "os" - "path/filepath" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/sbom/types" @@ -18,34 +16,11 @@ import ( // hashable to be pushed in the work queue for processing. 
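Most of the pkg/sbom collector hunks above and below are a mechanical rename of pkg/util/optional to pkg/util/option (optional.Option becomes option.Option, and the tests switch to option.New). The sketch below is a hypothetical, minimal shape consistent with those call sites; anything beyond the constructor and parameter type shown in this diff is an assumption:

```go
package main

import "fmt"

// Option is a minimal stand-in for pkg/util/option as used in these hunks.
type Option[T any] struct {
	value T
	set   bool
}

// New wraps a value, matching the option.New[T](v) calls in scanner_test.go.
func New[T any](v T) Option[T] {
	return Option[T]{value: v, set: true}
}

// Get is illustrative only; the real package's accessors are not part of this diff.
func (o Option[T]) Get() (T, bool) {
	return o.value, o.set
}

func main() {
	o := New("workloadmeta")
	v, ok := o.Get()
	fmt.Println(v, ok) // workloadmeta true
}
```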
type scanRequest struct { Path string - FS fs.FS -} - -type relFS struct { - root string - fs fs.FS -} - -func newFS(root string) fs.FS { - fs := os.DirFS(root) - return &relFS{root: "/", fs: fs} -} - -func (f *relFS) Open(name string) (fs.File, error) { - if filepath.IsAbs(name) { - var err error - name, err = filepath.Rel(f.root, name) - if err != nil { - return nil, err - } - } - - return f.fs.Open(name) } // NewScanRequest creates a new scan request -func NewScanRequest(path string, fs fs.FS) types.ScanRequest { - return scanRequest{Path: path, FS: fs} +func NewScanRequest(path string) types.ScanRequest { + return scanRequest{Path: path} } // NewHostScanRequest creates a new scan request for the root filesystem @@ -54,7 +29,7 @@ func NewHostScanRequest() types.ScanRequest { if hostRoot := os.Getenv("HOST_ROOT"); env.IsContainerized() && hostRoot != "" { scanPath = hostRoot } - return NewScanRequest(scanPath, newFS("/")) + return NewScanRequest(scanPath) } // Collector returns the collector name diff --git a/pkg/sbom/collectors/mock.go b/pkg/sbom/collectors/mock.go index 5e22b71269e40..fa05e2afea673 100644 --- a/pkg/sbom/collectors/mock.go +++ b/pkg/sbom/collectors/mock.go @@ -14,8 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/sbom" - "github.com/DataDog/datadog-agent/pkg/util/optional" - + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/stretchr/testify/mock" ) @@ -42,7 +41,7 @@ func (m *MockCollector) CleanCache() error { } // Init initializes the collector -func (m *MockCollector) Init(cfg config.Component, opt optional.Option[workloadmeta.Component]) error { +func (m *MockCollector) Init(cfg config.Component, opt option.Option[workloadmeta.Component]) error { args := m.Called(cfg, opt) return args.Error(0) } diff --git a/pkg/sbom/scanner/scanner.go b/pkg/sbom/scanner/scanner.go index e91bd594e3aeb..8bdd537f35e2d 100644 --- a/pkg/sbom/scanner/scanner.go +++ b/pkg/sbom/scanner/scanner.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/sbom/telemetry" "github.com/DataDog/datadog-agent/pkg/util/filesystem" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -51,13 +51,13 @@ type Scanner struct { // It cannot be cleaned when a scan is running cacheMutex sync.Mutex - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] collectors map[string]collectors.Collector } // NewScanner creates a new SBOM scanner. Call Start to start the store and its // collectors. -func NewScanner(cfg config.Component, collectors map[string]collectors.Collector, wmeta optional.Option[workloadmeta.Component]) *Scanner { +func NewScanner(cfg config.Component, collectors map[string]collectors.Collector, wmeta option.Option[workloadmeta.Component]) *Scanner { return &Scanner{ scanQueue: workqueue.NewTypedRateLimitingQueueWithConfig( workqueue.NewTypedItemExponentialFailureRateLimiter[sbom.ScanRequest]( @@ -81,7 +81,7 @@ func NewScanner(cfg config.Component, collectors map[string]collectors.Collector // CreateGlobalScanner creates a SBOM scanner, sets it as the default // global one, and returns it. Start() needs to be called before any data // collection happens. 
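The request.go hunk above removes the fs.FS plumbing: NewScanRequest now carries only a path, and the host collector calls ScanFilesystem without an FS argument. For reference, a self-contained sketch of what the deleted relFS adapter did; io/fs names are unrooted, so absolute paths had to be rewritten relative to the root before os.DirFS could open them:

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
)

// relFS reproduces the helper removed in this hunk: it lets callers open
// absolute paths through an fs.FS rooted at "/".
type relFS struct {
	root string
	fs   fs.FS
}

func (r *relFS) Open(name string) (fs.File, error) {
	if filepath.IsAbs(name) {
		rel, err := filepath.Rel(r.root, name)
		if err != nil {
			return nil, err
		}
		name = rel
	}
	return r.fs.Open(name)
}

func main() {
	host := &relFS{root: "/", fs: os.DirFS("/")}
	f, err := host.Open("/etc/hostname") // opened as "etc/hostname" under os.DirFS("/")
	if err == nil {
		f.Close()
	}
	fmt.Println("open error:", err)
}
```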
-func CreateGlobalScanner(cfg config.Component, wmeta optional.Option[workloadmeta.Component]) (*Scanner, error) { +func CreateGlobalScanner(cfg config.Component, wmeta option.Option[workloadmeta.Component]) (*Scanner, error) { if !cfg.GetBool("sbom.host.enabled") && !cfg.GetBool("sbom.container_image.enabled") && !cfg.GetBool("runtime_security_config.sbom.enabled") { return nil, nil } diff --git a/pkg/sbom/scanner/scanner_test.go b/pkg/sbom/scanner/scanner_test.go index 06b0cc1e6a588..a7064b00c7099 100644 --- a/pkg/sbom/scanner/scanner_test.go +++ b/pkg/sbom/scanner/scanner_test.go @@ -25,7 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/sbom" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" cyclonedxgo "github.com/CycloneDX/cyclonedx-go" "github.com/stretchr/testify/assert" @@ -127,7 +127,7 @@ func TestRetryLogic_Error(t *testing.T) { cfg.Set("sbom.cache.clean_interval", "10s", model.SourceAgentRuntime) // Required for the ticker // Create a scanner and start it - scanner := NewScanner(cfg, map[string]collectors.Collector{collName: mockCollector}, optional.NewOption[workloadmeta.Component](workloadmetaStore)) + scanner := NewScanner(cfg, map[string]collectors.Collector{collName: mockCollector}, option.New[workloadmeta.Component](workloadmetaStore)) ctx, cancel := context.WithCancel(context.Background()) scanner.Start(ctx) @@ -193,7 +193,7 @@ func TestRetryLogic_ImageDeleted(t *testing.T) { cfg.Set("sbom.cache.clean_interval", "10s", model.SourceAgentRuntime) // Required for the ticker // Create a scanner and start it - scanner := NewScanner(cfg, map[string]collectors.Collector{collName: mockCollector}, optional.NewOption[workloadmeta.Component](workloadmetaStore)) + scanner := NewScanner(cfg, map[string]collectors.Collector{collName: mockCollector}, option.New[workloadmeta.Component](workloadmetaStore)) ctx, cancel := context.WithCancel(context.Background()) scanner.Start(ctx) @@ -258,7 +258,7 @@ func TestRetryChannelFull(t *testing.T) { cfg.Set("sbom.cache.clean_interval", "10s", model.SourceAgentRuntime) // Required for the ticker // Create a scanner and start it - scanner := NewScanner(cfg, map[string]collectors.Collector{collName: mockCollector}, optional.NewOption[workloadmeta.Component](workloadmetaStore)) + scanner := NewScanner(cfg, map[string]collectors.Collector{collName: mockCollector}, option.New[workloadmeta.Component](workloadmetaStore)) ctx, cancel := context.WithCancel(context.Background()) scanner.Start(ctx) diff --git a/pkg/security/agent/agent.go b/pkg/security/agent/agent.go index 60d71694f9188..1c868c8536501 100644 --- a/pkg/security/agent/agent.go +++ b/pkg/security/agent/agent.go @@ -23,8 +23,8 @@ import ( "github.com/DataDog/datadog-agent/comp/logs/agent/config" "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/proto/api" + "github.com/DataDog/datadog-agent/pkg/security/seclog" "github.com/DataDog/datadog-agent/pkg/security/security_profile/dump" - "github.com/DataDog/datadog-agent/pkg/util/log" ) // RuntimeSecurityAgent represents the main wrapper for the Runtime Security product @@ -106,7 +106,7 @@ func (rsa *RuntimeSecurityAgent) StartEventListener() { msg += ", please check that the runtime security module is enabled in the system-probe.yaml config file" } } - log.Error(msg) + seclog.Errorf("%s", msg) default: // do nothing } @@ -119,7 
+119,7 @@ func (rsa *RuntimeSecurityAgent) StartEventListener() { if !rsa.connected.Load() { rsa.connected.Store(true) - log.Info("Successfully connected to the runtime security module") + seclog.Infof("Successfully connected to the runtime security module") } for { @@ -128,7 +128,10 @@ func (rsa *RuntimeSecurityAgent) StartEventListener() { if err == io.EOF || in == nil { break } - log.Tracef("Got message from rule `%s` for event `%s`", in.RuleID, string(in.Data)) + + if seclog.DefaultLogger.IsTracing() { + seclog.DefaultLogger.Tracef("Got message from rule `%s` for event `%s`", in.RuleID, string(in.Data)) + } rsa.eventReceived.Inc() @@ -157,7 +160,10 @@ func (rsa *RuntimeSecurityAgent) StartActivityDumpListener() { if err == io.EOF || msg == nil { break } - log.Tracef("Got activity dump [%s]", msg.GetDump().GetMetadata().GetName()) + + if seclog.DefaultLogger.IsTracing() { + seclog.DefaultLogger.Tracef("Got activity dump [%s]", msg.GetDump().GetMetadata().GetName()) + } rsa.activityDumpReceived.Inc() @@ -180,7 +186,7 @@ func (rsa *RuntimeSecurityAgent) DispatchActivityDump(msg *api.ActivityDumpStrea // parse dump from message dump, err := dump.NewActivityDumpFromMessage(msg.GetDump()) if err != nil { - log.Errorf("%v", err) + seclog.Errorf("%v", err) return } if rsa.profContainersTelemetry != nil { @@ -192,7 +198,7 @@ func (rsa *RuntimeSecurityAgent) DispatchActivityDump(msg *api.ActivityDumpStrea for _, requests := range dump.StorageRequests { if err := rsa.storage.PersistRaw(requests, dump, raw); err != nil { - log.Errorf("%v", err) + seclog.Errorf("%v", err) } } } diff --git a/pkg/security/agent/mocks/security_module_client_wrapper.go b/pkg/security/agent/mocks/security_module_client_wrapper.go index c31eae478f235..6e83e6e59a4b1 100644 --- a/pkg/security/agent/mocks/security_module_client_wrapper.go +++ b/pkg/security/agent/mocks/security_module_client_wrapper.go @@ -4,6 +4,8 @@ package mocks import ( api "github.com/DataDog/datadog-agent/pkg/security/proto/api" + grpc "google.golang.org/grpc" + mock "github.com/stretchr/testify/mock" ) @@ -164,23 +166,23 @@ func (_m *SecurityModuleClientWrapper) GenerateEncoding(request *api.Transcoding } // GetActivityDumpStream provides a mock function with no fields -func (_m *SecurityModuleClientWrapper) GetActivityDumpStream() (api.SecurityModule_GetActivityDumpStreamClient, error) { +func (_m *SecurityModuleClientWrapper) GetActivityDumpStream() (grpc.ServerStreamingClient[api.ActivityDumpStreamMessage], error) { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for GetActivityDumpStream") } - var r0 api.SecurityModule_GetActivityDumpStreamClient + var r0 grpc.ServerStreamingClient[api.ActivityDumpStreamMessage] var r1 error - if rf, ok := ret.Get(0).(func() (api.SecurityModule_GetActivityDumpStreamClient, error)); ok { + if rf, ok := ret.Get(0).(func() (grpc.ServerStreamingClient[api.ActivityDumpStreamMessage], error)); ok { return rf() } - if rf, ok := ret.Get(0).(func() api.SecurityModule_GetActivityDumpStreamClient); ok { + if rf, ok := ret.Get(0).(func() grpc.ServerStreamingClient[api.ActivityDumpStreamMessage]); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(api.SecurityModule_GetActivityDumpStreamClient) + r0 = ret.Get(0).(grpc.ServerStreamingClient[api.ActivityDumpStreamMessage]) } } @@ -224,23 +226,23 @@ func (_m *SecurityModuleClientWrapper) GetConfig() (*api.SecurityConfigMessage, } // GetEvents provides a mock function with no fields -func (_m *SecurityModuleClientWrapper) GetEvents() 
(api.SecurityModule_GetEventsClient, error) { +func (_m *SecurityModuleClientWrapper) GetEvents() (grpc.ServerStreamingClient[api.SecurityEventMessage], error) { ret := _m.Called() if len(ret) == 0 { panic("no return value specified for GetEvents") } - var r0 api.SecurityModule_GetEventsClient + var r0 grpc.ServerStreamingClient[api.SecurityEventMessage] var r1 error - if rf, ok := ret.Get(0).(func() (api.SecurityModule_GetEventsClient, error)); ok { + if rf, ok := ret.Get(0).(func() (grpc.ServerStreamingClient[api.SecurityEventMessage], error)); ok { return rf() } - if rf, ok := ret.Get(0).(func() api.SecurityModule_GetEventsClient); ok { + if rf, ok := ret.Get(0).(func() grpc.ServerStreamingClient[api.SecurityEventMessage]); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(api.SecurityModule_GetEventsClient) + r0 = ret.Get(0).(grpc.ServerStreamingClient[api.SecurityEventMessage]) } } diff --git a/pkg/security/agent/status_provider.go b/pkg/security/agent/status_provider.go index 280618f8650cc..dbb59061d5853 100644 --- a/pkg/security/agent/status_provider.go +++ b/pkg/security/agent/status_provider.go @@ -57,6 +57,7 @@ func (s statusProvider) populateStatus(stats map[string]interface{}) { "kernelLockdown": cfStatus.Environment.KernelLockdown, "mmapableMaps": cfStatus.Environment.UseMmapableMaps, "ringBuffer": cfStatus.Environment.UseRingBuffer, + "fentry": cfStatus.Environment.UseFentry, } if cfStatus.Environment.Constants != nil { environment["constantFetchers"] = cfStatus.Environment.Constants diff --git a/pkg/security/agent/status_templates/runtimesecurity.tmpl b/pkg/security/agent/status_templates/runtimesecurity.tmpl index 0def50ef9d8c3..f2ca3710a596c 100644 --- a/pkg/security/agent/status_templates/runtimesecurity.tmpl +++ b/pkg/security/agent/status_templates/runtimesecurity.tmpl @@ -61,6 +61,9 @@ {{- if .ringBuffer }} Use eBPF ring buffer: {{ .ringBuffer }} {{- end }} + {{- if .fentry }} + Use fentry: {{ .fentry }} + {{- end }} {{ if .constantFetchers }} Available constant fetchers =========================== diff --git a/pkg/security/config/config.go b/pkg/security/config/config.go index 9d165a4ae1900..bc325dc5b865e 100644 --- a/pkg/security/config/config.go +++ b/pkg/security/config/config.go @@ -9,6 +9,7 @@ package config import ( "encoding/binary" "fmt" + "math" "net" "strings" "time" @@ -111,7 +112,7 @@ type RuntimeSecurityConfig struct { // ActivityDumpCgroupDumpTimeout defines the cgroup activity dumps timeout. ActivityDumpCgroupDumpTimeout time.Duration // ActivityDumpRateLimiter defines the kernel rate of max events per sec for activity dumps. - ActivityDumpRateLimiter int + ActivityDumpRateLimiter uint16 // ActivityDumpCgroupWaitListTimeout defines the time to wait before a cgroup can be dumped again. 
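In the config.go hunk just below, ActivityDumpRateLimiter changes from int to uint16, and the raw config value is validated against the uint16 range before the narrowing conversion rather than being silently truncated. A standalone sketch of that pattern; the helper name is illustrative:

```go
package main

import (
	"fmt"
	"math"
)

// toUint16 mirrors the check added below for
// runtime_security_config.activity_dump.rate_limiter: reject out-of-range
// values up front instead of letting uint16() wrap them.
func toUint16(name string, v int) (uint16, error) {
	if v < 0 || v > math.MaxUint16 {
		return 0, fmt.Errorf("invalid value for %s: %d, must be in uint16 range", name, v)
	}
	return uint16(v), nil
}

func main() {
	if _, err := toUint16("rate_limiter", 70000); err != nil {
		fmt.Println(err) // rejected: 70000 does not fit in uint16
	}
	v, _ := toUint16("rate_limiter", 500)
	fmt.Println(v) // 500
}
```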
ActivityDumpCgroupWaitListTimeout time.Duration // ActivityDumpCgroupDifferentiateArgs defines if system-probe should differentiate process nodes using process @@ -274,6 +275,9 @@ type RuntimeSecurityConfig struct { // IMDSIPv4 is used to provide a custom IP address for the IMDS endpoint IMDSIPv4 uint32 + + // SendEventFromSystemProbe defines when the event are sent directly from system-probe + SendEventFromSystemProbe bool } // Config defines a security config @@ -374,7 +378,6 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { ActivityDumpCgroupsManagers: pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.cgroup_managers"), ActivityDumpTracedEventTypes: parseEventTypeStringSlice(pkgconfigsetup.SystemProbe().GetStringSlice("runtime_security_config.activity_dump.traced_event_types")), ActivityDumpCgroupDumpTimeout: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.dump_duration"), - ActivityDumpRateLimiter: pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.rate_limiter"), ActivityDumpCgroupWaitListTimeout: pkgconfigsetup.SystemProbe().GetDuration("runtime_security_config.activity_dump.cgroup_wait_list_timeout"), ActivityDumpCgroupDifferentiateArgs: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.activity_dump.cgroup_differentiate_args"), ActivityDumpLocalStorageDirectory: pkgconfigsetup.SystemProbe().GetString("runtime_security_config.activity_dump.local_storage.output_directory"), @@ -458,7 +461,16 @@ func NewRuntimeSecurityConfig() (*RuntimeSecurityConfig, error) { // IMDS IMDSIPv4: parseIMDSIPv4(), + + // direct sender + SendEventFromSystemProbe: pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.direct_send_from_system_probe"), + } + + activityDumpRateLimiter := pkgconfigsetup.SystemProbe().GetInt("runtime_security_config.activity_dump.rate_limiter") + if activityDumpRateLimiter < 0 || activityDumpRateLimiter > math.MaxUint16 { + return nil, fmt.Errorf("invalid value for runtime_security_config.activity_dump.rate_limiter: %d, must be in uint16 range", activityDumpRateLimiter) } + rsConfig.ActivityDumpRateLimiter = uint16(activityDumpRateLimiter) if err := rsConfig.sanitize(); err != nil { return nil, err @@ -504,7 +516,7 @@ func isRemoteConfigEnabled() bool { func IsEBPFLessModeEnabled() bool { const cfgKey = "runtime_security_config.ebpfless.enabled" // by default on fargate, we enable ebpfless mode - if !pkgconfigsetup.SystemProbe().IsSet(cfgKey) && fargate.IsFargateInstance() { + if !pkgconfigsetup.SystemProbe().IsConfigured(cfgKey) && fargate.IsFargateInstance() { seclog.Infof("Fargate instance detected, enabling CWS ebpfless mode") pkgconfigsetup.SystemProbe().Set(cfgKey, true, pkgconfigmodel.SourceAgentRuntime) } diff --git a/pkg/security/ebpf/c/include/constants/custom.h b/pkg/security/ebpf/c/include/constants/custom.h index 1e2311bf8fc7e..ac470714a2c64 100644 --- a/pkg/security/ebpf/c/include/constants/custom.h +++ b/pkg/security/ebpf/c/include/constants/custom.h @@ -200,4 +200,38 @@ static __attribute__((always_inline)) u64 get_imds_ip() { #define CGROUP_SYSTEMD_SERVICE (0 << 8) #define CGROUP_SYSTEMD_SCOPE (1 << 8) +#define ACTIVE_FLOWS_MAX_SIZE 128 + +enum PID_ROUTE_TYPE +{ + BIND_ENTRY, + PROCFS_ENTRY, + FLOW_CLASSIFICATION_ENTRY, +}; + +enum FLUSH_NETWORK_STATS_TYPE +{ + PID_EXIT, + PID_EXEC, + NETWORK_STATS_TICKER, +}; + +static __attribute__((always_inline)) u64 get_network_monitor_period() { + u64 network_monitor_period; + 
LOAD_CONSTANT("network_monitor_period", network_monitor_period); + return network_monitor_period; +} + +static __attribute__((always_inline)) u64 is_sk_storage_supported() { + u64 is_sk_storage_supported; + LOAD_CONSTANT("is_sk_storage_supported", is_sk_storage_supported); + return is_sk_storage_supported; +} + +static __attribute__((always_inline)) u64 is_network_flow_monitor_enabled() { + u64 is_network_flow_monitor_enabled; + LOAD_CONSTANT("is_network_flow_monitor_enabled", is_network_flow_monitor_enabled); + return is_network_flow_monitor_enabled; +} + #endif diff --git a/pkg/security/ebpf/c/include/constants/enums.h b/pkg/security/ebpf/c/include/constants/enums.h index 4837192476a36..a2475849001ab 100644 --- a/pkg/security/ebpf/c/include/constants/enums.h +++ b/pkg/security/ebpf/c/include/constants/enums.h @@ -43,6 +43,7 @@ enum event_type EVENT_DNS, EVENT_NET_DEVICE, EVENT_VETH_PAIR, + EVENT_ACCEPT, EVENT_BIND, EVENT_CONNECT, EVENT_UNSHARE_MNTNS, @@ -52,6 +53,8 @@ enum event_type EVENT_LOGIN_UID_WRITE, EVENT_CGROUP_WRITE, EVENT_RAW_PACKET, + EVENT_NETWORK_FLOW_MONITOR, + EVENT_STAT, EVENT_MAX, // has to be the last one EVENT_ALL = 0xffffffff // used as a mask for all the events diff --git a/pkg/security/ebpf/c/include/constants/fentry_macro.h b/pkg/security/ebpf/c/include/constants/fentry_macro.h index a0efe549f9b51..c49b7b9335025 100644 --- a/pkg/security/ebpf/c/include/constants/fentry_macro.h +++ b/pkg/security/ebpf/c/include/constants/fentry_macro.h @@ -26,6 +26,7 @@ typedef unsigned long long ctx_t; #define HOOK_SYSCALL_COMPAT_EXIT(name) SYSCALL_FEXIT(name) #define HOOK_SYSCALL_COMPAT_TIME_EXIT(name) SYSCALL_TIME_FEXIT(name) #define TAIL_CALL_TARGET(_name) SEC("fentry/start_kernel") // `start_kernel` is only used at boot time, the hook should never be hit +#define TAIL_CALL_TARGET_WITH_HOOK_POINT(name) SEC("fentry/" name) #define CTX_PARM1(ctx) (u64)(ctx[0]) #define CTX_PARM2(ctx) (u64)(ctx[1]) @@ -33,8 +34,26 @@ typedef unsigned long long ctx_t; #define CTX_PARM4(ctx) (u64)(ctx[3]) #define CTX_PARM5(ctx) (u64)(ctx[4]) -#define CTX_PARMRET(ctx, argc) (u64)(ctx[argc]) -#define SYSCALL_PARMRET(ctx) CTX_PARMRET(ctx, 1) +u64 __attribute__((always_inline)) CTX_PARMRET(ctx_t *ctx) { + u64 argc; + LOAD_CONSTANT("fentry_func_argc", argc); + + u64 ret = 0; +#define CTX_PARMRET_CASE(n) case n: asm("%0 = *(u64 *)(%1 +%2)" : "=r"(ret) : "r"(ctx), "i"(n * 8)); break; + switch (argc) { + CTX_PARMRET_CASE(0) + CTX_PARMRET_CASE(1) + CTX_PARMRET_CASE(2) + CTX_PARMRET_CASE(3) + CTX_PARMRET_CASE(4) + CTX_PARMRET_CASE(5) + CTX_PARMRET_CASE(6) + } + return ret; +#undef CTX_PARMRET_CASE +} + +#define SYSCALL_PARMRET(ctx) CTX_PARMRET(ctx) #else @@ -61,6 +80,7 @@ typedef struct pt_regs ctx_t; #define HOOK_SYSCALL_COMPAT_EXIT(name) SYSCALL_COMPAT_KRETPROBE(name) #define HOOK_SYSCALL_COMPAT_TIME_EXIT(name) SYSCALL_COMPAT_TIME_KRETPROBE(name) #define TAIL_CALL_TARGET(name) SEC("kprobe/" name) +#define TAIL_CALL_TARGET_WITH_HOOK_POINT(name) SEC("kprobe/" name) #define CTX_PARM1(ctx) PT_REGS_PARM1(ctx) #define CTX_PARM2(ctx) PT_REGS_PARM2(ctx) @@ -68,8 +88,11 @@ typedef struct pt_regs ctx_t; #define CTX_PARM4(ctx) PT_REGS_PARM4(ctx) #define CTX_PARM5(ctx) PT_REGS_PARM5(ctx) -#define CTX_PARMRET(ctx, _argc) PT_REGS_RC(ctx) -#define SYSCALL_PARMRET(ctx) CTX_PARMRET(ctx, _) +u64 __attribute__((always_inline)) CTX_PARMRET(ctx_t *ctx) { + return PT_REGS_RC(ctx); +} + +#define SYSCALL_PARMRET(ctx) CTX_PARMRET(ctx) #endif diff --git a/pkg/security/ebpf/c/include/constants/offsets/filesystem.h 
b/pkg/security/ebpf/c/include/constants/offsets/filesystem.h index 797070a8534e7..f6d1d16b44a61 100644 --- a/pkg/security/ebpf/c/include/constants/offsets/filesystem.h +++ b/pkg/security/ebpf/c/include/constants/offsets/filesystem.h @@ -17,23 +17,40 @@ unsigned long __attribute__((always_inline)) get_inode_ino(struct inode *inode) return ino; } -dev_t __attribute__((always_inline)) get_inode_dev(struct inode *inode) { +struct inode* get_dentry_inode(struct dentry *dentry) { + u64 offset; + LOAD_CONSTANT("dentry_d_inode_offset", offset); + + struct inode *inode; + bpf_probe_read(&inode, sizeof(inode), (void *)dentry + offset); + return inode; +} + +dev_t __attribute__((always_inline)) get_sb_dev(struct super_block *sb) { + u64 sb_dev_offset; + LOAD_CONSTANT("sb_dev_offset", sb_dev_offset); + dev_t dev; - struct super_block *sb; - bpf_probe_read(&sb, sizeof(sb), &inode->i_sb); - bpf_probe_read(&dev, sizeof(dev), &sb->s_dev); + bpf_probe_read(&dev, sizeof(dev), (void *)sb + sb_dev_offset); return dev; } +dev_t __attribute__((always_inline)) get_inode_dev(struct inode *inode) { + u64 offset; + LOAD_CONSTANT("inode_sb_offset", offset); + + struct super_block *sb; + bpf_probe_read(&sb, sizeof(sb), (void *)inode + offset); + return get_sb_dev(sb); +} + dev_t __attribute__((always_inline)) get_dentry_dev(struct dentry *dentry) { u64 offset; LOAD_CONSTANT("dentry_d_sb_offset", offset); - dev_t dev; struct super_block *sb; bpf_probe_read(&sb, sizeof(sb), (char *)dentry + offset); - bpf_probe_read(&dev, sizeof(dev), &sb->s_dev); - return dev; + return get_sb_dev(sb); } void *__attribute__((always_inline)) get_file_f_inode_addr(struct file *file) { @@ -67,10 +84,17 @@ int __attribute__((always_inline)) get_vfsmount_mount_id(struct vfsmount *mnt) { return mount_id; } -int __attribute__((always_inline)) get_path_mount_id(struct path *path) { +struct vfsmount* __attribute__((always_inline)) get_path_vfsmount(struct path *path) { + u64 offset; + LOAD_CONSTANT("path_mnt_offset", offset); + struct vfsmount *mnt; - bpf_probe_read(&mnt, sizeof(mnt), &path->mnt); - return get_vfsmount_mount_id(mnt); + bpf_probe_read(&mnt, sizeof(mnt), (void *)path + offset); + return mnt; +} + +int __attribute__((always_inline)) get_path_mount_id(struct path *path) { + return get_vfsmount_mount_id(get_path_vfsmount(path)); } int __attribute__((always_inline)) get_file_mount_id(struct file *file) { @@ -78,15 +102,16 @@ int __attribute__((always_inline)) get_file_mount_id(struct file *file) { } int __attribute__((always_inline)) get_vfsmount_mount_flags(struct vfsmount *mnt) { + u64 offset; + LOAD_CONSTANT("vfsmount_mnt_flags_offset", offset); + int mount_flags; - bpf_probe_read(&mount_flags, sizeof(mount_flags), &mnt->mnt_flags); + bpf_probe_read(&mount_flags, sizeof(mount_flags), (void *)mnt + offset); return mount_flags; } int __attribute__((always_inline)) get_path_mount_flags(struct path *path) { - struct vfsmount *mnt; - bpf_probe_read(&mnt, sizeof(mnt), &path->mnt); - return get_vfsmount_mount_flags(mnt); + return get_vfsmount_mount_flags(get_path_vfsmount(path)); } int __attribute__((always_inline)) get_mount_mount_id(void *mnt) { @@ -98,8 +123,11 @@ int __attribute__((always_inline)) get_mount_mount_id(void *mnt) { } struct dentry *__attribute__((always_inline)) get_mount_mountpoint_dentry(struct mount *mnt) { + u64 mount_mnt_mountpoint_offset; + LOAD_CONSTANT("mount_mnt_mountpoint_offset", mount_mnt_mountpoint_offset); + struct dentry *dentry; - bpf_probe_read(&dentry, sizeof(dentry), (char *)mnt + 24); + 
bpf_probe_read(&dentry, sizeof(dentry), (void *)mnt + mount_mnt_mountpoint_offset); return dentry; } @@ -108,8 +136,11 @@ struct vfsmount *__attribute__((always_inline)) get_mount_vfsmount(void *mnt) { } struct dentry *__attribute__((always_inline)) get_vfsmount_dentry(struct vfsmount *mnt) { + u64 offset; + LOAD_CONSTANT("vfsmount_mnt_root_offset", offset); + struct dentry *dentry; - bpf_probe_read(&dentry, sizeof(dentry), &mnt->mnt_root); + bpf_probe_read(&dentry, sizeof(dentry), (void *)mnt + offset); return dentry; } @@ -123,28 +154,29 @@ struct super_block *__attribute__((always_inline)) get_dentry_sb(struct dentry * } struct file_system_type *__attribute__((always_inline)) get_super_block_fs(struct super_block *sb) { + u64 offset; + LOAD_CONSTANT("super_block_s_type_offset", offset); + struct file_system_type *fs; - bpf_probe_read(&fs, sizeof(fs), &sb->s_type); + bpf_probe_read(&fs, sizeof(fs), (void *)sb + offset); return fs; } struct super_block *__attribute__((always_inline)) get_vfsmount_sb(struct vfsmount *mnt) { + u64 offset; + LOAD_CONSTANT("vfsmount_mnt_sb_offset", offset); + struct super_block *sb; - bpf_probe_read(&sb, sizeof(sb), &mnt->mnt_sb); + bpf_probe_read(&sb, sizeof(sb), (void *)mnt + offset); return sb; } -dev_t __attribute__((always_inline)) get_sb_dev(struct super_block *sb) { - dev_t dev; - bpf_probe_read(&dev, sizeof(dev), &sb->s_dev); - return dev; -} - struct dentry *__attribute__((always_inline)) get_mountpoint_dentry(void *mntpoint) { - struct dentry *dentry; + u64 offset; + LOAD_CONSTANT("mountpoint_dentry_offset", offset); - // bpf_probe_read(&dentry, sizeof(dentry), (char *)mntpoint + offsetof(struct mountpoint, m_dentry)); - bpf_probe_read(&dentry, sizeof(dentry), (char *)mntpoint + 16); + struct dentry *dentry; + bpf_probe_read(&dentry, sizeof(dentry), (void *)mntpoint + offset); return dentry; } @@ -156,41 +188,37 @@ dev_t __attribute__((always_inline)) get_mount_dev(void *mnt) { return get_vfsmount_dev(get_mount_vfsmount(mnt)); } -struct inode *__attribute__((always_inline)) get_dentry_inode(struct dentry *dentry) { - struct inode *d_inode; - bpf_probe_read(&d_inode, sizeof(d_inode), &dentry->d_inode); - return d_inode; -} - unsigned long __attribute__((always_inline)) get_dentry_ino(struct dentry *dentry) { + if (!dentry) { + return 0; + } return get_inode_ino(get_dentry_inode(dentry)); } -struct dentry *__attribute__((always_inline)) get_file_dentry(struct file *file) { - struct dentry *file_dentry; - bpf_probe_read(&file_dentry, sizeof(file_dentry), &get_file_f_path_addr(file)->dentry); - return file_dentry; -} - struct dentry *__attribute__((always_inline)) get_path_dentry(struct path *path) { + u64 offset; + LOAD_CONSTANT("path_dentry_offset", offset); + struct dentry *dentry; - bpf_probe_read(&dentry, sizeof(dentry), &path->dentry); + bpf_probe_read(&dentry, sizeof(dentry), (void *)path + offset); return dentry; } -unsigned long __attribute__((always_inline)) get_path_ino(struct path *path) { - struct dentry *dentry; - bpf_probe_read(&dentry, sizeof(dentry), &path->dentry); +struct dentry *__attribute__((always_inline)) get_file_dentry(struct file *file) { + return get_path_dentry(get_file_f_path_addr(file)); +} - if (dentry) { - return get_dentry_ino(dentry); - } - return 0; +unsigned long __attribute__((always_inline)) get_path_ino(struct path *path) { + struct dentry *dentry = get_path_dentry(path); + return get_dentry_ino(dentry); } void __attribute__((always_inline)) get_dentry_name(struct dentry *dentry, void *buffer, size_t n) { + u64 
dentry_d_name_offset; + LOAD_CONSTANT("dentry_d_name_offset", dentry_d_name_offset); + struct qstr qstr; - bpf_probe_read(&qstr, sizeof(qstr), &dentry->d_name); + bpf_probe_read(&qstr, sizeof(qstr), (void *)dentry + dentry_d_name_offset); bpf_probe_read_str(buffer, n, (void *)qstr.name); } @@ -247,8 +275,7 @@ static __attribute__((always_inline)) int is_overlayfs(struct dentry *dentry) { } int __attribute__((always_inline)) get_ovl_lower_ino_direct(struct dentry *dentry) { - struct inode *d_inode; - bpf_probe_read(&d_inode, sizeof(d_inode), &dentry->d_inode); + struct inode *d_inode = get_dentry_inode(dentry); // escape from the embedded vfs_inode to reach ovl_inode struct inode *lower; @@ -258,8 +285,7 @@ int __attribute__((always_inline)) get_ovl_lower_ino_direct(struct dentr } int __attribute__((always_inline)) get_ovl_lower_ino_from_ovl_path(struct dentry *dentry) { - struct inode *d_inode; - bpf_probe_read(&d_inode, sizeof(d_inode), &dentry->d_inode); + struct inode *d_inode = get_dentry_inode(dentry); // escape from the embedded vfs_inode to reach ovl_inode struct dentry *lower; @@ -269,9 +295,12 @@ int __attribute__((always_inline)) get_ovl_lower_ino_from_ovl_path(struct dentry } int __attribute__((always_inline)) get_ovl_lower_ino_from_ovl_entry(struct dentry *dentry) { - struct inode *d_inode; - bpf_probe_read(&d_inode, sizeof(d_inode), &dentry->d_inode); + struct inode *d_inode = get_dentry_inode(dentry); + // escape from the embedded vfs_inode to reach ovl_entry + // struct inode vfs_inode; + // struct dentry *__upperdentry; + // struct ovl_entry *oe; void *oe; bpf_probe_read(&oe, sizeof(oe), (char *)d_inode + get_sizeof_inode() + 8); @@ -283,42 +312,52 @@ int __attribute__((always_inline)) get_ovl_lower_ino_from_ovl_entr } int __attribute__((always_inline)) get_ovl_upper_ino(struct dentry *dentry) { - struct inode *d_inode; - bpf_probe_read(&d_inode, sizeof(d_inode), &dentry->d_inode); + struct inode *d_inode = get_dentry_inode(dentry); - // escape from the embedded vfs_inode to reach ovl_inode + // escape from the embedded vfs_inode to reach upper dentry + // struct inode vfs_inode; + // struct dentry *__upperdentry; struct dentry *upper; bpf_probe_read(&upper, sizeof(upper), (char *)d_inode + get_sizeof_inode()); return get_dentry_ino(upper); } -void __always_inline set_overlayfs_ino(struct dentry *dentry, u64 *ino, u32 *flags) { - u64 lower_inode = 0; +int __attribute__((always_inline)) get_ovl_lower_ino(struct dentry *dentry) { switch (get_ovl_path_in_inode()) { case 2: - lower_inode = get_ovl_lower_ino_from_ovl_entry(dentry); - break; + return get_ovl_lower_ino_from_ovl_entry(dentry); case 1: - lower_inode = get_ovl_lower_ino_from_ovl_path(dentry); - break; + return get_ovl_lower_ino_from_ovl_path(dentry); default: - lower_inode = get_ovl_lower_ino_direct(dentry); - break; + return get_ovl_lower_ino_direct(dentry); } + + return 0; +} + +int __always_inline get_overlayfs_layer(struct dentry *dentry) { + return get_ovl_upper_ino(dentry) != 0 ? UPPER_LAYER : LOWER_LAYER; +} + +void __always_inline set_overlayfs_inode(struct dentry *dentry, struct file_t *file) { + u64 orig_inode = file->path_key.ino; + u64 lower_inode = get_ovl_lower_ino(dentry); u64 upper_inode = get_ovl_upper_ino(dentry); - if (upper_inode) { - *flags |= UPPER_LAYER; - } else if (lower_inode) { - *flags |= LOWER_LAYER; + // NOTE(safchain) both lower & upper inodes seem to be incorrect sometimes on kernel >= 6.8. + // Need to investigate the root cause. 
+ if (get_ovl_path_in_inode() == 2 && lower_inode != orig_inode && upper_inode != orig_inode) { + return; } if (lower_inode) { - *ino = lower_inode; + file->path_key.ino = lower_inode; } else if (upper_inode) { - *ino = upper_inode; + file->path_key.ino = upper_inode; } + + file->flags |= upper_inode != 0 ? UPPER_LAYER : LOWER_LAYER; } #define VFS_ARG_POSITION1 1 diff --git a/pkg/security/ebpf/c/include/constants/offsets/network.h b/pkg/security/ebpf/c/include/constants/offsets/network.h index 495fc0d20452f..681e0fcf4628a 100644 --- a/pkg/security/ebpf/c/include/constants/offsets/network.h +++ b/pkg/security/ebpf/c/include/constants/offsets/network.h @@ -12,12 +12,43 @@ __attribute__((always_inline)) u16 get_family_from_sock_common(struct sock_commo return family; } +__attribute__((always_inline)) u16 get_skc_num_from_sock_common(struct sock_common *sk) { + u64 sock_common_skc_num_offset; + LOAD_CONSTANT("sock_common_skc_num_offset", sock_common_skc_num_offset); + + u16 skc_num; + bpf_probe_read(&skc_num, sizeof(skc_num), (void *)sk + sock_common_skc_num_offset); + return htons(skc_num); +} + +__attribute__((always_inline)) struct sock* get_sock_from_socket(struct socket *socket) { + u64 socket_sock_offset; + LOAD_CONSTANT("socket_sock_offset", socket_sock_offset); + + struct sock *sk = NULL; + bpf_probe_read(&sk, sizeof(sk), (void *)socket + socket_sock_offset); + return sk; +} + __attribute__((always_inline)) u64 get_flowi4_saddr_offset() { u64 flowi4_saddr_offset; LOAD_CONSTANT("flowi4_saddr_offset", flowi4_saddr_offset); return flowi4_saddr_offset; } +// TODO: needed for l4_protocol resolution, see network/flow.h +__attribute__((always_inline)) u64 get_flowi4_proto_offset() { + u64 flowi4_proto_offset; + LOAD_CONSTANT("flowi4_proto_offset", flowi4_proto_offset); + return flowi4_proto_offset; +} + +__attribute__((always_inline)) u64 get_flowi6_proto_offset() { + u64 flowi6_proto_offset; + LOAD_CONSTANT("flowi6_proto_offset", flowi6_proto_offset); + return flowi6_proto_offset; +} + __attribute__((always_inline)) u64 get_flowi4_uli_offset() { u64 flowi4_uli_offset; LOAD_CONSTANT("flowi4_uli_offset", flowi4_uli_offset); diff --git a/pkg/security/ebpf/c/include/events_definition.h b/pkg/security/ebpf/c/include/events_definition.h index ef52f5edcf386..ab5e135724b2e 100644 --- a/pkg/security/ebpf/c/include/events_definition.h +++ b/pkg/security/ebpf/c/include/events_definition.h @@ -11,6 +11,18 @@ struct invalidate_dentry_event_t { u32 padding; }; +struct accept_event_t { + struct kevent_t event; + struct process_context_t process; + struct span_context_t span; + struct container_context_t container; + struct syscall_t syscall; + + u64 addr[2]; + u16 family; + u16 port; +}; + struct bind_event_t { struct kevent_t event; struct process_context_t process; @@ -122,6 +134,7 @@ struct cgroup_tracing_event_t { struct container_context_t container; struct activity_dump_config config; u64 cookie; + u32 pid; }; struct cgroup_write_event_t { @@ -223,6 +236,7 @@ struct mkdir_event_t { struct span_context_t span; struct container_context_t container; struct syscall_t syscall; + struct syscall_context_t syscall_ctx; struct file_t file; u32 mode; u32 padding; @@ -354,6 +368,7 @@ struct rmdir_event_t { struct span_context_t span; struct container_context_t container; struct syscall_t syscall; + struct syscall_context_t syscall_ctx; struct file_t file; }; @@ -441,4 +456,26 @@ struct on_demand_event_t { char data[256]; }; +struct raw_packet_event_t { + struct kevent_t event; + struct process_context_t 
process; + struct span_context_t span; + struct container_context_t container; + struct network_device_context_t device; + + int len; + char data[256]; +}; + +struct network_flow_monitor_event_t { + struct kevent_t event; + struct process_context_t process; + struct span_context_t span; + struct container_context_t container; + struct network_device_context_t device; + + u64 flows_count; // keep as u64 to prevent inconsistent verifier output on bounds checks + struct flow_stats_t flows[ACTIVE_FLOWS_MAX_SIZE]; +}; + #endif diff --git a/pkg/security/ebpf/c/include/helpers/activity_dump.h b/pkg/security/ebpf/c/include/helpers/activity_dump.h index 7505c923b7748..e444070371195 100644 --- a/pkg/security/ebpf/c/include/helpers/activity_dump.h +++ b/pkg/security/ebpf/c/include/helpers/activity_dump.h @@ -121,6 +121,7 @@ __attribute__((always_inline)) u64 trace_new_cgroup(void *ctx, u64 now, struct c evt->container.cgroup_context = container->cgroup_context; evt->cookie = cookie; evt->config = config; + evt->pid = bpf_get_current_pid_tgid() >> 32; send_event_ptr(ctx, EVENT_CGROUP_TRACING, evt); return cookie; diff --git a/pkg/security/ebpf/c/include/helpers/all.h b/pkg/security/ebpf/c/include/helpers/all.h index e3a31a2d3229b..181a3a7aa5b7f 100644 --- a/pkg/security/ebpf/c/include/helpers/all.h +++ b/pkg/security/ebpf/c/include/helpers/all.h @@ -8,14 +8,11 @@ #include "container.h" #include "dentry_resolver.h" #include "discaders.h" -#include "dns.h" -#include "imds.h" #include "erpc.h" #include "events.h" #include "events_predicates.h" #include "filesystem.h" #include "iouring.h" -#include "network.h" #include "process.h" #include "raw_syscalls.h" #include "selinux.h" @@ -25,4 +22,11 @@ #include "user_sessions.h" #include "utils.h" +#include "network/context.h" +#include "network/parser.h" +#include "network/pid_resolver.h" +#include "network/router.h" +#include "network/dns.h" +#include "network/imds.h" + #endif diff --git a/pkg/security/ebpf/c/include/helpers/filesystem.h b/pkg/security/ebpf/c/include/helpers/filesystem.h index 94210013c123e..41f92bde94664 100644 --- a/pkg/security/ebpf/c/include/helpers/filesystem.h +++ b/pkg/security/ebpf/c/include/helpers/filesystem.h @@ -97,6 +97,13 @@ static __attribute__((always_inline)) void umounted(struct pt_regs *ctx, u32 mou send_event(ctx, EVENT_MOUNT_RELEASED, event); } +static __attribute__((always_inline)) void set_file_layer(struct dentry *dentry, struct file_t *file) { + if (is_overlayfs(dentry)) { + u32 flags = get_overlayfs_layer(dentry); + file->flags |= flags; + } +} + void __attribute__((always_inline)) fill_file(struct dentry *dentry, struct file_t *file) { struct inode *d_inode = get_dentry_inode(dentry); @@ -153,6 +160,9 @@ void __attribute__((always_inline)) fill_file(struct dentry *dentry, struct file bpf_probe_read(&file->metadata.mtime.tv_nsec, sizeof(file->metadata.mtime.tv_nsec), &d_inode->i_mtime_nsec); #endif } + + // set the layer again here, as after an update a file will be moved to the upper layer + set_file_layer(dentry, file); } #define get_dentry_key_path(dentry, path) \ @@ -171,7 +181,7 @@ static __attribute__((always_inline)) void set_file_inode(struct dentry *dentry, } if (is_overlayfs(dentry)) { - set_overlayfs_ino(dentry, &file->path_key.ino, &file->flags); + set_overlayfs_inode(dentry, file); } } diff --git a/pkg/security/ebpf/c/include/helpers/network/context.h b/pkg/security/ebpf/c/include/helpers/network/context.h new file mode 100644 index 0000000000000..a62eea4735c7d --- /dev/null +++ 
b/pkg/security/ebpf/c/include/helpers/network/context.h @@ -0,0 +1,36 @@ +#ifndef _HELPERS_NETWORK_CONTEXT_H_ +#define _HELPERS_NETWORK_CONTEXT_H_ + +__attribute__((always_inline)) void fill_network_process_context(struct process_context_t *process, u32 pid, u32 netns) { + if (pid >= 0) { + process->pid = pid; + process->tid = pid; + } else { + process->pid = 0; + process->tid = 0; + } + process->netns = netns; +} + +__attribute__((always_inline)) void fill_network_process_context_from_pkt(struct process_context_t *process, struct packet_t *pkt) { + fill_network_process_context(process, pkt->pid, pkt->translated_ns_flow.netns); +} + +__attribute__((always_inline)) void fill_network_device_context(struct network_device_context_t *device_ctx, u32 netns, u32 ifindex) { + device_ctx->netns = netns; + device_ctx->ifindex = ifindex; +} + +__attribute__((always_inline)) void fill_network_device_context_from_pkt(struct network_device_context_t *device_ctx, struct __sk_buff *skb, struct packet_t *pkt) { + fill_network_device_context(device_ctx, pkt->translated_ns_flow.netns, skb->ifindex); +} + +__attribute__((always_inline)) void fill_network_context(struct network_context_t *net_ctx, struct __sk_buff *skb, struct packet_t *pkt) { + net_ctx->size = skb->len; + net_ctx->network_direction = pkt->network_direction; + net_ctx->flow = pkt->translated_ns_flow.flow; + + fill_network_device_context_from_pkt(&net_ctx->device, skb, pkt); +} + +#endif diff --git a/pkg/security/ebpf/c/include/helpers/dns.h b/pkg/security/ebpf/c/include/helpers/network/dns.h similarity index 84% rename from pkg/security/ebpf/c/include/helpers/dns.h rename to pkg/security/ebpf/c/include/helpers/network/dns.h index f6e394fbc08db..e7e1af3ce99e6 100644 --- a/pkg/security/ebpf/c/include/helpers/dns.h +++ b/pkg/security/ebpf/c/include/helpers/network/dns.h @@ -1,13 +1,14 @@ -#ifndef _HELPERS_DNS_H -#define _HELPERS_DNS_H +#ifndef _HELPERS_NETWORK_DNS_H +#define _HELPERS_NETWORK_DNS_H #include "constants/enums.h" -#include "maps.h" +#include "helpers/activity_dump.h" +#include "helpers/container.h" +#include "helpers/process.h" + +#include "context.h" -#include "activity_dump.h" -#include "container.h" -#include "network.h" -#include "process.h" +#include "maps.h" __attribute__((always_inline)) struct dns_event_t *get_dns_event() { u32 key = DNS_EVENT_KEY; @@ -27,7 +28,7 @@ __attribute__((always_inline)) struct dns_event_t *reset_dns_event(struct __sk_b evt->event.flags = 0; // process context - fill_network_process_context(&evt->process, pkt); + fill_network_process_context_from_pkt(&evt->process, pkt); // network context fill_network_context(&evt->network, skb, pkt); diff --git a/pkg/security/ebpf/c/include/helpers/imds.h b/pkg/security/ebpf/c/include/helpers/network/imds.h similarity index 84% rename from pkg/security/ebpf/c/include/helpers/imds.h rename to pkg/security/ebpf/c/include/helpers/network/imds.h index c53b53c15f9e0..ea5fe1d087c5d 100644 --- a/pkg/security/ebpf/c/include/helpers/imds.h +++ b/pkg/security/ebpf/c/include/helpers/network/imds.h @@ -1,12 +1,12 @@ -#ifndef _HELPERS_IMDS_H -#define _HELPERS_IMDS_H +#ifndef _HELPERS_NETWORK_IMDS_H +#define _HELPERS_NETWORK_IMDS_H #include "constants/enums.h" +#include "helpers/container.h" +#include "helpers/network/context.h" +#include "helpers/process.h" #include "maps.h" -#include "container.h" -#include "network.h" -#include "process.h" __attribute__((always_inline)) struct imds_event_t *get_imds_event() { u32 key = IMDS_EVENT_KEY; @@ -24,7 +24,7 @@ 
__attribute__((always_inline)) struct imds_event_t *reset_imds_event(struct __sk evt->event.flags = 0; // process context - fill_network_process_context(&evt->process, pkt); + fill_network_process_context_from_pkt(&evt->process, pkt); // network context fill_network_context(&evt->network, skb, pkt); diff --git a/pkg/security/ebpf/c/include/helpers/network.h b/pkg/security/ebpf/c/include/helpers/network/parser.h similarity index 60% rename from pkg/security/ebpf/c/include/helpers/network.h rename to pkg/security/ebpf/c/include/helpers/network/parser.h index 21e39e8591654..f6cdaac7489c8 100644 --- a/pkg/security/ebpf/c/include/helpers/network.h +++ b/pkg/security/ebpf/c/include/helpers/network/parser.h @@ -1,40 +1,10 @@ -#ifndef _HELPERS_NETWORK_H_ -#define _HELPERS_NETWORK_H_ +#ifndef _HELPERS_NETWORK_PARSER_H_ +#define _HELPERS_NETWORK_PARSER_H_ #include "constants/custom.h" #include "constants/macros.h" #include "maps.h" -__attribute__((always_inline)) s64 get_flow_pid(struct pid_route_t *key) { - u32 *value = bpf_map_lookup_elem(&flow_pid, key); - if (!value) { - // Try with IP set to 0.0.0.0 - key->addr[0] = 0; - key->addr[1] = 0; - value = bpf_map_lookup_elem(&flow_pid, key); - if (!value) { - return -1; - } - } - - return *value; -} - -__attribute__((always_inline)) void flip(struct flow_t *flow) { - u64 tmp = 0; - tmp = flow->sport; - flow->sport = flow->dport; - flow->dport = tmp; - - tmp = flow->saddr[0]; - flow->saddr[0] = flow->daddr[0]; - flow->daddr[0] = tmp; - - tmp = flow->saddr[1]; - flow->saddr[1] = flow->daddr[1]; - flow->daddr[1] = tmp; -} - __attribute__((always_inline)) void tc_cursor_init(struct cursor *c, struct __sk_buff *skb) { c->end = (void *)(long)skb->data_end; c->pos = (void *)(long)skb->data; @@ -62,31 +32,6 @@ __attribute__((always_inline)) struct packet_t *reset_packet() { return get_packet(); } -__attribute__((always_inline)) void fill_network_process_context(struct process_context_t *process, struct packet_t *pkt) { - if (pkt->pid >= 0) { - process->pid = pkt->pid; - process->tid = pkt->pid; - } else { - process->pid = 0; - process->tid = 0; - } - process->netns = pkt->translated_ns_flow.netns; -} - -__attribute__((always_inline)) void fill_network_device_context(struct network_device_context_t *device_ctx, struct __sk_buff *skb, struct packet_t *pkt) { - device_ctx->netns = pkt->translated_ns_flow.netns; - device_ctx->ifindex = skb->ifindex; -} - -__attribute__((always_inline)) void fill_network_context(struct network_context_t *net_ctx, struct __sk_buff *skb, struct packet_t *pkt) { - net_ctx->l3_protocol = htons(pkt->eth.h_proto); - net_ctx->l4_protocol = pkt->l4_protocol; - net_ctx->size = skb->len; - net_ctx->flow = pkt->translated_ns_flow.flow; - - fill_network_device_context(&net_ctx->device, skb, pkt); -} - __attribute__((always_inline)) void parse_tuple(struct nf_conntrack_tuple *tuple, struct flow_t *flow) { flow->sport = tuple->src.u.all; flow->dport = tuple->dst.u.all; @@ -109,8 +54,11 @@ __attribute__((always_inline)) struct packet_t * parse_packet(struct __sk_buff * return NULL; } - switch (pkt->eth.h_proto) { - case htons(ETH_P_IP): + pkt->network_direction = direction; + pkt->ns_flow.flow.l3_protocol = ntohs(pkt->eth.h_proto); + + switch (pkt->ns_flow.flow.l3_protocol) { + case ETH_P_IP: // parse IPv4 header if (!(parse_iphdr(&c, &pkt->ipv4))) { return NULL; @@ -124,19 +72,19 @@ __attribute__((always_inline)) struct packet_t * parse_packet(struct __sk_buff * } } - pkt->l4_protocol = pkt->ipv4.protocol; + pkt->ns_flow.flow.l4_protocol = 
pkt->ipv4.protocol; pkt->ns_flow.flow.saddr[0] = pkt->ipv4.saddr; pkt->ns_flow.flow.daddr[0] = pkt->ipv4.daddr; break; - case htons(ETH_P_IPV6): + case ETH_P_IPV6: // parse IPv6 header // TODO: handle multiple IPv6 extension headers if (!(parse_ipv6hdr(&c, &pkt->ipv6))) { return NULL; } - pkt->l4_protocol = pkt->ipv6.nexthdr; + pkt->ns_flow.flow.l4_protocol = pkt->ipv6.nexthdr; pkt->ns_flow.flow.saddr[0] = *(u64 *)&pkt->ipv6.saddr; pkt->ns_flow.flow.saddr[1] = *((u64 *)(&pkt->ipv6.saddr) + 1); pkt->ns_flow.flow.daddr[0] = *(u64 *)&pkt->ipv6.daddr; @@ -148,7 +96,7 @@ __attribute__((always_inline)) struct packet_t * parse_packet(struct __sk_buff * return NULL; } - switch (pkt->l4_protocol) { + switch (pkt->ns_flow.flow.l4_protocol) { case IPPROTO_TCP: // parse TCP header if (!(parse_tcphdr(&c, &pkt->tcp))) { @@ -183,7 +131,6 @@ __attribute__((always_inline)) struct packet_t * parse_packet(struct __sk_buff * return NULL; } - struct pid_route_t pid_route = {}; struct namespaced_flow_t tmp_ns_flow = pkt->ns_flow; // for compatibility with older kernels pkt->translated_ns_flow = pkt->ns_flow; @@ -201,25 +148,6 @@ __attribute__((always_inline)) struct packet_t * parse_packet(struct __sk_buff * // TODO: if nothing was found in the conntrack map, lookup ingress nat rules (nothing to do for egress though) - // resolve pid - switch (direction) { - case EGRESS: { - pid_route.addr[0] = pkt->translated_ns_flow.flow.saddr[0]; - pid_route.addr[1] = pkt->translated_ns_flow.flow.saddr[1]; - pid_route.port = pkt->translated_ns_flow.flow.sport; - pid_route.netns = pkt->translated_ns_flow.netns; - break; - } - case INGRESS: { - pid_route.addr[0] = pkt->translated_ns_flow.flow.daddr[0]; - pid_route.addr[1] = pkt->translated_ns_flow.flow.daddr[1]; - pid_route.port = pkt->translated_ns_flow.flow.dport; - pid_route.netns = pkt->translated_ns_flow.netns; - break; - } - } - pkt->pid = get_flow_pid(&pid_route); - return pkt; }; diff --git a/pkg/security/ebpf/c/include/helpers/network/pid_resolver.h b/pkg/security/ebpf/c/include/helpers/network/pid_resolver.h new file mode 100644 index 0000000000000..e3723bd033079 --- /dev/null +++ b/pkg/security/ebpf/c/include/helpers/network/pid_resolver.h @@ -0,0 +1,48 @@ +#ifndef _HELPERS_NETWORK_PID_RESOLVER_H_ +#define _HELPERS_NETWORK_PID_RESOLVER_H_ + +#include "maps.h" + +__attribute__((always_inline)) s64 get_flow_pid(struct pid_route_t *key) { + u32 *value = bpf_map_lookup_elem(&flow_pid, key); + if (!value) { + // Try with IP set to 0.0.0.0 + key->addr[0] = 0; + key->addr[1] = 0; + value = bpf_map_lookup_elem(&flow_pid, key); + if (!value) { + return -1; + } + } + + return *value; +} + +__attribute__((always_inline)) void resolve_pid(struct packet_t *pkt) { + struct pid_route_t pid_route = {}; + + // resolve pid + switch (pkt->network_direction) { + case EGRESS: { + pid_route.addr[0] = pkt->translated_ns_flow.flow.saddr[0]; + pid_route.addr[1] = pkt->translated_ns_flow.flow.saddr[1]; + pid_route.port = pkt->translated_ns_flow.flow.sport; + pid_route.netns = pkt->translated_ns_flow.netns; + break; + } + case INGRESS: { + pid_route.addr[0] = pkt->translated_ns_flow.flow.daddr[0]; + pid_route.addr[1] = pkt->translated_ns_flow.flow.daddr[1]; + pid_route.port = pkt->translated_ns_flow.flow.dport; + pid_route.netns = pkt->translated_ns_flow.netns; + break; + } + } + + // TODO: l4_protocol should be used to uniquely identify the PID - wait for implementation on security_socket_bind + // pid_route.l4_protocol = pkt->translated_ns_flow.flow.l4_protocol; + + pkt->pid = 
get_flow_pid(&pid_route); +} + +#endif diff --git a/pkg/security/ebpf/c/include/helpers/network/raw.h b/pkg/security/ebpf/c/include/helpers/network/raw.h new file mode 100644 index 0000000000000..ac1b520450f89 --- /dev/null +++ b/pkg/security/ebpf/c/include/helpers/network/raw.h @@ -0,0 +1,11 @@ +#ifndef _HELPERS_NETWORK_RAW_H_ +#define _HELPERS_NETWORK_RAW_H_ + +#include "maps.h" + +__attribute__((always_inline)) struct raw_packet_event_t *get_raw_packet_event() { + u32 key = 0; + return bpf_map_lookup_elem(&raw_packet_event, &key); +} + +#endif diff --git a/pkg/security/ebpf/c/include/helpers/network/router.h b/pkg/security/ebpf/c/include/helpers/network/router.h new file mode 100644 index 0000000000000..0c8a2b7904957 --- /dev/null +++ b/pkg/security/ebpf/c/include/helpers/network/router.h @@ -0,0 +1,29 @@ +#ifndef _HELPERS_NETWORK_ROUTER_H_ +#define _HELPERS_NETWORK_ROUTER_H_ + +#include "stats.h" +#include "maps.h" + +__attribute__((always_inline)) int route_pkt(struct __sk_buff *skb, struct packet_t *pkt, int direction) { + if (is_network_flow_monitor_enabled()) { + count_pkt(skb, pkt); + } + + // route DNS requests + if (is_event_enabled(EVENT_DNS)) { + if (pkt->translated_ns_flow.flow.l4_protocol == IPPROTO_UDP && pkt->translated_ns_flow.flow.dport == htons(53)) { + bpf_tail_call_compat(skb, &classifier_router, DNS_REQUEST); + } + } + + // route IMDS requests + if (is_event_enabled(EVENT_IMDS)) { + if (pkt->translated_ns_flow.flow.l4_protocol == IPPROTO_TCP && ((pkt->ns_flow.flow.saddr[0] & 0xFFFFFFFF) == get_imds_ip() || (pkt->ns_flow.flow.daddr[0] & 0xFFFFFFFF) == get_imds_ip())) { + bpf_tail_call_compat(skb, &classifier_router, IMDS_REQUEST); + } + } + + return ACT_OK; +} + +#endif diff --git a/pkg/security/ebpf/c/include/helpers/network/stats.h b/pkg/security/ebpf/c/include/helpers/network/stats.h new file mode 100644 index 0000000000000..10dc84d971c92 --- /dev/null +++ b/pkg/security/ebpf/c/include/helpers/network/stats.h @@ -0,0 +1,207 @@ +#ifndef _HELPERS_NETWORK_STATS_H_ +#define _HELPERS_NETWORK_STATS_H_ + +#include "context.h" +#include "utils.h" + +__attribute__((always_inline)) struct network_flow_monitor_event_t *get_network_flow_monitor_event() { + u32 key = 0; + struct network_flow_monitor_event_t *evt = bpf_map_lookup_elem(&network_flow_monitor_event_gen, &key); + // __builtin_memset doesn't work here because evt is too large and memset is allocating too much memory + return evt; +} + +__attribute__((always_inline)) struct active_flows_t *get_empty_active_flows() { + u32 key = 0; + return bpf_map_lookup_elem(&active_flows_gen, &key); +} + +__attribute__((always_inline)) int flush_network_stats(u32 pid, struct active_flows_t *entry, void *ctx, enum FLUSH_NETWORK_STATS_TYPE type) { + u64 now = bpf_ktime_get_ns(); + struct network_stats_t *stats = NULL; + struct namespaced_flow_t ns_flow_tmp = {}; + + if (entry == NULL || ctx == NULL) { + return 0; + } + + if ((type == NETWORK_STATS_TICKER) && (now < entry->last_sent + get_network_monitor_period())) { + // we'll flush later, move on + return 0; + } + + struct network_flow_monitor_event_t *evt = get_network_flow_monitor_event(); + if (evt == NULL) { + // should never happen + return 0; + } + evt->event.flags = EVENT_FLAGS_ACTIVITY_DUMP_SAMPLE; + + // Delete the entry now to try to limit race conditions with exiting processes. 
+ // Two races may happen here: + // - we may send the same flows twice if both the ticker and the PID_EXIT hook points call this function + // at the same time and both get a hold of the same active_flows_t *entry. + // - we may miss some flows if a packet with a new flow is sent right when this function is called by the ticker, + // and if the TC program that captures the new flow appends it to the ticker active_flows_t *entry after the end + // of the unrolled loop. + bpf_map_delete_elem(&active_flows, &pid); + + // process context + fill_network_process_context(&evt->process, pid, entry->netns); + + // network context + fill_network_device_context(&evt->device, entry->netns, entry->ifindex); + + struct proc_cache_t *proc_cache_entry = get_proc_cache(pid); + if (proc_cache_entry == NULL) { + evt->container.container_id[0] = 0; + } else { + copy_container_id_no_tracing(proc_cache_entry->container.container_id, &evt->container.container_id); + evt->container.cgroup_context = proc_cache_entry->container.cgroup_context; + } + + evt->flows_count = 0; + +#pragma unroll + for (int i = 0; i < ACTIVE_FLOWS_MAX_SIZE; i++) { + if (i >= entry->cursor) { + break; + } + ns_flow_tmp.netns = entry->netns; + ns_flow_tmp.flow = entry->flows[i & (ACTIVE_FLOWS_MAX_SIZE - 1)]; + + // start by copying the flow + evt->flows[evt->flows_count & (ACTIVE_FLOWS_MAX_SIZE - 1)].flow = ns_flow_tmp.flow; + + // query the stats + stats = bpf_map_lookup_elem(&ns_flow_to_network_stats, &ns_flow_tmp); + if (stats != NULL) { + // Delete the entry now to try to limit race conditions with "count_pkt" running on other CPUs. + // Note that the "worst" that can happen with this race is that we miss a couple of bytes / packets for the + // current flow. + bpf_map_delete_elem(&ns_flow_to_network_stats, &ns_flow_tmp); + evt->flows[evt->flows_count & (ACTIVE_FLOWS_MAX_SIZE - 1)].stats = *stats; + } else { + // we copied only the flow without the stats - better to get at least the flow than nothing at all +#if defined(DEBUG_NETWORK_FLOW) + bpf_printk("no stats for sp:%d sa0:%lu sa1:%lu", ns_flow_tmp.flow.sport, ns_flow_tmp.flow.saddr[0], ns_flow_tmp.flow.saddr[1]); + bpf_printk(" dp:%d da0:%lu da1:%lu", ns_flow_tmp.flow.dport, ns_flow_tmp.flow.daddr[0], ns_flow_tmp.flow.daddr[1]); + bpf_printk(" netns:%lu l3:%d l4:%d", ns_flow_tmp.netns, ns_flow_tmp.flow.l3_protocol, ns_flow_tmp.flow.l4_protocol); +#endif + } + + evt->flows_count += 1; + } + + // send event + send_event_with_size_ptr(ctx, EVENT_NETWORK_FLOW_MONITOR, evt, offsetof(struct network_flow_monitor_event_t, flows) + (evt->flows_count & (ACTIVE_FLOWS_MAX_SIZE - 1)) * sizeof(struct flow_stats_t)); + +#if defined(DEBUG_NETWORK_FLOW) + bpf_printk("sent %d (out of %d) flows for pid %d!", evt->flows_count, entry->cursor, pid); + bpf_printk(" - type: %d", type); +#endif + + return 0; +} + +__attribute__((always_inline)) void flush_pid_network_stats(u32 pid, void *ctx, enum FLUSH_NETWORK_STATS_TYPE type) { + struct active_flows_t *entry = bpf_map_lookup_elem(&active_flows, &pid); + flush_network_stats(pid, entry, ctx, type); +} + +__attribute__((always_inline)) void count_pkt(struct __sk_buff *skb, struct packet_t *pkt) { + struct namespaced_flow_t ns_flow = pkt->translated_ns_flow; + if (pkt->network_direction == INGRESS) { + // EGRESS was arbitrarily chosen as "the 5-tuple order for indexing flow statistics". 
+ // Reverse ingress flow now + flip(&ns_flow.flow); + } + + u8 should_register_flow = 0; + struct network_stats_t *stats = NULL; + struct network_stats_t stats_zero = {}; + u64 now = bpf_ktime_get_ns(); + int ret = bpf_map_update_elem(&ns_flow_to_network_stats, &ns_flow, &stats_zero, BPF_NOEXIST); + if (ret == 0) { + // register flow in active_flows + should_register_flow = 1; + } + + // lookup the existing (or new) entry (now that it has been created) + stats = bpf_map_lookup_elem(&ns_flow_to_network_stats, &ns_flow); + if (stats == NULL) { + // should never happen, ignore + return; + } + +#if defined(DEBUG_NETWORK_FLOW) + bpf_printk("added stats for sp:%d sa0:%lu sa1:%lu", ns_flow.flow.sport, ns_flow.flow.saddr[0], ns_flow.flow.saddr[1]); + bpf_printk(" dp:%d da0:%lu da1:%lu", ns_flow.flow.dport, ns_flow.flow.daddr[0], ns_flow.flow.daddr[1]); + bpf_printk(" netns:%lu l3:%d l4:%d", ns_flow.netns, ns_flow.flow.l3_protocol, ns_flow.flow.l4_protocol); +#endif + + // update stats + switch (pkt->network_direction) { + case EGRESS: { + __sync_fetch_and_add(&stats->egress.pkt_count, 1); + __sync_fetch_and_add(&stats->egress.data_size, skb->len); + break; + } + case INGRESS: { + __sync_fetch_and_add(&stats->ingress.pkt_count, 1); + __sync_fetch_and_add(&stats->ingress.data_size, skb->len); + break; + } + } + + if (should_register_flow) { + // make sure we hold the spin lock for the active flows entry + struct active_flows_spin_lock_t init_value = {}; + struct active_flows_spin_lock_t *active_flows_lock; + bpf_map_update_elem(&active_flows_spin_locks, &pkt->pid, &init_value, BPF_NOEXIST); + active_flows_lock = bpf_map_lookup_elem(&active_flows_spin_locks, &pkt->pid); + if (active_flows_lock == NULL) { + // shouldn't happen, ignore + return; + } + + struct active_flows_t *entry; + struct active_flows_t *zero = get_empty_active_flows(); + if (zero == NULL) { + // should never happen, ignore + return; + } + zero->netns = ns_flow.netns; + zero->ifindex = skb->ifindex; + zero->last_sent = now; + + // make sure the active_flows entry for the current pid exists + ret = bpf_map_update_elem(&active_flows, &pkt->pid, zero, BPF_NOEXIST); + if (ret < 0 && ret != -EEXIST) { + // no more space in the map, ignore for now + return; + } + + // lookup active_flows for current pid + entry = bpf_map_lookup_elem(&active_flows, &pkt->pid); + if (entry == NULL) { + // should not happen, ignore + return; + } + + // is the entry full ? + bpf_spin_lock(&active_flows_lock->lock); + if (entry->cursor < ACTIVE_FLOWS_MAX_SIZE) { + // add new flow to the list + entry->flows[entry->cursor & (ACTIVE_FLOWS_MAX_SIZE - 1)] = ns_flow.flow; + entry->cursor = entry->cursor + 1; + } else { + // TODO: send early and reset entry ? + // for now, drop the flow. 
+ } + bpf_spin_unlock(&active_flows_lock->lock); + bpf_map_delete_elem(&active_flows_spin_locks, &pkt->pid); + } +} + +#endif diff --git a/pkg/security/ebpf/c/include/helpers/network/utils.h b/pkg/security/ebpf/c/include/helpers/network/utils.h new file mode 100644 index 0000000000000..66bb1293b0c6a --- /dev/null +++ b/pkg/security/ebpf/c/include/helpers/network/utils.h @@ -0,0 +1,19 @@ +#ifndef _HELPERS_NETWORK_UTILS_H_ +#define _HELPERS_NETWORK_UTILS_H_ + +__attribute__((always_inline)) void flip(struct flow_t *flow) { + u64 tmp = 0; + tmp = flow->sport; + flow->sport = flow->dport; + flow->dport = tmp; + + tmp = flow->saddr[0]; + flow->saddr[0] = flow->daddr[0]; + flow->daddr[0] = tmp; + + tmp = flow->saddr[1]; + flow->saddr[1] = flow->daddr[1]; + flow->daddr[1] = tmp; +} + +#endif diff --git a/pkg/security/ebpf/c/include/helpers/process.h b/pkg/security/ebpf/c/include/helpers/process.h index eeefb2359685d..83f5774753d84 100644 --- a/pkg/security/ebpf/c/include/helpers/process.h +++ b/pkg/security/ebpf/c/include/helpers/process.h @@ -44,10 +44,6 @@ void __attribute__((always_inline)) copy_proc_cache(struct proc_cache_t *src, st copy_proc_entry(&src->entry, &dst->entry); } -void __attribute__((always_inline)) copy_credentials(struct credentials_t *src, struct credentials_t *dst) { - *dst = *src; -} - void __attribute__((always_inline)) copy_pid_cache_except_exit_ts(struct pid_cache_t *src, struct pid_cache_t *dst) { dst->cookie = src->cookie; dst->user_session_id = src->user_session_id; diff --git a/pkg/security/ebpf/c/include/helpers/rate_limiter.h b/pkg/security/ebpf/c/include/helpers/rate_limiter.h index a72cb2d37c102..44d9a7023d216 100644 --- a/pkg/security/ebpf/c/include/helpers/rate_limiter.h +++ b/pkg/security/ebpf/c/include/helpers/rate_limiter.h @@ -6,31 +6,28 @@ #include "structs/rate_limiter.h" __attribute__((always_inline)) u8 rate_limiter_reset_period(u64 now, struct rate_limiter_ctx *rate_ctx_p) { - rate_ctx_p->current_period = now; - rate_ctx_p->counter = 0; + u64 data = (now & ~RATE_LIMITER_COUNTER_MASK); + rate_ctx_p->data = data; return 1; } -__attribute__((always_inline)) u8 rate_limiter_allow_basic(u32 rate, u64 now, struct rate_limiter_ctx *rate_ctx_p, u64 delta) { +__attribute__((always_inline)) u8 rate_limiter_allow_basic(u16 rate, u64 now, struct rate_limiter_ctx *rate_ctx_p, u64 delta) { if (delta > SEC_TO_NS(1)) { // if more than 1 sec ellapsed we reset the period return rate_limiter_reset_period(now, rate_ctx_p); } - if (rate_ctx_p->counter >= rate) { // if we already allowed more than rate + if (get_counter(rate_ctx_p) >= rate) { // if we already allowed more than rate return 0; } else { return 1; } } -__attribute__((always_inline)) u8 rate_limiter_allow_gen(struct rate_limiter_ctx *rate_ctx_p, u32 rate, u64 now, u8 should_count) { - if (now < rate_ctx_p->current_period) { // this should never happen, ignore - return 0; - } - u64 delta = now - rate_ctx_p->current_period; +__attribute__((always_inline)) u8 rate_limiter_allow_gen(struct rate_limiter_ctx *rate_ctx_p, u16 rate, u64 now, u8 should_count) { + u64 delta = now - get_current_period(rate_ctx_p); u8 allow = rate_limiter_allow_basic(rate, now, rate_ctx_p, delta); if (allow && should_count) { - __sync_fetch_and_add(&rate_ctx_p->counter, 1); + inc_counter(rate_ctx_p, 1); } return (allow); } @@ -39,7 +36,7 @@ __attribute__((always_inline)) u8 rate_limiter_allow_gen(struct rate_limiter_ctx // TODO: put it configurable #define GENERIC_RATE_LIMITER_RATE 100 -__attribute__((always_inline)) u8 
rate_limiter_allow(u32 pid, u64 now, u8 should_count) { +__attribute__((always_inline)) u8 rate_limiter_allow(u32 pid, u64 now, u16 should_count) { if (now == 0) { now = bpf_ktime_get_ns(); } @@ -49,10 +46,7 @@ __attribute__((always_inline)) u8 rate_limiter_allow(u32 pid, u64 now, u8 should struct rate_limiter_ctx *rate_ctx_p = bpf_map_lookup_elem(&rate_limiters, &pid); if (rate_ctx_p == NULL) { - struct rate_limiter_ctx rate_ctx = { - .current_period = now, - .counter = should_count, - }; + struct rate_limiter_ctx rate_ctx = new_rate_limiter(now, should_count); bpf_map_update_elem(&rate_limiters, &pid, &rate_ctx, BPF_ANY); return 1; } @@ -62,17 +56,14 @@ __attribute__((always_inline)) u8 rate_limiter_allow(u32 pid, u64 now, u8 should } #define rate_limiter_allow_simple() rate_limiter_allow(0, 0, 1) -__attribute__((always_inline)) u8 activity_dump_rate_limiter_allow(u32 rate, u64 cookie, u64 now, u8 should_count) { +__attribute__((always_inline)) u8 activity_dump_rate_limiter_allow(u16 rate, u64 cookie, u64 now, u16 should_count) { if (now == 0) { now = bpf_ktime_get_ns(); } struct rate_limiter_ctx *rate_ctx_p = bpf_map_lookup_elem(&activity_dump_rate_limiters, &cookie); if (rate_ctx_p == NULL) { - struct rate_limiter_ctx rate_ctx = { - .current_period = now, - .counter = should_count, - }; + struct rate_limiter_ctx rate_ctx = new_rate_limiter(now, should_count); bpf_map_update_elem(&activity_dump_rate_limiters, &cookie, &rate_ctx, BPF_ANY); return 1; } diff --git a/pkg/security/ebpf/c/include/hooks/all.h b/pkg/security/ebpf/c/include/hooks/all.h index 5fc9709eb528c..8f1370edf651c 100644 --- a/pkg/security/ebpf/c/include/hooks/all.h +++ b/pkg/security/ebpf/c/include/hooks/all.h @@ -36,6 +36,7 @@ #include "on_demand.h" #include "chdir.h" +#include "network/accept.h" #include "network/bind.h" #include "network/connect.h" @@ -44,7 +45,7 @@ #include "network/imds.h" #include "network/flow.h" #include "network/net_device.h" -#include "network/router.h" +#include "network/stats_worker.h" #include "network/tc.h" #include "network/raw.h" #endif diff --git a/pkg/security/ebpf/c/include/hooks/cgroup.h b/pkg/security/ebpf/c/include/hooks/cgroup.h index b7ce66a870b92..0c796b6c81687 100644 --- a/pkg/security/ebpf/c/include/hooks/cgroup.h +++ b/pkg/security/ebpf/c/include/hooks/cgroup.h @@ -110,15 +110,15 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { bpf_probe_read(&f, sizeof(f), &kern_f->file); struct dentry *dentry = get_file_dentry(f); - resolver->key.ino = get_dentry_ino(dentry); - resolver->key.mount_id = get_file_mount_id(f); - resolver->dentry = dentry; - // The last dentry in the cgroup path should be `cgroup.procs`, thus the container ID should be its parent. 
bpf_probe_read(&container_d, sizeof(container_d), &dentry->d_parent); bpf_probe_read(&container_qstr, sizeof(container_qstr), &container_d->d_name); container_id = (void *)container_qstr.name; + resolver->key.ino = get_dentry_ino(container_d); + resolver->key.mount_id = get_file_mount_id(f); + resolver->dentry = container_d; + if (is_docker_cgroup(ctx, container_d)) { cgroup_flags = CGROUP_MANAGER_DOCKER; } @@ -133,13 +133,6 @@ static __attribute__((always_inline)) int trace__cgroup_write(ctx_t *ctx) { u64 inode = get_dentry_ino(container_d); resolver->key.ino = inode; - struct file_t *entry = bpf_map_lookup_elem(&exec_file_cache, &inode); - if (entry == NULL) { - return 0; - } - else { - resolver->key.mount_id = entry->path_key.mount_id; - } resolver->dentry = container_d; diff --git a/pkg/security/ebpf/c/include/hooks/chdir.h b/pkg/security/ebpf/c/include/hooks/chdir.h index 2d1e6c5db11eb..793c069061dea 100644 --- a/pkg/security/ebpf/c/include/hooks/chdir.h +++ b/pkg/security/ebpf/c/include/hooks/chdir.h @@ -55,8 +55,6 @@ int hook_set_fs_pwd(ctx_t *ctx) { syscall->chdir.dentry = dentry; syscall->chdir.file.path_key = get_dentry_key_path(syscall->chdir.dentry, path); - set_file_inode(dentry, &syscall->chdir.file, 0); - if (approve_syscall(syscall, chdir_approvers) == DISCARDED) { pop_syscall(EVENT_CHDIR); return 0; diff --git a/pkg/security/ebpf/c/include/hooks/chmod.h b/pkg/security/ebpf/c/include/hooks/chmod.h index f374c6478cc31..49eabddcf6510 100644 --- a/pkg/security/ebpf/c/include/hooks/chmod.h +++ b/pkg/security/ebpf/c/include/hooks/chmod.h @@ -51,6 +51,8 @@ int __attribute__((always_inline)) sys_chmod_ret(void *ctx, int retval) { return 0; } + set_file_layer(syscall->resolver.dentry, &syscall->setattr.file); + struct chmod_event_t event = { .syscall.retval = retval, .syscall_ctx.id = syscall->ctx_id, diff --git a/pkg/security/ebpf/c/include/hooks/chown.h b/pkg/security/ebpf/c/include/hooks/chown.h index 3bb4cbf243141..1d6f8a177c8ec 100644 --- a/pkg/security/ebpf/c/include/hooks/chown.h +++ b/pkg/security/ebpf/c/include/hooks/chown.h @@ -64,6 +64,8 @@ int __attribute__((always_inline)) sys_chown_ret(void *ctx, int retval) { return 0; } + set_file_layer(syscall->resolver.dentry, &syscall->setattr.file); + struct chown_event_t event = { .syscall.retval = retval, .syscall_ctx.id = syscall->ctx_id, diff --git a/pkg/security/ebpf/c/include/hooks/exec.h b/pkg/security/ebpf/c/include/hooks/exec.h index a99783248c5d9..59409b183b318 100644 --- a/pkg/security/ebpf/c/include/hooks/exec.h +++ b/pkg/security/ebpf/c/include/hooks/exec.h @@ -5,6 +5,7 @@ #include "constants/offsets/filesystem.h" #include "helpers/filesystem.h" #include "helpers/syscalls.h" +#include "helpers/network/stats.h" #include "constants/fentry_macro.h" int __attribute__((always_inline)) trace__sys_execveat(ctx_t *ctx, const char *path, const char **argv, const char **env) { @@ -272,8 +273,7 @@ int hook_do_coredump(ctx_t *ctx) { return 0; } -HOOK_ENTRY("do_exit") -int hook_do_exit(ctx_t *ctx) { +int __attribute__((always_inline)) handle_do_exit(ctx_t *ctx) { u64 pid_tgid = bpf_get_current_pid_tgid(); u32 tgid = pid_tgid >> 32; u32 pid = pid_tgid; @@ -328,6 +328,29 @@ int hook_do_exit(ctx_t *ctx) { return 0; } +TAIL_CALL_TARGET_WITH_HOOK_POINT("do_exit") +int tail_call_target_flush_network_stats_exit(ctx_t *ctx) { + u64 pid_tgid = bpf_get_current_pid_tgid(); + u32 pid = pid_tgid; + u32 tgid = pid_tgid >> 32; + + void *ignored = bpf_map_lookup_elem(&pid_ignored, &pid); + if (ignored == NULL) { + // flush network stats + 
flush_pid_network_stats(tgid, ctx, PID_EXIT); + } + + return handle_do_exit(ctx); +} + +HOOK_ENTRY("do_exit") +int hook_do_exit(ctx_t *ctx) { + if (is_network_flow_monitor_enabled()) { + bpf_tail_call_compat(ctx, &flush_network_stats_progs, PID_EXIT); + } + return handle_do_exit(ctx); +} + HOOK_ENTRY("exit_itimers") int hook_exit_itimers(ctx_t *ctx) { struct syscall_cache_t *syscall = peek_current_or_impersonated_exec_syscall(); @@ -756,8 +779,20 @@ int __attribute__((always_inline)) send_exec_event(ctx_t *ctx) { return 0; } +TAIL_CALL_TARGET_WITH_HOOK_POINT("mprotect_fixup") +int tail_call_target_flush_network_stats_exec(ctx_t *ctx) { + // flush network stats + u64 pid_tgid = bpf_get_current_pid_tgid(); + u32 tgid = pid_tgid >> 32; + flush_pid_network_stats(tgid, ctx, PID_EXEC); + return send_exec_event(ctx); +} + HOOK_ENTRY("mprotect_fixup") int hook_mprotect_fixup(ctx_t *ctx) { + if (is_network_flow_monitor_enabled()) { + bpf_tail_call_compat(ctx, &flush_network_stats_progs, PID_EXEC); + } return send_exec_event(ctx); } diff --git a/pkg/security/ebpf/c/include/hooks/iouring.h b/pkg/security/ebpf/c/include/hooks/iouring.h index ce82b04c0f50a..d8761ce7df45e 100644 --- a/pkg/security/ebpf/c/include/hooks/iouring.h +++ b/pkg/security/ebpf/c/include/hooks/iouring.h @@ -14,7 +14,7 @@ int io_uring_create(struct tracepoint_io_uring_io_uring_create_t *args) { HOOK_EXIT("io_ring_ctx_alloc") int rethook_io_ring_ctx_alloc(ctx_t *ctx) { - void *ioctx = (void *)CTX_PARMRET(ctx, 1); + void *ioctx = (void *)CTX_PARMRET(ctx); cache_ioctx_pid_tgid(ioctx); return 0; } diff --git a/pkg/security/ebpf/c/include/hooks/link.h b/pkg/security/ebpf/c/include/hooks/link.h index 8184b16cfd4bf..20c268d2d9219 100644 --- a/pkg/security/ebpf/c/include/hooks/link.h +++ b/pkg/security/ebpf/c/include/hooks/link.h @@ -151,7 +151,7 @@ int __attribute__((always_inline)) sys_link_ret(void *ctx, int retval, int dr_ty HOOK_EXIT("do_linkat") int rethook_do_linkat(ctx_t *ctx) { - int retval = CTX_PARMRET(ctx, 5); + int retval = CTX_PARMRET(ctx); return sys_link_ret(ctx, retval, DR_KPROBE_OR_FENTRY); } diff --git a/pkg/security/ebpf/c/include/hooks/login_uid.h b/pkg/security/ebpf/c/include/hooks/login_uid.h index d96148bdfa84f..35c41d1532fa7 100644 --- a/pkg/security/ebpf/c/include/hooks/login_uid.h +++ b/pkg/security/ebpf/c/include/hooks/login_uid.h @@ -18,7 +18,7 @@ int hook_audit_set_loginuid(ctx_t *ctx) { HOOK_EXIT("audit_set_loginuid") int rethook_audit_set_loginuid(ctx_t *ctx) { - int retval = CTX_PARMRET(ctx, 1); + int retval = CTX_PARMRET(ctx); if (retval < 0) { return 0; } diff --git a/pkg/security/ebpf/c/include/hooks/mkdir.h b/pkg/security/ebpf/c/include/hooks/mkdir.h index c736528d7222f..513b5506be6ba 100644 --- a/pkg/security/ebpf/c/include/hooks/mkdir.h +++ b/pkg/security/ebpf/c/include/hooks/mkdir.h @@ -7,7 +7,7 @@ #include "helpers/filesystem.h" #include "helpers/syscalls.h" -long __attribute__((always_inline)) trace__sys_mkdir(u8 async, umode_t mode) { +long __attribute__((always_inline)) trace__sys_mkdir(u8 async, const char *filename, umode_t mode) { if (is_discarded_by_pid()) { return 0; } @@ -21,17 +21,20 @@ long __attribute__((always_inline)) trace__sys_mkdir(u8 async, umode_t mode) { .mode = mode } }; + if (!async) { + collect_syscall_ctx(&syscall, SYSCALL_CTX_ARG_STR(0) | SYSCALL_CTX_ARG_INT(1), (void *)filename, (void *)&mode, NULL); + } cache_syscall(&syscall); return 0; } HOOK_SYSCALL_ENTRY2(mkdir, const char *, filename, umode_t, mode) { - return trace__sys_mkdir(SYNC_SYSCALL, mode); + return 
trace__sys_mkdir(SYNC_SYSCALL, filename, mode); } HOOK_SYSCALL_ENTRY3(mkdirat, int, dirfd, const char *, filename, umode_t, mode) { - return trace__sys_mkdir(SYNC_SYSCALL, mode); + return trace__sys_mkdir(SYNC_SYSCALL, filename, mode); } HOOK_ENTRY("vfs_mkdir") @@ -75,6 +78,10 @@ int __attribute__((always_inline)) sys_mkdir_ret(void *ctx, int retval, int dr_t // the inode of the dentry was not properly set when kprobe/security_path_mkdir was called, make sure we grab it now set_file_inode(syscall->mkdir.dentry, &syscall->mkdir.file, 0); + if (retval && !syscall->mkdir.file.path_key.ino) { + syscall->mkdir.file.path_key.mount_id = 0; // do not try resolving the path + } + syscall->resolver.key = syscall->mkdir.file.path_key; syscall->resolver.dentry = syscall->mkdir.dentry; syscall->resolver.discarder_event_type = dentry_resolver_discarder_event_type(syscall); @@ -95,14 +102,14 @@ int hook_do_mkdirat(ctx_t *ctx) { struct syscall_cache_t *syscall = peek_syscall(EVENT_MKDIR); if (!syscall) { umode_t mode = (umode_t)CTX_PARM3(ctx); - return trace__sys_mkdir(ASYNC_SYSCALL, mode); + return trace__sys_mkdir(ASYNC_SYSCALL, NULL, mode); } return 0; } HOOK_EXIT("do_mkdirat") int rethook_do_mkdirat(ctx_t *ctx) { - int retval = CTX_PARMRET(ctx, 3); + int retval = CTX_PARMRET(ctx); return sys_mkdir_ret(ctx, retval, DR_KPROBE_OR_FENTRY); } @@ -140,6 +147,7 @@ int __attribute__((always_inline)) dr_mkdir_callback(void *ctx) { struct mkdir_event_t event = { .syscall.retval = retval, + .syscall_ctx.id = syscall->ctx_id, .event.flags = syscall->async ? EVENT_FLAGS_ASYNC : 0, .file = syscall->mkdir.file, .mode = syscall->mkdir.mode, diff --git a/pkg/security/ebpf/c/include/hooks/mmap.h b/pkg/security/ebpf/c/include/hooks/mmap.h index d15a25651e63d..336554cdc5e9b 100644 --- a/pkg/security/ebpf/c/include/hooks/mmap.h +++ b/pkg/security/ebpf/c/include/hooks/mmap.h @@ -87,7 +87,7 @@ int __attribute__((always_inline)) sys_mmap_ret(void *ctx, int retval, u64 addr) HOOK_EXIT("vm_mmap_pgoff") int rethook_vm_mmap_pgoff(ctx_t *ctx) { - u64 ret = CTX_PARMRET(ctx, 6); + u64 ret = CTX_PARMRET(ctx); return sys_mmap_ret(ctx, (int)ret, ret); } diff --git a/pkg/security/ebpf/c/include/hooks/network/accept.h b/pkg/security/ebpf/c/include/hooks/network/accept.h new file mode 100644 index 0000000000000..521a12a0597fc --- /dev/null +++ b/pkg/security/ebpf/c/include/hooks/network/accept.h @@ -0,0 +1,45 @@ +#ifndef _HOOKS_ACCEPT_H_ +#define _HOOKS_ACCEPT_H_ + +#include "constants/offsets/network.h" + +int __attribute__((always_inline)) read_sock_and_send_event(ctx_t * ctx, struct sock * sock) { + if(sock == NULL) { + return 0; + } + + struct accept_event_t event = {0}; + + // Extract family from the socket + struct sock_common *sockcommon = (void *)sock; + event.family = get_family_from_sock_common(sockcommon); + // Only handle AF_INET and AF_INET6 + if (event.family != AF_INET && event.family != AF_INET6) { + return 0; + } + + // Read the listening port and source address + bpf_probe_read(&event.port, sizeof(event.port), &sockcommon->skc_num); + event.port = htons(event.port); + + if (event.family == AF_INET) { + bpf_probe_read(&event.addr[0], sizeof(event.addr[0]), &sockcommon->skc_daddr); + } else if (event.family == AF_INET6) { + bpf_probe_read((void*)&event.addr, sizeof(sockcommon->skc_v6_daddr), &sockcommon->skc_v6_daddr); + } + + struct proc_cache_t *entry = fill_process_context(&event.process); + fill_container_context(entry, &event.container); + fill_span_context(&event.span); + send_event(ctx, EVENT_ACCEPT, event); + + 
return 0; +} + +HOOK_EXIT("inet_csk_accept") +int hook_accept(ctx_t *ctx) { + struct sock *sock = (struct sock*)CTX_PARMRET(ctx); + return read_sock_and_send_event(ctx, sock); +} + +#endif /* _HOOKS_ACCEPT_H_ */ diff --git a/pkg/security/ebpf/c/include/hooks/network/bind.h b/pkg/security/ebpf/c/include/hooks/network/bind.h index cefaea905d501..be3fb884460af 100644 --- a/pkg/security/ebpf/c/include/hooks/network/bind.h +++ b/pkg/security/ebpf/c/include/hooks/network/bind.h @@ -64,68 +64,34 @@ HOOK_ENTRY("security_socket_bind") int hook_security_socket_bind(ctx_t *ctx) { struct socket *sk = (struct socket *)CTX_PARM1(ctx); struct sockaddr *address = (struct sockaddr *)CTX_PARM2(ctx); - struct pid_route_t key = {}; - u16 family = 0; - u16 protocol = 0; short socket_type = 0; + // fill syscall_cache if necessary + struct syscall_cache_t *syscall = peek_syscall(EVENT_BIND); + if (!syscall) { + return 0; + } + // Extract IP and port from the sockaddr structure - bpf_probe_read(&family, sizeof(family), &address->sa_family); - if (family == AF_INET) { + bpf_probe_read(&syscall->bind.family, sizeof(syscall->bind.family), &address->sa_family); + if (syscall->bind.family == AF_INET) { struct sockaddr_in *addr_in = (struct sockaddr_in *)address; - bpf_probe_read(&key.port, sizeof(addr_in->sin_port), &addr_in->sin_port); - bpf_probe_read(&key.addr, sizeof(addr_in->sin_addr.s_addr), &addr_in->sin_addr.s_addr); - } else if (family == AF_INET6) { + bpf_probe_read(&syscall->bind.port, sizeof(addr_in->sin_port), &addr_in->sin_port); + bpf_probe_read(&syscall->bind.addr, sizeof(addr_in->sin_addr.s_addr), &addr_in->sin_addr.s_addr); + } else if (syscall->bind.family == AF_INET6) { struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)address; - bpf_probe_read(&key.port, sizeof(addr_in6->sin6_port), &addr_in6->sin6_port); - bpf_probe_read(&key.addr, sizeof(u64) * 2, (char *)addr_in6 + offsetof(struct sockaddr_in6, sin6_addr)); + bpf_probe_read(&syscall->bind.port, sizeof(addr_in6->sin6_port), &addr_in6->sin6_port); + bpf_probe_read(&syscall->bind.addr, sizeof(u64) * 2, (char *)addr_in6 + offsetof(struct sockaddr_in6, sin6_addr)); } - bpf_probe_read(&socket_type, sizeof(socket_type), &sk->type); - // We only handle TCP and UDP sockets for now + bpf_probe_read(&socket_type, sizeof(socket_type), &sk->type); if (socket_type == SOCK_STREAM) { - protocol = IPPROTO_TCP; + syscall->connect.protocol = IPPROTO_TCP; } else if (socket_type == SOCK_DGRAM) { - protocol = IPPROTO_UDP; + syscall->connect.protocol = IPPROTO_UDP; } - // fill syscall_cache if necessary - struct syscall_cache_t *syscall = peek_syscall(EVENT_BIND); - if (syscall) { - syscall->bind.addr[0] = key.addr[0]; - syscall->bind.addr[1] = key.addr[1]; - syscall->bind.port = key.port; - syscall->bind.family = family; - syscall->connect.protocol = protocol; - } - - // past this point we care only about AF_INET and AF_INET6 - if (family != AF_INET && family != AF_INET6) { - return 0; - } - - // Register service PID - if (key.port != 0) { - u64 id = bpf_get_current_pid_tgid(); - u32 tid = (u32)id; - - // add netns information - key.netns = get_netns_from_socket(sk); - if (key.netns != 0) { - bpf_map_update_elem(&netns_cache, &tid, &key.netns, BPF_ANY); - } - -#ifndef DO_NOT_USE_TC - u32 pid = id >> 32; - bpf_map_update_elem(&flow_pid, &key, &pid, BPF_ANY); -#endif - -#if defined(DEBUG_BIND) - bpf_printk("# registered (bind) pid:%d", pid); - bpf_printk("# p:%d a:%d a:%d", key.port, key.addr[0], key.addr[1]); -#endif - } return 0; } diff --git 
a/pkg/security/ebpf/c/include/hooks/network/connect.h b/pkg/security/ebpf/c/include/hooks/network/connect.h index 65f520dcb3e95..1c278660abdcb 100644 --- a/pkg/security/ebpf/c/include/hooks/network/connect.h +++ b/pkg/security/ebpf/c/include/hooks/network/connect.h @@ -62,48 +62,35 @@ HOOK_ENTRY("security_socket_connect") int hook_security_socket_connect(ctx_t *ctx) { struct socket *sk = (struct socket *)CTX_PARM1(ctx); struct sockaddr *address = (struct sockaddr *)CTX_PARM2(ctx); - struct pid_route_t key = {}; - u16 family = 0; - u16 protocol = 0; short socket_type = 0; - + + // fill syscall_cache if necessary + struct syscall_cache_t *syscall = peek_syscall(EVENT_CONNECT); + if (!syscall) { + return 0; + } + // Extract IP and port from the sockaddr structure - bpf_probe_read(&family, sizeof(family), &address->sa_family); + bpf_probe_read(&syscall->connect.family, sizeof(syscall->connect.family), &address->sa_family); - if (family == AF_INET) { + if (syscall->connect.family == AF_INET) { struct sockaddr_in *addr_in = (struct sockaddr_in *)address; - bpf_probe_read(&key.port, sizeof(addr_in->sin_port), &addr_in->sin_port); - bpf_probe_read(&key.addr, sizeof(addr_in->sin_addr.s_addr), &addr_in->sin_addr.s_addr); - } else if (family == AF_INET6) { + bpf_probe_read(&syscall->connect.port, sizeof(addr_in->sin_port), &addr_in->sin_port); + bpf_probe_read(&syscall->connect.addr, sizeof(addr_in->sin_addr.s_addr), &addr_in->sin_addr.s_addr); + } else if (syscall->connect.family == AF_INET6) { struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)address; - bpf_probe_read(&key.port, sizeof(addr_in6->sin6_port), &addr_in6->sin6_port); - bpf_probe_read(&key.addr, sizeof(u64) * 2, (char *)addr_in6 + offsetof(struct sockaddr_in6, sin6_addr)); + bpf_probe_read(&syscall->connect.port, sizeof(addr_in6->sin6_port), &addr_in6->sin6_port); + bpf_probe_read(&syscall->connect.addr, sizeof(u64) * 2, (char *)addr_in6 + offsetof(struct sockaddr_in6, sin6_addr)); } bpf_probe_read(&socket_type, sizeof(socket_type), &sk->type); // We only handle TCP and UDP sockets for now if (socket_type == SOCK_STREAM) { - protocol = IPPROTO_TCP; + syscall->connect.protocol = IPPROTO_TCP; } else if (socket_type == SOCK_DGRAM) { - protocol = IPPROTO_UDP; + syscall->connect.protocol = IPPROTO_UDP; } - - // fill syscall_cache if necessary - struct syscall_cache_t *syscall = peek_syscall(EVENT_CONNECT); - if (syscall) { - syscall->connect.addr[0] = key.addr[0]; - syscall->connect.addr[1] = key.addr[1]; - syscall->connect.port = key.port; - syscall->connect.family = family; - syscall->connect.protocol = protocol; - } - - // Only handle AF_INET and AF_INET6 - if (family != AF_INET && family != AF_INET6) { - return 0; - } - return 0; } diff --git a/pkg/security/ebpf/c/include/hooks/network/dns.h b/pkg/security/ebpf/c/include/hooks/network/dns.h index 46fd79393fa7d..831776d1e964a 100644 --- a/pkg/security/ebpf/c/include/hooks/network/dns.h +++ b/pkg/security/ebpf/c/include/hooks/network/dns.h @@ -1,8 +1,9 @@ #ifndef _HOOKS_NETWORK_DNS_H_ #define _HOOKS_NETWORK_DNS_H_ -#include "helpers/dns.h" -#include "helpers/network.h" +#include "helpers/network/dns.h" +#include "helpers/network/parser.h" +#include "helpers/network/router.h" #include "perf_ring.h" __attribute__((always_inline)) int parse_dns_request(struct __sk_buff *skb, struct packet_t *pkt, struct dns_event_t *evt) { diff --git a/pkg/security/ebpf/c/include/hooks/network/flow.h b/pkg/security/ebpf/c/include/hooks/network/flow.h index 22a63a8ff8dd1..dc94ff71da27c 100644 --- 
a/pkg/security/ebpf/c/include/hooks/network/flow.h +++ b/pkg/security/ebpf/c/include/hooks/network/flow.h @@ -3,45 +3,106 @@ #include "constants/offsets/network.h" #include "constants/offsets/netns.h" -#include "helpers/network.h" +#include "helpers/network/pid_resolver.h" +#include "helpers/network/utils.h" HOOK_ENTRY("security_sk_classify_flow") int hook_security_sk_classify_flow(ctx_t *ctx) { struct sock *sk = (struct sock *)CTX_PARM1(ctx); struct flowi *fl = (struct flowi *)CTX_PARM2(ctx); struct pid_route_t key = {}; + struct pid_route_entry_t value = {}; union flowi_uli uli; - u16 family = get_family_from_sock_common((void *)sk); - if (family == AF_INET6) { - bpf_probe_read(&key.addr, sizeof(u64) * 2, (void *)fl + get_flowi6_saddr_offset()); + // There can be a mismatch between the family of the socket and the family of the flow. + // The socket can be of AF_INET6, and yet the flow could be AF_INET. + // See https://man7.org/linux/man-pages/man7/ipv6.7.html for more. + + // In our case, this means that we need to "guess" if the flow is AF_INET or AF_INET6 when the socket is AF_INET6. + u16 flow_family = get_family_from_sock_common((void *)sk); + u16 sk_port = get_skc_num_from_sock_common((void *)sk); + if (flow_family == AF_INET6) { + // check if the source port of the flow matches the bound port of the socket bpf_probe_read(&uli, sizeof(uli), (void *)fl + get_flowi6_uli_offset()); bpf_probe_read(&key.port, sizeof(key.port), &uli.ports.sport); - } else if (family == AF_INET) { - bpf_probe_read(&key.addr, sizeof(u32), (void *)fl + get_flowi4_saddr_offset()); + + // if they don't match, then this is likely an AF_INET flow + if (sk_port != key.port) { + flow_family = AF_INET; + } else { + // this is an AF_INET6 flow + bpf_probe_read(&key.addr, sizeof(u64) * 2, (void *)fl + get_flowi6_saddr_offset()); + // TODO: fill l4_protocol, but wait for implementation on security_socket_bind to be ready first + // bpf_probe_read(&key.l4_protocol, 1, (void *)fl + get_flowi6_proto_offset()); + } + } + if (flow_family == AF_INET) { + // make sure the ports match bpf_probe_read(&uli, sizeof(uli), (void *)fl + get_flowi4_uli_offset()); bpf_probe_read(&key.port, sizeof(key.port), &uli.ports.sport); - } else { + + // if they don't match, return now, we don't know how to handle this flow + if (sk_port != key.port) { + return 0; + } else { + // This is an AF_INET flow + bpf_probe_read(&key.addr, sizeof(u32), (void *)fl + get_flowi4_saddr_offset()); + // TODO: fill l4_protocol, but wait for implementation on security_socket_bind to be ready first + // bpf_probe_read(&key.l4_protocol, 1, (void *)fl + get_flowi4_proto_offset()); + } + } + if (flow_family != AF_INET && flow_family != AF_INET6) { + // ignore these flows for now return 0; } + // add netns information + key.netns = get_netns_from_sock(sk); + +#if defined(DEBUG_NETWORK_FLOW) + bpf_printk("security_sk_classify_flow"); + bpf_printk(" p:%d a:%lu a:%lu", key.port, key.addr[0], key.addr[1]); +#endif + + if (is_sk_storage_supported()) { + // check if the socket already has an active flow + // This requires kernel v5.11+ (https://github.com/torvalds/linux/commit/8e4597c627fb48f361e2a5b012202cb1b6cbcd5e) + struct pid_route_t *existing_route = bpf_sk_storage_get(&sock_active_pid_route, sk, 0, BPF_SK_STORAGE_GET_F_CREATE); + if (existing_route != NULL) { + if (existing_route->port != 0 || existing_route->addr[0] != 0 || existing_route->addr[1] != 0) { + + #if defined(DEBUG_NETWORK_FLOW) + bpf_printk("flushing previous entry p:%d a:%lu a:%lu ...", 
existing_route->port, existing_route->addr[0], existing_route->addr[1]); + #endif + + // delete existing entry + bpf_map_delete_elem(&flow_pid, existing_route); + existing_route->addr[0] = 0; + existing_route->addr[1] = 0; + bpf_map_delete_elem(&flow_pid, existing_route); + } + + // register the new one in the sock_active_pid_route map + *existing_route = key; + } + } + // Register service PID if (key.port != 0) { u64 id = bpf_get_current_pid_tgid(); u32 tid = (u32)id; - u32 pid = id >> 32; + value.pid = id >> 32; + value.type = FLOW_CLASSIFICATION_ENTRY; - // add netns information - key.netns = get_netns_from_sock(sk); if (key.netns != 0) { bpf_map_update_elem(&netns_cache, &tid, &key.netns, BPF_ANY); } - bpf_map_update_elem(&flow_pid, &key, &pid, BPF_ANY); + bpf_map_update_elem(&flow_pid, &key, &value, BPF_ANY); #if defined(DEBUG_NETWORK_FLOW) - bpf_printk("# registered (flow) pid:%d netns:%u", pid, key.netns); - bpf_printk("# p:%d a:%d a:%d", key.port, key.addr[0], key.addr[1]); + bpf_printk("# registered (flow) pid:%d netns:%u", value.pid, key.netns); + bpf_printk("# p:%d a:%lu a:%lu", key.port, key.addr[0], key.addr[1]); #endif } return 0; @@ -89,4 +150,248 @@ int hook_nf_nat_packet(ctx_t *ctx) { return trace_nat_manip_pkt(ct); } +__attribute__((always_inline)) void fill_pid_route_from_sflow(struct pid_route_t *route, struct namespaced_flow_t *ns_flow) { + route->addr[0] = ns_flow->flow.saddr[0]; + route->addr[1] = ns_flow->flow.saddr[1]; + route->port = ns_flow->flow.sport; + route->netns = ns_flow->netns; +} + +__attribute__((always_inline)) void flush_flow_pid_by_route(struct pid_route_t *route) { + struct pid_route_entry_t *value = bpf_map_lookup_elem(&flow_pid, route); + if (value != NULL) { + if (value->type == FLOW_CLASSIFICATION_ENTRY) { + bpf_map_delete_elem(&flow_pid, route); + } + } else { + // try with no IP + route->addr[0] = 0; + route->addr[1] = 0; + value = bpf_map_lookup_elem(&flow_pid, route); + if (value != NULL) { + if (value->type == FLOW_CLASSIFICATION_ENTRY) { + bpf_map_delete_elem(&flow_pid, route); + } + } + } +} + +HOOK_ENTRY("nf_ct_delete") +int hook_nf_ct_delete(ctx_t *ctx) { + struct nf_conn *ct = (struct nf_conn *)CTX_PARM1(ctx); + u32 netns = get_netns_from_nf_conn(ct); + + struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX]; + bpf_probe_read(&tuplehash, sizeof(tuplehash), &ct->tuplehash); + struct nf_conntrack_tuple *orig_tuple = &tuplehash[IP_CT_DIR_ORIGINAL].tuple; + struct nf_conntrack_tuple *reply_tuple = &tuplehash[IP_CT_DIR_REPLY].tuple; + + // parse nat flows + struct namespaced_flow_t orig = { + .netns = netns, + }; + struct namespaced_flow_t reply = { + .netns = netns, + }; + parse_tuple(orig_tuple, &orig.flow); + parse_tuple(reply_tuple, &reply.flow); + +#if defined(DEBUG_NETWORK_FLOW) + bpf_printk("nf_ct_delete"); + bpf_printk(" - src p:%d a:%lu a:%lu", orig.flow.sport, orig.flow.saddr[0], orig.flow.saddr[1]); + bpf_printk(" - dst p:%d a:%lu a:%lu", orig.flow.dport, orig.flow.daddr[0], orig.flow.daddr[1]); +#endif + + // clean up entries in the conntrack map + bpf_map_delete_elem(&conntrack, &reply); + flip(&reply.flow); + bpf_map_delete_elem(&conntrack, &reply); + + // Between NAT operations and network direction, both `orig` and `reply` could hold entries + // in `flow_pid`, clean up all matching non-"BIND_ENTRY" entries. 
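
The flow_pid map now stores a pid_route_entry_t instead of a bare PID, so the cleanup paths in this hunk can tell flow-classification entries apart from bind and procfs entries, and flush_flow_pid_by_route retries the lookup with a zeroed source address when the exact route is not present. A minimal Go sketch of that lookup-and-delete logic, with type and constant names that only mirror the C structs (illustrative, not the repository's real Go types):

    package flowpid

    // Entry types mirroring the C enum; the concrete values are assumptions.
    const (
        FlowClassificationEntry = iota
        BindEntry
        ProcFSEntry
    )

    // PidRoute mirrors struct pid_route_t (source address, netns, port).
    type PidRoute struct {
        Addr  [2]uint64
        Netns uint32
        Port  uint16
    }

    // PidRouteEntry mirrors struct pid_route_entry_t.
    type PidRouteEntry struct {
        Pid  uint32
        Type uint32
    }

    // flushFlowPid mimics flush_flow_pid_by_route: look up the exact route first,
    // fall back to the same netns/port with a zeroed address only if nothing was
    // found, and delete only entries created by flow classification.
    func flushFlowPid(flowPid map[PidRoute]PidRouteEntry, route PidRoute) {
        candidates := []PidRoute{route, {Netns: route.Netns, Port: route.Port}}
        for _, key := range candidates {
            if entry, ok := flowPid[key]; ok {
                if entry.Type == FlowClassificationEntry {
                    delete(flowPid, key)
                }
                return
            }
        }
    }
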
+ struct pid_route_t route = {}; + + // start with orig + fill_pid_route_from_sflow(&route, &orig); + flush_flow_pid_by_route(&route); + + // flip orig and try again + flip(&orig.flow); + fill_pid_route_from_sflow(&route, &orig); + flush_flow_pid_by_route(&route); + + // reply + fill_pid_route_from_sflow(&route, &reply); + flush_flow_pid_by_route(&route); + + // flip reply and try again + flip(&reply.flow); + fill_pid_route_from_sflow(&route, &reply); + flush_flow_pid_by_route(&route); + + return 0; +} + +__attribute__((always_inline)) int handle_sk_release(struct sock *sk) { + struct pid_route_t route = {}; + + // copy netns + route.netns = get_netns_from_sock(sk); + if (route.netns == 0) { + return 0; + } + + // copy port + route.port = get_skc_num_from_sock_common((void *)sk); + + // copy ipv4 / ipv6 + u16 family = get_family_from_sock_common((void *)sk); + if (family == AF_INET6) { + bpf_probe_read(&route.addr, sizeof(u64) * 2, &sk->__sk_common.skc_v6_rcv_saddr); + +#if defined(DEBUG_NETWORK_FLOW) + bpf_printk("sk_release"); + bpf_printk(" netns:%u", route.netns); + bpf_printk(" v6 p:%d a:%lu a:%lu", route.port, route.addr[0], route.addr[1]); +#endif + + // clean up flow_pid entry + bpf_map_delete_elem(&flow_pid, &route); + // also clean up empty entry if it exists + route.addr[0] = 0; + route.addr[1] = 0; + bpf_map_delete_elem(&flow_pid, &route); + + // We might be dealing with an AF_INET traffic over an AF_INET6 socket. + // To be sure, clean AF_INET entries as well. + family = AF_INET; + } + if (family == AF_INET) { + bpf_probe_read(&route.addr, sizeof(sk->__sk_common.skc_rcv_saddr), &sk->__sk_common.skc_rcv_saddr); + +#if defined(DEBUG_NETWORK_FLOW) + bpf_printk("sk_release"); + bpf_printk(" netns:%u", route.netns); + bpf_printk(" v4 p:%d a:%lu a:%lu", route.port, route.addr[0], route.addr[1]); +#endif + + // clean up flow_pid entry + bpf_map_delete_elem(&flow_pid, &route); + // also clean up empty entry if it exists + route.addr[0] = 0; + route.addr[1] = 0; + bpf_map_delete_elem(&flow_pid, &route); + } + + return 0; +} + +// for kernel-initiated socket cleanup (timeout or error) +HOOK_ENTRY("sk_common_release") +int hook_sk_common_release(ctx_t *ctx) { + struct sock *sk = (struct sock *)CTX_PARM1(ctx); + if (sk == NULL) { + return 0; + } + return handle_sk_release(sk); +} + +// for user-space initiated socket shutdown +HOOK_ENTRY("inet_shutdown") +int hook_inet_shutdown(ctx_t *ctx) { + struct socket *socket = (struct socket *)CTX_PARM1(ctx); + struct sock *sk = get_sock_from_socket(socket); + if (sk == NULL) { + return 0; + } + + return handle_sk_release(sk); +} + +// for user space initiated socket termination +HOOK_ENTRY("inet_release") +int hook_inet_release(ctx_t *ctx) { + struct socket *socket = (struct socket *)CTX_PARM1(ctx); + struct sock *sk = get_sock_from_socket(socket); + if (sk == NULL) { + return 0; + } + + return handle_sk_release(sk); +} + +HOOK_ENTRY("inet_bind") +int hook_inet_bind(ctx_t *ctx) { + struct socket *sock = (struct socket *)CTX_PARM1(ctx); + struct inet_bind_args_t args = {}; + args.sock = sock; + u64 pid = bpf_get_current_pid_tgid(); + bpf_map_update_elem(&inet_bind_args, &pid, &args, BPF_ANY); + return 0; +} + +HOOK_EXIT("inet_bind") +int rethook_inet_bind(ctx_t *ctx) { + // fetch inet_bind arguments + u64 id = bpf_get_current_pid_tgid(); + u32 tid = (u32)id; + struct inet_bind_args_t *args = bpf_map_lookup_elem(&inet_bind_args, &id); + if (args == NULL) { + // should never happen, ignore + return 0; + } + + // delete the entry in inet_bind_args to 
make sure we always cleanup inet_bind_args and we don't leak entries + bpf_map_delete_elem(&inet_bind_args, &id); + + int ret = CTX_PARMRET(ctx); + if (ret < 0) { + // we only care about successful bind operations + return 0; + } + + struct socket *socket = args->sock; + if (socket == NULL) { + // should never happen, ignore + return 0; + } + + struct sock *sk = get_sock_from_socket(socket); + if (sk == NULL) { + return 0; + } + struct pid_route_t route = {}; + struct pid_route_entry_t value = {}; + value.type = BIND_ENTRY; + + // add netns information + route.netns = get_netns_from_sock(sk); + if (route.netns != 0) { + bpf_map_update_elem(&netns_cache, &tid, &route.netns, BPF_ANY); + } + + // copy ipv4 / ipv6 + u16 family = 0; + bpf_probe_read(&family, sizeof(family), &sk->__sk_common.skc_family); + if (family == AF_INET) { + bpf_probe_read(&route.addr, sizeof(sk->__sk_common.skc_rcv_saddr), &sk->__sk_common.skc_rcv_saddr); + } else if (family == AF_INET6) { + bpf_probe_read(&route.addr, sizeof(u64) * 2, &sk->__sk_common.skc_v6_rcv_saddr); + } else { + // we don't care about non IPv4 / IPV6 flows + return 0; + } + + // copy port + bpf_probe_read(&route.port, sizeof(route.port), &sk->__sk_common.skc_num); + route.port = htons(route.port); + + // Register service PID + if (route.port > 0) { + value.pid = id >> 32; + bpf_map_update_elem(&flow_pid, &route, &value, BPF_ANY); + } + return 0; +} + #endif diff --git a/pkg/security/ebpf/c/include/hooks/network/imds.h b/pkg/security/ebpf/c/include/hooks/network/imds.h index b0b72559bf8ea..1ca19b7e2e730 100644 --- a/pkg/security/ebpf/c/include/hooks/network/imds.h +++ b/pkg/security/ebpf/c/include/hooks/network/imds.h @@ -1,8 +1,8 @@ #ifndef _HOOKS_NETWORK_IMDS_H_ #define _HOOKS_NETWORK_IMDS_H_ -#include "helpers/imds.h" -#include "helpers/network.h" +#include "helpers/network/imds.h" +#include "helpers/network/parser.h" #include "perf_ring.h" SEC("classifier/imds_request") diff --git a/pkg/security/ebpf/c/include/hooks/network/net_device.h b/pkg/security/ebpf/c/include/hooks/network/net_device.h index 0f3cfcee66ee5..22eec086d1da5 100644 --- a/pkg/security/ebpf/c/include/hooks/network/net_device.h +++ b/pkg/security/ebpf/c/include/hooks/network/net_device.h @@ -85,7 +85,7 @@ int rethook_dev_new_index(ctx_t *ctx) { struct register_netdevice_cache_t *entry = bpf_map_lookup_elem(®ister_netdevice_cache, &id); if (entry != NULL) { - entry->ifindex.ifindex = (u32)CTX_PARMRET(ctx, 1); + entry->ifindex.ifindex = (u32)CTX_PARMRET(ctx); } return 0; }; @@ -112,7 +112,7 @@ int hook___dev_get_by_index(ctx_t *ctx) { HOOK_EXIT("register_netdevice") int rethook_register_netdevice(ctx_t *ctx) { u64 id = bpf_get_current_pid_tgid(); - int ret = CTX_PARMRET(ctx, 1); + int ret = CTX_PARMRET(ctx); if (ret != 0) { // interface registration failed, remove cache entry bpf_map_delete_elem(®ister_netdevice_cache, &id); diff --git a/pkg/security/ebpf/c/include/hooks/network/raw.h b/pkg/security/ebpf/c/include/hooks/network/raw.h index 6f46f6b4eb1a2..ad1ce69856f32 100644 --- a/pkg/security/ebpf/c/include/hooks/network/raw.h +++ b/pkg/security/ebpf/c/include/hooks/network/raw.h @@ -1,14 +1,10 @@ #ifndef _HOOKS_NETWORK_RAW_H_ #define _HOOKS_NETWORK_RAW_H_ -#include "helpers/network.h" +#include "helpers/network/parser.h" +#include "helpers/network/raw.h" #include "perf_ring.h" -__attribute__((always_inline)) struct raw_packet_event_t *get_raw_packet_event() { - u32 key = 0; - return bpf_map_lookup_elem(&raw_packet_event, &key); -} - SEC("classifier/raw_packet_sender") int 
classifier_raw_packet_sender(struct __sk_buff *skb) { struct packet_t *pkt = get_packet(); @@ -24,7 +20,7 @@ int classifier_raw_packet_sender(struct __sk_buff *skb) { } // process context - fill_network_process_context(&evt->process, pkt); + fill_network_process_context_from_pkt(&evt->process, pkt); struct proc_cache_t *entry = get_proc_cache(evt->process.pid); if (entry == NULL) { @@ -33,7 +29,7 @@ int classifier_raw_packet_sender(struct __sk_buff *skb) { copy_container_id_no_tracing(entry->container.container_id, &evt->container.container_id); } - fill_network_device_context(&evt->device, skb, pkt); + fill_network_device_context_from_pkt(&evt->device, skb, pkt); u32 len = evt->len; if (len > sizeof(evt->data)) { diff --git a/pkg/security/ebpf/c/include/hooks/network/router.h b/pkg/security/ebpf/c/include/hooks/network/router.h deleted file mode 100644 index 93cca5f4889ee..0000000000000 --- a/pkg/security/ebpf/c/include/hooks/network/router.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef _HOOKS_NETWORK_ROUTER_H_ -#define _HOOKS_NETWORK_ROUTER_H_ - -#include "helpers/network.h" - -__attribute__((always_inline)) int route_pkt(struct __sk_buff *skb, struct packet_t *pkt, int direction) { - // TODO: l3 / l4 firewall - - // route DNS requests - if (is_event_enabled(EVENT_DNS)) { - if (pkt->l4_protocol == IPPROTO_UDP && pkt->translated_ns_flow.flow.dport == htons(53)) { - bpf_tail_call_compat(skb, &classifier_router, DNS_REQUEST); - } - } - - // route IMDS requests - if (is_event_enabled(EVENT_IMDS)) { - if (pkt->l4_protocol == IPPROTO_TCP && ((pkt->ns_flow.flow.saddr[0] & 0xFFFFFFFF) == get_imds_ip() || (pkt->ns_flow.flow.daddr[0] & 0xFFFFFFFF) == get_imds_ip())) { - bpf_tail_call_compat(skb, &classifier_router, IMDS_REQUEST); - } - } - - return ACT_OK; -} - -#endif diff --git a/pkg/security/ebpf/c/include/hooks/network/stats_worker.h b/pkg/security/ebpf/c/include/hooks/network/stats_worker.h new file mode 100644 index 0000000000000..b0a80b0c8d076 --- /dev/null +++ b/pkg/security/ebpf/c/include/hooks/network/stats_worker.h @@ -0,0 +1,31 @@ +#ifndef _HOOKS_NETWORK_WORKER_H_ +#define _HOOKS_NETWORK_WORKER_H_ + +struct ctx_holder { + struct bpf_perf_event_data *ctx; +}; + +static long active_flows_callback_fn(struct bpf_map *map, const void *key, void *value, void *callback_ctx) { + u32 pid = *(u32 *)key; + struct active_flows_t *entry = (struct active_flows_t *) value; + struct bpf_perf_event_data *ctx = ((struct ctx_holder *) callback_ctx)->ctx; + return flush_network_stats(pid, entry, ctx, NETWORK_STATS_TICKER); +} + +SEC("perf_event/cpu_clock") +int network_stats_worker(struct bpf_perf_event_data *ctx) +{ + // we want only one worker for network stats + if (bpf_get_smp_processor_id() > 0) { + return 0; + } + struct ctx_holder holder = {}; + holder.ctx = ctx; + + // iterate over the list of active flows, send when need be + bpf_for_each_map_elem(&active_flows, &active_flows_callback_fn, &holder, 0); + + return 0; +}; + +#endif diff --git a/pkg/security/ebpf/c/include/hooks/network/tc.h b/pkg/security/ebpf/c/include/hooks/network/tc.h index 8445905aa3ccf..5919e1d89885b 100644 --- a/pkg/security/ebpf/c/include/hooks/network/tc.h +++ b/pkg/security/ebpf/c/include/hooks/network/tc.h @@ -1,9 +1,9 @@ #ifndef _HOOKS_NETWORK_TC_H_ #define _HOOKS_NETWORK_TC_H_ -#include "helpers/network.h" - -#include "router.h" +#include "helpers/network/parser.h" +#include "helpers/network/router.h" +#include "helpers/network/pid_resolver.h" #include "raw.h" SEC("classifier/ingress") @@ -12,6 +12,7 @@ int 
classifier_ingress(struct __sk_buff *skb) { if (!pkt) { return ACT_OK; } + resolve_pid(pkt); return route_pkt(skb, pkt, INGRESS); }; @@ -22,6 +23,7 @@ int classifier_egress(struct __sk_buff *skb) { if (!pkt) { return ACT_OK; } + resolve_pid(pkt); return route_pkt(skb, pkt, EGRESS); }; @@ -68,6 +70,7 @@ int classifier_raw_packet_ingress(struct __sk_buff *skb) { if (!pkt) { return ACT_OK; } + resolve_pid(pkt); // do not handle packet without process context if (pkt->pid < 0) { @@ -93,6 +96,7 @@ int classifier_raw_packet_egress(struct __sk_buff *skb) { if (!pkt) { return ACT_OK; } + resolve_pid(pkt); // do not handle packet without process context if (pkt->pid < 0) { diff --git a/pkg/security/ebpf/c/include/hooks/open.h b/pkg/security/ebpf/c/include/hooks/open.h index c66a00ea5b84a..9e342935bcfdf 100644 --- a/pkg/security/ebpf/c/include/hooks/open.h +++ b/pkg/security/ebpf/c/include/hooks/open.h @@ -309,7 +309,7 @@ int tracepoint_handle_sys_open_exit(struct tracepoint_raw_syscalls_sys_exit_t *a HOOK_EXIT("io_openat2") int rethook_io_openat2(ctx_t *ctx) { - int retval = CTX_PARMRET(ctx, 2); + int retval = CTX_PARMRET(ctx); return sys_open_ret(ctx, retval, DR_KPROBE_OR_FENTRY); } @@ -357,6 +357,7 @@ int __attribute__((always_inline)) dr_open_callback(void *ctx) { }; fill_file(syscall->open.dentry, &event.file); + struct proc_cache_t *entry; if (syscall->open.pid_tgid != 0) { entry = fill_process_context_with_pid_tgid(&event.process, syscall->open.pid_tgid); diff --git a/pkg/security/ebpf/c/include/hooks/procfs.h b/pkg/security/ebpf/c/include/hooks/procfs.h index 9dbbfd063dc5f..efd87c73bba15 100644 --- a/pkg/security/ebpf/c/include/hooks/procfs.h +++ b/pkg/security/ebpf/c/include/hooks/procfs.h @@ -4,28 +4,62 @@ #include "constants/custom.h" #include "constants/offsets/filesystem.h" #include "constants/offsets/netns.h" +#include "constants/offsets/network.h" #include "helpers/filesystem.h" #include "helpers/utils.h" static __attribute__((always_inline)) void cache_file(struct dentry *dentry, u32 mount_id) { - u32 flags = 0; u64 inode = get_dentry_ino(dentry); - if (is_overlayfs(dentry)) { - set_overlayfs_ino(dentry, &inode, &flags); - } struct file_t entry = { .path_key = { .ino = inode, .mount_id = mount_id, }, - .flags = flags, }; + if (is_overlayfs(dentry)) { + set_overlayfs_inode(dentry, &entry); + } + fill_file(dentry, &entry); - // why not inode + mount id ? - bpf_map_update_elem(&exec_file_cache, &inode, &entry, BPF_ANY); + // cache with the inode as the only key, as this map is used to capture the mount_id + // userspace has to push an entry first, which limits evictions caused by other stats from system-probe. 
+ bpf_map_update_elem(&inode_file, &entry.path_key.ino, &entry, BPF_EXIST); +} + +static __attribute__((always_inline)) int handle_stat() { + if (!is_runtime_request()) { + return 0; + } + + struct syscall_cache_t syscall = { + .type = EVENT_STAT, + }; + cache_syscall(&syscall); + return 0; +} + +HOOK_SYSCALL_ENTRY0(newfstatat) { + return handle_stat(); +} + +static __attribute__((always_inline)) int handle_ret_stat() { + if (!is_runtime_request()) { + return 0; + } + + pop_syscall(EVENT_STAT); + return 0; +} + +HOOK_SYSCALL_EXIT(newfstatat) { + return handle_ret_stat(); +} + +SEC("tracepoint/handle_sys_newfstatat_exit") +int tracepoint_handle_sys_newfstatat_exit(struct tracepoint_raw_syscalls_sys_exit_t *args) { + return handle_ret_stat(); } // used by both snapshot and process resolver fallback @@ -35,6 +69,16 @@ int hook_security_inode_getattr(ctx_t *ctx) { return 0; } + struct syscall_cache_t *syscall = peek_syscall(EVENT_STAT); + if (!syscall) { + return 0; + } + + if (syscall->stat.in_flight) { + return 0; + } + syscall->stat.in_flight = 1; + u32 mount_id = 0; struct dentry *dentry; @@ -78,15 +122,17 @@ int hook_path_get(ctx_t *ctx) { struct path *p = (struct path *)CTX_PARM1(ctx); struct file *sock_file = (void *)p - f_path_offset; struct pid_route_t route = {}; + struct pid_route_entry_t value = {}; + value.pid = *procfs_pid; + value.type = PROCFS_ENTRY; - struct socket *sock; - bpf_probe_read(&sock, sizeof(sock), &sock_file->private_data); - if (sock == NULL) { + struct socket *socket; + bpf_probe_read(&socket, sizeof(socket), &sock_file->private_data); + if (socket == NULL) { return 0; } - struct sock *sk; - bpf_probe_read(&sk, sizeof(sk), &sock->sk); + struct sock *sk = get_sock_from_socket(socket); if (sk == NULL) { return 0; } @@ -96,23 +142,27 @@ int hook_path_get(ctx_t *ctx) { return 0; } - u16 family = 0; - bpf_probe_read(&family, sizeof(family), &sk->__sk_common.skc_family); + route.port = get_skc_num_from_sock_common((void *)sk); + if (route.port == 0) { + // without a port we can't do much, leave early + return 0; + } + + u16 family = get_family_from_sock_common((void *)sk); + if (family == AF_INET6) { + bpf_probe_read(&route.addr, sizeof(u64) * 2, &sk->__sk_common.skc_v6_rcv_saddr); + bpf_map_update_elem(&flow_pid, &route, &value, BPF_ANY); + + // This AF_INET6 socket might also handle AF_INET traffic, store a mapping to AF_INET too + family = AF_INET; + } if (family == AF_INET) { bpf_probe_read(&route.addr, sizeof(sk->__sk_common.skc_rcv_saddr), &sk->__sk_common.skc_rcv_saddr); - } else if (family == AF_INET6) { - bpf_probe_read(&route.addr, sizeof(u64) * 2, &sk->__sk_common.skc_v6_rcv_saddr); + bpf_map_update_elem(&flow_pid, &route, &value, BPF_ANY); } else { + // ignore unsupported traffic for now return 0; } - bpf_probe_read(&route.port, sizeof(route.port), &sk->__sk_common.skc_num); - // Calling htons is necessary to support snapshotted bound port. Without it, we're can't properly route incoming - // traffic to the relevant process. 
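
cache_file now keys its cache on the inode alone and updates inode_file with BPF_EXIST, so the eBPF side only refreshes entries that userspace has already pushed; this is what keeps unrelated stat activity from evicting useful entries. A sketch of what the userspace side of that contract could look like with cilium/ebpf (the map name and BPF_EXIST behavior come from the diff; the Go struct layout and error handling are assumptions):

    package main

    import (
        "github.com/cilium/ebpf"
    )

    // pathKey and fileEntry mirror path_key_t / file_t only far enough for this
    // sketch; the exact layout and padding are assumptions.
    type pathKey struct {
        Ino     uint64
        MountID uint32
        PathID  uint32
    }

    type fileEntry struct {
        PathKey pathKey
        Flags   uint32
        Padding uint32
    }

    // seedAndReadMountID pre-seeds inode_file for an inode of interest, then reads
    // the entry back once the kernel side has filled in the mount_id.
    func seedAndReadMountID(inodeFile *ebpf.Map, ino uint64) (uint32, error) {
        seed := fileEntry{PathKey: pathKey{Ino: ino}}
        // Push a placeholder entry; the BPF program only updates existing keys (BPF_EXIST).
        if err := inodeFile.Update(ino, seed, ebpf.UpdateAny); err != nil {
            return 0, err
        }

        var entry fileEntry
        if err := inodeFile.Lookup(ino, &entry); err != nil {
            return 0, err
        }
        return entry.PathKey.MountID, nil
    }
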
- route.port = htons(route.port); - - // save pid route - u32 pid = *procfs_pid; - bpf_map_update_elem(&flow_pid, &route, &pid, BPF_ANY); #if defined(DEBUG_NETNS) bpf_printk("path_get netns: %u", route.netns); diff --git a/pkg/security/ebpf/c/include/hooks/rename.h b/pkg/security/ebpf/c/include/hooks/rename.h index 3cd813cac99f2..cdc660ad8880d 100644 --- a/pkg/security/ebpf/c/include/hooks/rename.h +++ b/pkg/security/ebpf/c/include/hooks/rename.h @@ -160,7 +160,7 @@ int __attribute__((always_inline)) sys_rename_ret(void *ctx, int retval, int dr_ HOOK_EXIT("do_renameat2") int rethook_do_renameat2(ctx_t *ctx) { - int retval = CTX_PARMRET(ctx, 5); + int retval = CTX_PARMRET(ctx); return sys_rename_ret(ctx, retval, DR_KPROBE_OR_FENTRY); } diff --git a/pkg/security/ebpf/c/include/hooks/rmdir.h b/pkg/security/ebpf/c/include/hooks/rmdir.h index f8b0b75a09f82..26fdd5588641a 100644 --- a/pkg/security/ebpf/c/include/hooks/rmdir.h +++ b/pkg/security/ebpf/c/include/hooks/rmdir.h @@ -7,27 +7,30 @@ #include "helpers/filesystem.h" #include "helpers/syscalls.h" -int __attribute__((always_inline)) trace__sys_rmdir(u8 async, int flags) { +int __attribute__((always_inline)) trace__sys_rmdir(u8 async, const char *filename) { struct syscall_cache_t syscall = { .type = EVENT_RMDIR, .policy = fetch_policy(EVENT_RMDIR), .async = async, }; + if (!async) { + collect_syscall_ctx(&syscall, SYSCALL_CTX_ARG_STR(0), (void *)filename, NULL, NULL); + } cache_syscall(&syscall); return 0; } -HOOK_SYSCALL_ENTRY0(rmdir) { - return trace__sys_rmdir(SYNC_SYSCALL, 0); +HOOK_SYSCALL_ENTRY1(rmdir, const char *, filename) { + return trace__sys_rmdir(SYNC_SYSCALL, filename); } HOOK_ENTRY("do_rmdir") int hook_do_rmdir(ctx_t *ctx) { struct syscall_cache_t *syscall = peek_syscall_with(rmdir_predicate); if (!syscall) { - return trace__sys_rmdir(ASYNC_SYSCALL, 0); + return trace__sys_rmdir(ASYNC_SYSCALL, NULL); } return 0; } @@ -138,6 +141,7 @@ int __attribute__((always_inline)) sys_rmdir_ret(void *ctx, int retval) { if (syscall->state != DISCARDED && is_event_enabled(EVENT_RMDIR)) { struct rmdir_event_t event = { .syscall.retval = retval, + .syscall_ctx.id = syscall->ctx_id, .event.flags = syscall->async ? 
EVENT_FLAGS_ASYNC : 0, .file = syscall->rmdir.file, }; @@ -158,7 +162,7 @@ int __attribute__((always_inline)) sys_rmdir_ret(void *ctx, int retval) { HOOK_EXIT("do_rmdir") int rethook_do_rmdir(ctx_t *ctx) { - int retval = CTX_PARMRET(ctx, 2); + int retval = CTX_PARMRET(ctx); return sys_rmdir_ret(ctx, retval); } diff --git a/pkg/security/ebpf/c/include/hooks/signal.h b/pkg/security/ebpf/c/include/hooks/signal.h index 325cf249c249c..3d1a4211f0a74 100644 --- a/pkg/security/ebpf/c/include/hooks/signal.h +++ b/pkg/security/ebpf/c/include/hooks/signal.h @@ -53,7 +53,7 @@ int hook_check_kill_permission(ctx_t *ctx) { /* hook here to grab the EPERM retval */ HOOK_EXIT("check_kill_permission") int rethook_check_kill_permission(ctx_t *ctx) { - int retval = (int)CTX_PARMRET(ctx, 3); + int retval = (int)CTX_PARMRET(ctx); struct syscall_cache_t *syscall = pop_syscall(EVENT_SIGNAL); if (!syscall) { diff --git a/pkg/security/ebpf/c/include/hooks/splice.h b/pkg/security/ebpf/c/include/hooks/splice.h index 4d289c94ab615..f5a95f4a2c025 100644 --- a/pkg/security/ebpf/c/include/hooks/splice.h +++ b/pkg/security/ebpf/c/include/hooks/splice.h @@ -48,7 +48,7 @@ int rethook_get_pipe_info(ctx_t *ctx) { return 0; } - struct pipe_inode_info *info = (struct pipe_inode_info *)CTX_PARMRET(ctx, 2); + struct pipe_inode_info *info = (struct pipe_inode_info *)CTX_PARMRET(ctx); if (info == NULL) { // this is not a pipe, so most likely a file, resolve its path now syscall->splice.file_found = 1; diff --git a/pkg/security/ebpf/c/include/hooks/unlink.h b/pkg/security/ebpf/c/include/hooks/unlink.h index aefe174c7465a..0fd2b035d734f 100644 --- a/pkg/security/ebpf/c/include/hooks/unlink.h +++ b/pkg/security/ebpf/c/include/hooks/unlink.h @@ -162,7 +162,7 @@ int __attribute__((always_inline)) sys_unlink_ret(void *ctx, int retval) { HOOK_EXIT("do_unlinkat") int rethook_do_unlinkat(ctx_t *ctx) { - int retval = CTX_PARMRET(ctx, 2); + int retval = CTX_PARMRET(ctx); return sys_unlink_ret(ctx, retval); } diff --git a/pkg/security/ebpf/c/include/hooks/utimes.h b/pkg/security/ebpf/c/include/hooks/utimes.h index 8cb865dc7f998..e3859d1763929 100644 --- a/pkg/security/ebpf/c/include/hooks/utimes.h +++ b/pkg/security/ebpf/c/include/hooks/utimes.h @@ -54,6 +54,8 @@ int __attribute__((always_inline)) sys_utimes_ret(void *ctx, int retval) { return 0; } + set_file_layer(syscall->resolver.dentry, &syscall->setattr.file); + struct utimes_event_t event = { .syscall.retval = retval, .syscall_ctx.id = syscall->ctx_id, diff --git a/pkg/security/ebpf/c/include/maps.h b/pkg/security/ebpf/c/include/maps.h index 56f38dd7688d1..56eb957d2d4fd 100644 --- a/pkg/security/ebpf/c/include/maps.h +++ b/pkg/security/ebpf/c/include/maps.h @@ -7,6 +7,14 @@ #include "constants/enums.h" #include "structs/all.h" +#define BPF_SK_MAP(_name, _value_type) \ + struct { \ + __uint(type, BPF_MAP_TYPE_SK_STORAGE); \ + __type(value, _value_type); \ + __uint(map_flags, BPF_F_NO_PREALLOC); \ + __type(key, u32); \ + } _name SEC(".maps"); + BPF_ARRAY_MAP(path_id, u32, PATH_ID_MAP_SIZE) BPF_ARRAY_MAP(enabled_events, u64, 1) BPF_ARRAY_MAP(buffer_selector, u32, 4) @@ -40,6 +48,11 @@ BPF_HASH_MAP(security_profiles, container_id_t, struct security_profile_t, 1) // BPF_HASH_MAP(secprofs_syscalls, u64, struct security_profile_syscalls_t, 1) // max entries will be overriden at runtime BPF_HASH_MAP(auid_approvers, u32, struct event_mask_filter_t, 128) BPF_HASH_MAP(auid_range_approvers, u32, struct u32_range_filter_t, EVENT_MAX) +BPF_HASH_MAP(active_flows_spin_locks, u32, struct 
active_flows_spin_lock_t, 1) // max entry will be overridden at runtime +BPF_HASH_MAP(inode_file, u64, struct file_t, 32) + +BPF_HASH_MAP_FLAGS(active_flows, u32, struct active_flows_t, 1, BPF_F_NO_PREALLOC) // max entry will be overridden at runtime +BPF_HASH_MAP_FLAGS(inet_bind_args, u64, struct inet_bind_args_t, 1, BPF_F_NO_PREALLOC) // max entries will be overridden at runtime BPF_LRU_MAP(activity_dump_rate_limiters, u64, struct rate_limiter_ctx, 1) // max entries will be overridden at runtime BPF_LRU_MAP(rate_limiters, u32, struct rate_limiter_ctx, 1) // max entries will be overridden at runtime @@ -55,22 +68,24 @@ BPF_LRU_MAP(exec_pid_transfer, u32, u64, 512) BPF_LRU_MAP(netns_cache, u32, u32, 40960) BPF_LRU_MAP(span_tls, u32, struct span_tls_t, 4096) BPF_LRU_MAP(inode_discarders, struct inode_discarder_t, struct inode_discarder_params_t, 4096) -BPF_LRU_MAP(flow_pid, struct pid_route_t, u32, 10240) -BPF_LRU_MAP(conntrack, struct namespaced_flow_t, struct namespaced_flow_t, 4096) +BPF_LRU_MAP(flow_pid, struct pid_route_t, struct pid_route_entry_t, 10240) +BPF_LRU_MAP(conntrack, struct namespaced_flow_t, struct namespaced_flow_t, 4096) // TODO: size should be updated dynamically with "nf_conntrack_max" BPF_LRU_MAP(io_uring_ctx_pid, void *, u64, 2048) BPF_LRU_MAP(veth_state_machine, u64, struct veth_state_t, 1024) BPF_LRU_MAP(veth_devices, struct device_ifindex_t, struct device_t, 1024) -BPF_LRU_MAP(exec_file_cache, u64, struct file_t, 4096) BPF_LRU_MAP(syscall_monitor, struct syscall_monitor_key_t, struct syscall_monitor_entry_t, 2048) BPF_LRU_MAP(syscall_table, struct syscall_table_key_t, u8, 50) BPF_LRU_MAP(kill_list, u32, u32, 32) BPF_LRU_MAP(user_sessions, struct user_session_key_t, struct user_session_t, 1024) BPF_LRU_MAP(dentry_resolver_inputs, u64, struct dentry_resolver_input_t, 256) +BPF_LRU_MAP(ns_flow_to_network_stats, struct namespaced_flow_t, struct network_stats_t, 4096) // TODO: size should be updated dynamically with "nf_conntrack_max" BPF_LRU_MAP_FLAGS(tasks_in_coredump, u64, u8, 64, BPF_F_NO_COMMON_LRU) BPF_LRU_MAP_FLAGS(syscalls, u64, struct syscall_cache_t, 1, BPF_F_NO_COMMON_LRU) // max entries will be overridden at runtime BPF_LRU_MAP_FLAGS(pathnames, struct path_key_t, struct path_leaf_t, 1, BPF_F_NO_COMMON_LRU) // edited +BPF_SK_MAP(sock_active_pid_route, struct pid_route_t); + BPF_PERCPU_ARRAY_MAP(dr_erpc_state, struct dr_erpc_state_t, 1) BPF_PERCPU_ARRAY_MAP(cgroup_tracing_event_gen, struct cgroup_tracing_event_t, EVENT_GEN_SIZE) BPF_PERCPU_ARRAY_MAP(cgroup_prefix, cgroup_prefix_t, 1) @@ -90,6 +105,8 @@ BPF_PERCPU_ARRAY_MAP(selinux_write_buffer, struct selinux_write_buffer_t, 1) BPF_PERCPU_ARRAY_MAP(is_new_kthread, u32, 1) BPF_PERCPU_ARRAY_MAP(syscalls_stats, struct syscalls_stats_t, EVENT_MAX) BPF_PERCPU_ARRAY_MAP(raw_packet_event, struct raw_packet_event_t, 1) +BPF_PERCPU_ARRAY_MAP(network_flow_monitor_event_gen, struct network_flow_monitor_event_t, 1) +BPF_PERCPU_ARRAY_MAP(active_flows_gen, struct active_flows_t, 1) BPF_PERCPU_ARRAY_MAP(raw_packet_enabled, u32, 1) BPF_PROG_ARRAY(args_envs_progs, 3) @@ -100,5 +117,6 @@ BPF_PROG_ARRAY(dentry_resolver_tracepoint_progs, 3) BPF_PROG_ARRAY(classifier_router, 10) BPF_PROG_ARRAY(sys_exit_progs, 64) BPF_PROG_ARRAY(raw_packet_classifier_router, 32) +BPF_PROG_ARRAY(flush_network_stats_progs, 2) #endif diff --git a/pkg/security/ebpf/c/include/structs/activity_dump.h b/pkg/security/ebpf/c/include/structs/activity_dump.h index bde48d185111e..26f72198a55e5 100644 --- a/pkg/security/ebpf/c/include/structs/activity_dump.h 
+++ b/pkg/security/ebpf/c/include/structs/activity_dump.h @@ -7,7 +7,8 @@ struct activity_dump_config { u64 wait_list_timestamp; u64 start_timestamp; u64 end_timestamp; - u32 events_rate; + u16 events_rate; + u16 padding; u32 paused; }; diff --git a/pkg/security/ebpf/c/include/structs/network.h b/pkg/security/ebpf/c/include/structs/network.h index c2c2293e04673..3d185f28739fb 100644 --- a/pkg/security/ebpf/c/include/structs/network.h +++ b/pkg/security/ebpf/c/include/structs/network.h @@ -5,6 +5,13 @@ struct pid_route_t { u64 addr[2]; u32 netns; u16 port; + // TODO: wait for implementation on security_socket_bind to be ready first + // u16 l4_protocol; +}; + +struct pid_route_entry_t { + u32 pid; + u32 type; }; struct flow_t { @@ -12,7 +19,23 @@ struct flow_t { u64 daddr[2]; u16 sport; u16 dport; - u32 padding; + u16 l4_protocol; + u16 l3_protocol; +}; + +struct network_counters_t { + u64 data_size; + u64 pkt_count; +}; + +struct network_stats_t { + struct network_counters_t ingress; + struct network_counters_t egress; +}; + +struct flow_stats_t { + struct flow_t flow; + struct network_stats_t stats; }; struct namespaced_flow_t { @@ -20,6 +43,23 @@ struct namespaced_flow_t { u32 netns; }; +struct active_flows_t { + struct flow_t flows[ACTIVE_FLOWS_MAX_SIZE]; + + u64 last_sent; + u32 netns; + u32 ifindex; + u32 cursor; +}; + +struct active_flows_spin_lock_t { + struct bpf_spin_lock lock; +}; + +struct inet_bind_args_t { + struct socket *sock; +}; + struct device_t { char name[16]; u32 netns; @@ -66,7 +106,7 @@ struct packet_t { u32 offset; s64 pid; u32 payload_len; - u16 l4_protocol; + u32 network_direction; }; struct network_device_context_t { @@ -79,19 +119,7 @@ struct network_context_t { struct flow_t flow; u32 size; - u16 l3_protocol; - u16 l4_protocol; -}; - -struct raw_packet_event_t { - struct kevent_t event; - struct process_context_t process; - struct span_context_t span; - struct container_context_t container; - struct network_device_context_t device; - - int len; - char data[256]; + u32 network_direction; }; #endif diff --git a/pkg/security/ebpf/c/include/structs/rate_limiter.h b/pkg/security/ebpf/c/include/structs/rate_limiter.h index 9b22f346084bc..3092253eed803 100644 --- a/pkg/security/ebpf/c/include/structs/rate_limiter.h +++ b/pkg/security/ebpf/c/include/structs/rate_limiter.h @@ -1,11 +1,36 @@ #ifndef _STRUCTS_RATE_LIMITER_H_ #define _STRUCTS_RATE_LIMITER_H_ +#define RATE_LIMITER_COUNTER_MASK 0xffffllu + struct rate_limiter_ctx { - u64 current_period; - u32 counter; - u32 padding; + /* + data is representing both the `current_period` start + in the first 6 bytes (basically current_period & ~0xff) + and the counter in the last 2 bytes + */ + u64 data; }; +struct rate_limiter_ctx __attribute__((always_inline)) new_rate_limiter(u64 now, u16 counter) { + return (struct rate_limiter_ctx) { + .data = (now & ~RATE_LIMITER_COUNTER_MASK) | counter, + }; +} + +u64 __attribute__((always_inline)) get_current_period(struct rate_limiter_ctx *r) { + return r->data & ~RATE_LIMITER_COUNTER_MASK; +} + +u16 __attribute__((always_inline)) get_counter(struct rate_limiter_ctx *r) { + return r->data & RATE_LIMITER_COUNTER_MASK; +} + +void __attribute__((always_inline)) inc_counter(struct rate_limiter_ctx *r, u16 delta) { + // this is an horrible hack, to keep the atomic property + // we do an atomic add on the full data, worse case scenario + // the current_period is increased by 256 nanoseconds + __sync_fetch_and_add(&r->data, delta); +} #endif /* _STRUCTS_RATE_LIMITER_H_ */ diff --git 
a/pkg/security/ebpf/c/include/structs/syscalls.h b/pkg/security/ebpf/c/include/structs/syscalls.h index 47aad2b07bb9d..13c16d7ce4539 100644 --- a/pkg/security/ebpf/c/include/structs/syscalls.h +++ b/pkg/security/ebpf/c/include/structs/syscalls.h @@ -218,6 +218,12 @@ struct syscall_cache_t { u16 protocol; } connect; + struct { + u64 addr[2]; + u16 family; + u16 port; + } accept; + struct { struct dentry *dentry; struct path *path; @@ -227,6 +233,10 @@ struct syscall_cache_t { struct { u32 auid; } login_uid; + + struct { + u8 in_flight; + } stat; }; }; diff --git a/pkg/security/ebpf/c/include/tests/activity_dump_ratelimiter_test.h b/pkg/security/ebpf/c/include/tests/activity_dump_ratelimiter_test.h index a9ffc8ebbb17e..11e3ed74abd53 100644 --- a/pkg/security/ebpf/c/include/tests/activity_dump_ratelimiter_test.h +++ b/pkg/security/ebpf/c/include/tests/activity_dump_ratelimiter_test.h @@ -14,9 +14,7 @@ int test_ad_ratelimiter() { u32 rate = AD_RL_TEST_RATE; - struct rate_limiter_ctx ctx; - ctx.counter = 0; - ctx.current_period = now; + struct rate_limiter_ctx ctx = new_rate_limiter(now, 0); u64 cookie = 0; bpf_map_update_elem(&activity_dump_rate_limiters, &cookie, &ctx, BPF_ANY); diff --git a/pkg/security/ebpf/c/include/tests/raw_packet_test.h b/pkg/security/ebpf/c/include/tests/raw_packet_test.h index a00f55225b6ea..0e06bb53b569e 100644 --- a/pkg/security/ebpf/c/include/tests/raw_packet_test.h +++ b/pkg/security/ebpf/c/include/tests/raw_packet_test.h @@ -1,7 +1,7 @@ #ifndef _RAW_PACKET_TEST_H #define _RAW_PACKET_TEST_H -#include "helpers/network.h" +#include "helpers/network/raw.h" #include "baloum.h" SEC("test/raw_packet_tail_calls") diff --git a/pkg/security/ebpf/kernel/kernel.go b/pkg/security/ebpf/kernel/kernel.go index 646163b104936..25e47574d1da0 100644 --- a/pkg/security/ebpf/kernel/kernel.go +++ b/pkg/security/ebpf/kernel/kernel.go @@ -330,6 +330,54 @@ func (k *Version) HaveRingBuffers() bool { return features.HaveMapType(ebpf.RingBuf) == nil } +// HasNoPreallocMapsInPerfEvent returns true if the kernel supports using non-preallocated maps in perf_event programs +// See https://github.com/torvalds/linux/commit/274052a2b0ab9f380ce22b19ff80a99b99ecb198 +func (k *Version) HasNoPreallocMapsInPerfEvent() bool { + return k.Code >= Kernel6_1 +} + +// HasSKStorage returns true if the kernel supports SK_STORAGE maps +// See https://github.com/torvalds/linux/commit/6ac99e8f23d4b10258406ca0dd7bffca5f31da9d +func (k *Version) HasSKStorage() bool { + if features.HaveMapType(ebpf.SkStorage) == nil { + return true + } + + return k.Code != 0 && k.Code > Kernel5_2 +} + +// HasSKStorageInTracingPrograms returns true if the kernel supports SK_STORAGE maps in tracing programs +// See https://github.com/torvalds/linux/commit/8e4597c627fb48f361e2a5b012202cb1b6cbcd5e +func (k *Version) HasSKStorageInTracingPrograms() bool { + if !k.HasSKStorage() { + return false + } + + if !k.HaveFentrySupport() { + return false + } + + if features.HaveProgramHelper(ebpf.Tracing, asm.FnSkStorageGet) == nil { + return true + } + return k.Code != 0 && k.Code > Kernel5_11 +} + +// IsMapValuesToMapHelpersAllowed returns true if the kernel supports passing map values to map helpers +// See https://github.com/torvalds/linux/commit/d71962f3e627b5941804036755c844fabfb65ff5 +func (k *Version) IsMapValuesToMapHelpersAllowed() bool { + return k.Code != 0 && k.Code > Kernel4_18 +} + +// HasBPFForEachMapElemHelper returns true if the kernel support the bpf_for_each_map_elem helper +// See 
https://github.com/torvalds/linux/commit/69c087ba6225b574afb6e505b72cb75242a3d844 +func (k *Version) HasBPFForEachMapElemHelper() bool { + if features.HaveProgramHelper(ebpf.PerfEvent, asm.FnForEachMapElem) == nil { + return true + } + return k.Code != 0 && k.Code > Kernel5_13 +} + // HavePIDLinkStruct returns whether the kernel uses the pid_link struct, which was removed in 4.19 func (k *Version) HavePIDLinkStruct() bool { return k.Code != 0 && k.Code < Kernel4_19 && !k.IsRH8Kernel() diff --git a/pkg/security/ebpf/probes/accept.go b/pkg/security/ebpf/probes/accept.go new file mode 100644 index 0000000000000..80d4b217ceb2a --- /dev/null +++ b/pkg/security/ebpf/probes/accept.go @@ -0,0 +1,24 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +// Package probes holds probes related files +package probes + +import manager "github.com/DataDog/ebpf-manager" + +func getAcceptProbes() []*manager.Probe { + var acceptProbes []*manager.Probe + + acceptProbes = append(acceptProbes, &manager.Probe{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "hook_accept", + }, + }) + + return acceptProbes +} diff --git a/pkg/security/ebpf/probes/all.go b/pkg/security/ebpf/probes/all.go index 777630d1cb688..3e34f2288bd8f 100644 --- a/pkg/security/ebpf/probes/all.go +++ b/pkg/security/ebpf/probes/all.go @@ -16,6 +16,7 @@ import ( "github.com/cilium/ebpf" "golang.org/x/sys/unix" + "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/utils" ) @@ -35,6 +36,20 @@ var ( EventsPerfRingBufferSize = 256 * os.Getpagesize() ) +func appendSyscallProbes(probes []*manager.Probe, fentry bool, flag int, compat bool, syscalls ...string) []*manager.Probe { + for _, syscall := range syscalls { + probes = append(probes, + ExpandSyscallProbes(&manager.Probe{ + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + }, + SyscallFuncName: syscall, + }, fentry, flag, compat)...) + } + + return probes +} + // computeDefaultEventsRingBufferSize is the default buffer size of the ring buffers for events. // Must be a power of 2 and a multiple of the page size func computeDefaultEventsRingBufferSize() uint32 { @@ -78,10 +93,12 @@ func AllProbes(fentry bool) []*manager.Probe { allProbes = append(allProbes, getNetDeviceProbes()...) allProbes = append(allProbes, GetTCProbes(true, true)...) allProbes = append(allProbes, getBindProbes(fentry)...) + allProbes = append(allProbes, getAcceptProbes()...) allProbes = append(allProbes, getConnectProbes(fentry)...) allProbes = append(allProbes, getSyscallMonitorProbes()...) allProbes = append(allProbes, getChdirProbes(fentry)...) allProbes = append(allProbes, GetOnDemandProbes()...) + allProbes = append(allProbes, GetPerfEventProbes()...) 
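
The new capability checks in kernel.go all follow the same pattern: probe the running kernel through the cilium/ebpf features package, and only fall back to a kernel version comparison when the probe does not confirm support. A condensed, illustrative form of that pattern (not part of this change):

    package kernel

    import (
        "github.com/cilium/ebpf"
        "github.com/cilium/ebpf/asm"
        "github.com/cilium/ebpf/features"
    )

    // haveProgramHelper trusts a successful runtime probe and defers to a
    // caller-provided version check otherwise.
    func haveProgramHelper(progType ebpf.ProgramType, helper asm.BuiltinFunc, versionFallback func() bool) bool {
        if features.HaveProgramHelper(progType, helper) == nil {
            return true
        }
        return versionFallback()
    }

    // Example: the perf_event stats worker needs bpf_for_each_map_elem, i.e.
    // haveProgramHelper(ebpf.PerfEvent, asm.FnForEachMapElem,
    //     func() bool { return kv.Code != 0 && kv.Code > Kernel5_13 })
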
allProbes = append(allProbes, &manager.Probe{ @@ -90,7 +107,10 @@ func AllProbes(fentry bool) []*manager.Probe { EBPFFuncName: "sys_exit", }, }, - // Snapshot probe + ) + + // procfs fallback, used to get mount_id + allProbes = append(allProbes, &manager.Probe{ ProbeIdentificationPair: manager.ProbeIdentificationPair{ UID: SecurityAgentUID, @@ -98,6 +118,7 @@ func AllProbes(fentry bool) []*manager.Probe { }, }, ) + allProbes = appendSyscallProbes(allProbes, fentry, EntryAndExit, false, "newfstatat") return allProbes } @@ -114,8 +135,8 @@ func AllMaps() []*manager.Map { {Name: "basename_approvers"}, // Dentry resolver table {Name: "pathnames"}, - // Snapshot table - {Name: "exec_file_cache"}, + // Procfs fallback table + {Name: "inode_file"}, // Open tables {Name: "open_flags_approvers"}, // Exec tables @@ -135,6 +156,13 @@ func AllMaps() []*manager.Map { } } +// AllBPFForEachMapElemProgramFunctions returns the list of programs that leverage the bpf_for_each_map_elem helper +func AllBPFForEachMapElemProgramFunctions() []string { + return []string{ + "network_stats_worker", + } +} + func getMaxEntries(numCPU int, min int, max int) uint32 { maxEntries := int(math.Min(float64(max), float64(min*numCPU)/4)) if maxEntries < min { @@ -146,17 +174,18 @@ func getMaxEntries(numCPU int, min int, max int) uint32 { // MapSpecEditorOpts defines some options of the map spec editor type MapSpecEditorOpts struct { - TracedCgroupSize int - UseMmapableMaps bool - UseRingBuffers bool - RingBufferSize uint32 - PathResolutionEnabled bool - SecurityProfileMaxCount int - ReducedProcPidCacheSize bool + TracedCgroupSize int + UseMmapableMaps bool + UseRingBuffers bool + RingBufferSize uint32 + PathResolutionEnabled bool + SecurityProfileMaxCount int + ReducedProcPidCacheSize bool + NetworkFlowMonitorEnabled bool } // AllMapSpecEditors returns the list of map editors -func AllMapSpecEditors(numCPU int, opts MapSpecEditorOpts) map[string]manager.MapSpecEditor { +func AllMapSpecEditors(numCPU int, opts MapSpecEditorOpts, kv *kernel.Version) map[string]manager.MapSpecEditor { var procPidCacheMaxEntries uint32 if opts.ReducedProcPidCacheSize { procPidCacheMaxEntries = getMaxEntries(numCPU, minProcEntries, maxProcEntries/2) @@ -164,6 +193,15 @@ func AllMapSpecEditors(numCPU int, opts MapSpecEditorOpts) map[string]manager.Ma procPidCacheMaxEntries = getMaxEntries(numCPU, minProcEntries, maxProcEntries) } + var activeFlowsMaxEntries, nsFlowToNetworkStats uint32 + if opts.NetworkFlowMonitorEnabled { + activeFlowsMaxEntries = procPidCacheMaxEntries + nsFlowToNetworkStats = 4096 + } else { + activeFlowsMaxEntries = 1 + nsFlowToNetworkStats = 1 + } + editors := map[string]manager.MapSpecEditor{ "syscalls": { MaxEntries: 8192, @@ -182,6 +220,22 @@ func AllMapSpecEditors(numCPU int, opts MapSpecEditorOpts) map[string]manager.Ma EditorFlag: manager.EditMaxEntries, }, + "active_flows": { + MaxEntries: activeFlowsMaxEntries, + EditorFlag: manager.EditMaxEntries, + }, + "active_flows_spin_locks": { + MaxEntries: activeFlowsMaxEntries, + EditorFlag: manager.EditMaxEntries, + }, + "ns_flow_to_network_stats": { + MaxEntries: nsFlowToNetworkStats, + EditorFlag: manager.EditMaxEntries, + }, + "inet_bind_args": { + MaxEntries: procPidCacheMaxEntries, + EditorFlag: manager.EditMaxEntries, + }, "activity_dumps_config": { MaxEntries: model.MaxTracedCgroupsCount, EditorFlag: manager.EditMaxEntries, @@ -234,6 +288,33 @@ func AllMapSpecEditors(numCPU int, opts MapSpecEditorOpts) map[string]manager.Ma EditorFlag: manager.EditMaxEntries | 
manager.EditType | manager.EditKeyValue, } } + + if !kv.HasSKStorage() { + // Edit each SK_Storage map and transform them to a basic hash maps so they can be loaded by older kernels. + // We need this so that the eBPF manager can link the SK_Storage maps in our eBPF programs, even if deadcode + // elimination will clean up the piece of code that work with them prior to running the verifier. + editors["sock_active_pid_route"] = manager.MapSpecEditor{ + Type: ebpf.Hash, + KeySize: 1, + ValueSize: 1, + MaxEntries: 1, + EditorFlag: manager.EditKeyValue | manager.EditType | manager.EditMaxEntries, + } + } + + if !kv.HasNoPreallocMapsInPerfEvent() { + editors["active_flows"] = manager.MapSpecEditor{ + MaxEntries: activeFlowsMaxEntries, + Flags: unix.BPF_ANY, + EditorFlag: manager.EditMaxEntries | manager.EditFlags, + } + } else { + editors["active_flows"] = manager.MapSpecEditor{ + MaxEntries: activeFlowsMaxEntries, + EditorFlag: manager.EditMaxEntries, + } + } + return editors } @@ -256,7 +337,7 @@ func AllRingBuffers() []*manager.RingBuffer { } // AllTailRoutes returns the list of all the tail call routes -func AllTailRoutes(eRPCDentryResolutionEnabled, networkEnabled, rawPacketEnabled, supportMmapableMaps bool) []manager.TailCallRoute { +func AllTailRoutes(eRPCDentryResolutionEnabled, networkEnabled, networkFlowMonitorEnabled, rawPacketEnabled, supportMmapableMaps bool) []manager.TailCallRoute { var routes []manager.TailCallRoute routes = append(routes, getExecTailCallRoutes()...) @@ -265,6 +346,9 @@ func AllTailRoutes(eRPCDentryResolutionEnabled, networkEnabled, rawPacketEnabled if networkEnabled { routes = append(routes, getTCTailCallRoutes(rawPacketEnabled)...) } + if networkFlowMonitorEnabled { + routes = append(routes, getFlushNetworkStatsTailCallRoutes()...) + } return routes } diff --git a/pkg/security/ebpf/probes/attr.go b/pkg/security/ebpf/probes/attr.go index a8181706b614f..09c202d657295 100644 --- a/pkg/security/ebpf/probes/attr.go +++ b/pkg/security/ebpf/probes/attr.go @@ -21,123 +21,15 @@ func getAttrProbes(fentry bool) []*manager.Probe { } // chmod - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "chmod", - }, fentry, EntryAndExit)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "fchmod", - }, fentry, EntryAndExit)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "fchmodat", - }, fentry, EntryAndExit)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "fchmodat2", - }, fentry, EntryAndExit)...) + attrProbes = appendSyscallProbes(attrProbes, fentry, EntryAndExit, false, "chmod", "fchmod", "fchmodat", "fchmodat2") // chown - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "chown", - }, fentry, EntryAndExit)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "chown16", - }, fentry, EntryAndExit)...) 
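
AllMapSpecEditors now receives the kernel version so it can shrink the new network-flow maps to a single entry when the feature is disabled, and rewrite specs that older kernels cannot load (for example turning the SK_STORAGE map into a tiny hash map that dead-code elimination leaves unused). A small hedged sketch of that editor pattern, with a placeholder helper that is not part of the patch:

    package probes

    import (
        manager "github.com/DataDog/ebpf-manager"
        "github.com/cilium/ebpf"
    )

    // optionalMapEditor sizes an optional map down to one entry when its feature
    // is off, and optionally downgrades it to a plain hash map when the kernel
    // cannot load its real type.
    func optionalMapEditor(enabled bool, maxEntries uint32, downgradeToHash bool) manager.MapSpecEditor {
        editor := manager.MapSpecEditor{
            MaxEntries: 1,
            EditorFlag: manager.EditMaxEntries,
        }
        if enabled {
            editor.MaxEntries = maxEntries
        }
        if downgradeToHash {
            // mirror the sock_active_pid_route fallback: keep the map linkable,
            // but make it as small and cheap as possible
            editor.Type = ebpf.Hash
            editor.KeySize = 1
            editor.ValueSize = 1
            editor.EditorFlag |= manager.EditType | manager.EditKeyValue
        }
        return editor
    }
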
- attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "fchown", - }, fentry, EntryAndExit)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "fchown16", - }, fentry, EntryAndExit)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "fchownat", - }, fentry, EntryAndExit)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "lchown", - }, fentry, EntryAndExit)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "lchown16", - }, fentry, EntryAndExit)...) + attrProbes = appendSyscallProbes(attrProbes, fentry, EntryAndExit, false, "chown", "chown16", "fchown", "fchown16", "fchownat", "lchown", "lchown16") // utime - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "utime", - }, fentry, EntryAndExit, true)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "utime32", - }, fentry, EntryAndExit)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "utimes", - }, fentry, EntryAndExit, true)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "utimes", - }, fentry, EntryAndExit|ExpandTime32)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "utimensat", - }, fentry, EntryAndExit, true)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "utimensat", - }, fentry, EntryAndExit|ExpandTime32)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "futimesat", - }, fentry, EntryAndExit, true)...) - attrProbes = append(attrProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "futimesat", - }, fentry, EntryAndExit|ExpandTime32)...) 
+ attrProbes = appendSyscallProbes(attrProbes, fentry, EntryAndExit, true, "utime", "utimes", "utimensat", "futimesat") + attrProbes = appendSyscallProbes(attrProbes, fentry, EntryAndExit, false, "utime32") + attrProbes = appendSyscallProbes(attrProbes, fentry, EntryAndExit|ExpandTime32, false, "utimes", "utimensat", "futimesat") + return attrProbes } diff --git a/pkg/security/ebpf/probes/bind.go b/pkg/security/ebpf/probes/bind.go index f66fbdddcc455..8cbe9930ced1f 100644 --- a/pkg/security/ebpf/probes/bind.go +++ b/pkg/security/ebpf/probes/bind.go @@ -12,13 +12,7 @@ import manager "github.com/DataDog/ebpf-manager" func getBindProbes(fentry bool) []*manager.Probe { var bindProbes []*manager.Probe - bindProbes = append(bindProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "bind", - }, fentry, EntryAndExit)...) - + bindProbes = appendSyscallProbes(bindProbes, fentry, EntryAndExit, false, "bind") bindProbes = append(bindProbes, &manager.Probe{ ProbeIdentificationPair: manager.ProbeIdentificationPair{ UID: SecurityAgentUID, diff --git a/pkg/security/ebpf/probes/chdir.go b/pkg/security/ebpf/probes/chdir.go index 45b207082d711..4b30162c21834 100644 --- a/pkg/security/ebpf/probes/chdir.go +++ b/pkg/security/ebpf/probes/chdir.go @@ -19,18 +19,6 @@ func getChdirProbes(fentry bool) []*manager.Probe { }, }, } - - chdirProbes = append(chdirProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "chdir", - }, fentry, EntryAndExit)...) - chdirProbes = append(chdirProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "fchdir", - }, fentry, EntryAndExit)...) + chdirProbes = appendSyscallProbes(chdirProbes, fentry, EntryAndExit, false, "chdir", "fchdir") return chdirProbes } diff --git a/pkg/security/ebpf/probes/connect.go b/pkg/security/ebpf/probes/connect.go index c96f49a7f4016..d25a5481b6cd8 100644 --- a/pkg/security/ebpf/probes/connect.go +++ b/pkg/security/ebpf/probes/connect.go @@ -12,13 +12,7 @@ import manager "github.com/DataDog/ebpf-manager" func getConnectProbes(fentry bool) []*manager.Probe { var connectProbes []*manager.Probe - connectProbes = append(connectProbes, ExpandSyscallProbes(&manager.Probe{ - ProbeIdentificationPair: manager.ProbeIdentificationPair{ - UID: SecurityAgentUID, - }, - SyscallFuncName: "connect", - }, fentry, EntryAndExit)...) 
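// Editor's note: the appendSyscallProbes helper used above is not shown in this
// diff. The shape below is inferred from its call sites (probe slice, fentry flag,
// expansion flags, compat boolean, then syscall names); the flags type and the
// variadic compat argument of ExpandSyscallProbes are assumptions based on the
// removed code, so treat this as a sketch rather than the actual implementation.
package probes

import manager "github.com/DataDog/ebpf-manager"

func appendSyscallProbes(probes []*manager.Probe, fentry bool, flags int, compat bool, syscalls ...string) []*manager.Probe {
	for _, syscall := range syscalls {
		probe := &manager.Probe{
			ProbeIdentificationPair: manager.ProbeIdentificationPair{
				UID: SecurityAgentUID,
			},
			SyscallFuncName: syscall,
		}
		if compat {
			// mirror the extra boolean the removed call sites passed for some syscalls
			probes = append(probes, ExpandSyscallProbes(probe, fentry, flags, true)...)
		} else {
			probes = append(probes, ExpandSyscallProbes(probe, fentry, flags)...)
		}
	}
	return probes
}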
- + connectProbes = appendSyscallProbes(connectProbes, fentry, EntryAndExit, false, "connect") connectProbes = append(connectProbes, &manager.Probe{ ProbeIdentificationPair: manager.ProbeIdentificationPair{ UID: SecurityAgentUID, diff --git a/pkg/security/ebpf/probes/const.go b/pkg/security/ebpf/probes/const.go index 3d984a02b9b0b..7007c20f036e3 100644 --- a/pkg/security/ebpf/probes/const.go +++ b/pkg/security/ebpf/probes/const.go @@ -107,3 +107,10 @@ const ( // ExecParseArgsEnvsKey is the key to the program that parses arguments and then environment variables ExecParseArgsEnvsKey ) + +const ( + // FlushNetworkStatsExitKey is the key to the program that flushes network stats before resuming the normal exit event processing + FlushNetworkStatsExitKey uint32 = iota + // FlushNetworkStatsExecKey is the key to the program that flushes network stats before resuming the normal exec event processing + FlushNetworkStatsExecKey +) diff --git a/pkg/security/ebpf/probes/event_types.go b/pkg/security/ebpf/probes/event_types.go index b70447dba6a59..543a605211a66 100644 --- a/pkg/security/ebpf/probes/event_types.go +++ b/pkg/security/ebpf/probes/event_types.go @@ -22,6 +22,7 @@ func NetworkNFNatSelectors() []manager.ProbesSelector { &manager.OneOf{Selectors: []manager.ProbesSelector{ kprobeOrFentry("nf_nat_manip_pkt"), kprobeOrFentry("nf_nat_packet"), + kprobeOrFentry("nf_ct_delete"), }}, } } @@ -40,9 +41,14 @@ func NetworkSelectors() []manager.ProbesSelector { return []manager.ProbesSelector{ // flow classification probes &manager.AllOf{Selectors: []manager.ProbesSelector{ + kprobeOrFentry("accept"), kprobeOrFentry("security_socket_bind"), kprobeOrFentry("security_socket_connect"), kprobeOrFentry("security_sk_classify_flow"), + kprobeOrFentry("inet_release"), + kprobeOrFentry("inet_shutdown"), + kprobeOrFentry("inet_bind"), + kprobeOrFentry("sk_common_release"), kprobeOrFentry("path_get"), kprobeOrFentry("proc_fd_link"), }}, @@ -71,13 +77,15 @@ var SyscallMonitorSelectors = []manager.ProbesSelector{ } // SnapshotSelectors selectors required during the snapshot -func SnapshotSelectors() []manager.ProbesSelector { +func SnapshotSelectors(fentry bool) []manager.ProbesSelector { procsOpen := kprobeOrFentry("cgroup_procs_open") tasksOpen := kprobeOrFentry("cgroup_tasks_open") return []manager.ProbesSelector{ + &manager.BestEffort{Selectors: []manager.ProbesSelector{procsOpen, tasksOpen}}, + // required to stat /proc/.../exe kprobeOrFentry("security_inode_getattr"), - &manager.BestEffort{Selectors: []manager.ProbesSelector{procsOpen, tasksOpen}}, + &manager.AllOf{Selectors: ExpandSyscallProbesSelector(SecurityAgentUID, "newfstatat", fentry, EntryAndExit)}, } } @@ -442,6 +450,12 @@ func GetSelectorsPerEventType(fentry bool) map[eval.EventType][]manager.ProbesSe kretprobeOrFexit("get_pipe_info"), }}}, + // List of probes required to capture accept events + "accept": { + &manager.AllOf{Selectors: []manager.ProbesSelector{ + kprobeOrFentry("accept"), + }}, + }, // List of probes required to capture bind events "bind": { &manager.AllOf{Selectors: []manager.ProbesSelector{ @@ -465,10 +479,22 @@ func GetSelectorsPerEventType(fentry bool) map[eval.EventType][]manager.ProbesSe &manager.OneOf{Selectors: ExpandSyscallProbesSelector(SecurityAgentUID, "chdir", fentry, EntryAndExit)}, &manager.OneOf{Selectors: ExpandSyscallProbesSelector(SecurityAgentUID, "fchdir", fentry, EntryAndExit)}, }, + + "network_flow_monitor": { + // perf_event probes + &manager.AllOf{Selectors: []manager.ProbesSelector{ + &manager.ProbeSelector{ 
+ ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "network_stats_worker", + }, + }, + }}, + }, } // Add probes required to track network interfaces and map network flows to processes - // networkEventTypes: dns, imds, packet + // networkEventTypes: dns, imds, packet, network_monitor networkEventTypes := model.GetEventTypePerCategory(model.NetworkCategory)[model.NetworkCategory] for _, networkEventType := range networkEventTypes { selectorsPerEventTypeStore[networkEventType] = []manager.ProbesSelector{ diff --git a/pkg/security/ebpf/probes/flow.go b/pkg/security/ebpf/probes/flow.go index c09c741c572ad..8700f5093a699 100644 --- a/pkg/security/ebpf/probes/flow.go +++ b/pkg/security/ebpf/probes/flow.go @@ -18,6 +18,30 @@ func getFlowProbes() []*manager.Probe { EBPFFuncName: "hook_security_sk_classify_flow", }, }, + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "hook_inet_release", + }, + }, + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "hook_sk_common_release", + }, + }, + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "hook_inet_shutdown", + }, + }, + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "hook_inet_bind", + }, + }, { ProbeIdentificationPair: manager.ProbeIdentificationPair{ UID: SecurityAgentUID, @@ -30,6 +54,12 @@ func getFlowProbes() []*manager.Probe { EBPFFuncName: "hook_nf_nat_packet", }, }, + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "hook_nf_ct_delete", + }, + }, { ProbeIdentificationPair: manager.ProbeIdentificationPair{ UID: SecurityAgentUID, @@ -44,3 +74,30 @@ func getFlowProbes() []*manager.Probe { }, } } + +// GetAllFlushNetworkStatsTaillCallFunctions returns the list of network flush tail call functions +func GetAllFlushNetworkStatsTaillCallFunctions() []string { + return []string{ + "tail_call_target_flush_network_stats_exec", + "tail_call_target_flush_network_stats_exit", + } +} + +func getFlushNetworkStatsTailCallRoutes() []manager.TailCallRoute { + return []manager.TailCallRoute{ + { + ProgArrayName: "flush_network_stats_progs", + Key: FlushNetworkStatsExecKey, + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "tail_call_target_flush_network_stats_exec", + }, + }, + { + ProgArrayName: "flush_network_stats_progs", + Key: FlushNetworkStatsExitKey, + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "tail_call_target_flush_network_stats_exit", + }, + }, + } +} diff --git a/pkg/security/ebpf/probes/perf_event.go b/pkg/security/ebpf/probes/perf_event.go new file mode 100644 index 0000000000000..5b9f766d82ec5 --- /dev/null +++ b/pkg/security/ebpf/probes/perf_event.go @@ -0,0 +1,30 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
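// Editor's sketch (hypothetical call site, not from this patch): the two routes
// above map FlushNetworkStatsExecKey and FlushNetworkStatsExitKey into the
// "flush_network_stats_progs" program array, and callers of AllTailRoutes now
// pass an extra networkFlowMonitorEnabled flag to opt into them. The wrapper and
// package name below are only for illustration.
package example

import (
	manager "github.com/DataDog/ebpf-manager"

	"github.com/DataDog/datadog-agent/pkg/security/ebpf/probes"
)

func tailRoutesForConfig(erpc, network, flowMonitor, rawPacket, mmapableMaps bool) []manager.TailCallRoute {
	// the new networkFlowMonitorEnabled argument sits between networkEnabled and rawPacketEnabled
	return probes.AllTailRoutes(erpc, network, flowMonitor, rawPacket, mmapableMaps)
}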
+ +//go:build linux + +// Package probes holds probes related files +package probes + +import ( + manager "github.com/DataDog/ebpf-manager" + "golang.org/x/sys/unix" +) + +// GetPerfEventProbes returns the list of perf event Probes +func GetPerfEventProbes() []*manager.Probe { + return []*manager.Probe{ + { + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + UID: SecurityAgentUID, + EBPFFuncName: "network_stats_worker", + }, + SampleFrequency: 1, + PerfEventType: unix.PERF_TYPE_SOFTWARE, + PerfEventConfig: unix.PERF_COUNT_SW_CPU_CLOCK, + PerfEventCPUCount: 1, + }, + } +} diff --git a/pkg/security/ebpf/probes/raw_sys_exit.go b/pkg/security/ebpf/probes/raw_sys_exit.go index 14e489cb47a58..c31717ae003b8 100644 --- a/pkg/security/ebpf/probes/raw_sys_exit.go +++ b/pkg/security/ebpf/probes/raw_sys_exit.go @@ -198,5 +198,12 @@ func getSysExitTailCallRoutes() []manager.TailCallRoute { EBPFFuncName: "tracepoint_handle_sys_chdir_exit", }, }, + { + ProgArrayName: "sys_exit_progs", + Key: uint32(model.StatEventType), + ProbeIdentificationPair: manager.ProbeIdentificationPair{ + EBPFFuncName: "tracepoint_handle_sys_newfstatat_exit", + }, + }, } } diff --git a/pkg/security/ebpf/probes/rawpacket/pcap.go b/pkg/security/ebpf/probes/rawpacket/pcap.go index 8ffc7c451c6ab..db138ad4a7365 100644 --- a/pkg/security/ebpf/probes/rawpacket/pcap.go +++ b/pkg/security/ebpf/probes/rawpacket/pcap.go @@ -120,6 +120,7 @@ func filtersToProgs(filters []Filter, opts ProgOpts, headerInsts, senderInsts as // prepend a return instruction in case of fail footerInsts := append(asm.Instructions{ + asm.Mov.Imm(asm.R0, 0), asm.Return(), }, senderInsts...) diff --git a/pkg/security/events/token_limiter.go b/pkg/security/events/token_limiter.go index bcfe9e7c97c91..f441db703791c 100644 --- a/pkg/security/events/token_limiter.go +++ b/pkg/security/events/token_limiter.go @@ -36,7 +36,7 @@ func (tkl *TokenLimiter) genGetTokenFnc(fields []eval.Field) error { event := m.NewEvent() for _, field := range fields { - if _, err := event.GetFieldType(field); err != nil { + if _, _, err := event.GetFieldMetadata(field); err != nil { return err } } diff --git a/pkg/security/secl/compiler/generators/accessors/accessors.go b/pkg/security/generators/accessors/accessors.go similarity index 91% rename from pkg/security/secl/compiler/generators/accessors/accessors.go rename to pkg/security/generators/accessors/accessors.go index 7dad3100588ae..3b9124959ef37 100644 --- a/pkg/security/secl/compiler/generators/accessors/accessors.go +++ b/pkg/security/generators/accessors/accessors.go @@ -18,7 +18,6 @@ import ( "os/exec" "path" "reflect" - "slices" "strconv" "strings" "text/template" @@ -31,8 +30,8 @@ import ( "golang.org/x/text/language" "golang.org/x/tools/go/packages" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/generators/accessors/common" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/generators/accessors/doc" + "github.com/DataDog/datadog-agent/pkg/security/generators/accessors/common" + "github.com/DataDog/datadog-agent/pkg/security/generators/accessors/doc" ) const ( @@ -106,10 +105,6 @@ func origTypeToBasicType(kind string) string { return kind } -func isNetType(kind string) bool { - return kind == "net.IPNet" -} - func isBasicType(kind string) bool { switch kind { case "string", "bool", "int", "int8", "int16", "int32", "int64", "uint8", "uint16", "uint32", "uint64", "net.IPNet": @@ -168,6 +163,7 @@ func handleBasic(module *common.Module, field seclField, name, alias, aliasPrefi Alias: alias, 
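// Editor's sketch (not from this patch): GetPerfEventProbes above attaches the
// "network_stats_worker" program to a PERF_COUNT_SW_CPU_CLOCK software event with
// SampleFrequency set to 1, so the worker is expected to fire roughly once per
// second to flush collected flow statistics. Registering it with an eBPF manager
// could look like the snippet below; the function and package names are illustrative.
package example

import (
	manager "github.com/DataDog/ebpf-manager"

	"github.com/DataDog/datadog-agent/pkg/security/ebpf/probes"
)

func addPerfEventProbes(m *manager.Manager) {
	m.Probes = append(m.Probes, probes.GetPerfEventProbes()...)
}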
AliasPrefix: aliasPrefix, GettersOnly: field.gettersOnly, + GenGetters: field.genGetters, Ref: field.ref, RestrictedTo: restrictedTo, } @@ -198,6 +194,7 @@ func handleBasic(module *common.Module, field seclField, name, alias, aliasPrefi Alias: alias, AliasPrefix: aliasPrefix, GettersOnly: field.gettersOnly, + GenGetters: field.genGetters, Ref: field.ref, RestrictedTo: restrictedTo, } @@ -304,7 +301,7 @@ func handleFieldWithHandler(module *common.Module, field seclField, aliasPrefix, alias = aliasPrefix + "." + alias } - if event == "" { + if event == "" && verbose { log.Printf("event type not specified for field: %s", prefixedFieldName) } @@ -329,6 +326,7 @@ func handleFieldWithHandler(module *common.Module, field seclField, aliasPrefix, Alias: alias, AliasPrefix: aliasPrefix, GettersOnly: field.gettersOnly, + GenGetters: field.genGetters, Ref: field.ref, RestrictedTo: restrictedTo, } @@ -383,6 +381,7 @@ type seclField struct { exposedAtEventRootOnly bool // fields that should only be exposed at the root of an event, i.e. `parent` should not be exposed for an `ancestor` of a process containerStructName string gettersOnly bool // a field that is not exposed via SECL, but still has an accessor generated + genGetters bool ref string } @@ -432,6 +431,8 @@ func parseFieldDef(def string) (seclField, error) { case "getters_only": field.gettersOnly = true field.exposedAtEventRootOnly = true + case "gen_getters": + field.genGetters = true } } } @@ -463,6 +464,7 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa for _, field := range structType.Fields.List { fieldCommentText := field.Comment.Text() fieldIterator := iterator + fieldEvent := event var tag reflect.StructTag if field.Tag != nil { @@ -470,7 +472,7 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa } if e, ok := tag.Lookup("event"); ok { - event = e + fieldEvent = e if _, ok = module.EventTypes[e]; !ok { module.EventTypes[e] = common.NewEventTypeMetada() dejavu = make(map[string]bool) // clear dejavu map when it's a new event type @@ -502,8 +504,8 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa embedded := astFiles.LookupSymbol(ident.Name) if embedded != nil { - handleEmbedded(module, ident.Name, prefix, event, restrictedTo, field.Type) - handleSpecRecursive(module, astFiles, embedded.Decl, name, aliasPrefix, event, restrictedTo, fieldIterator, dejavu) + handleEmbedded(module, ident.Name, prefix, fieldEvent, restrictedTo, field.Type) + handleSpecRecursive(module, astFiles, embedded.Decl, name, aliasPrefix, fieldEvent, restrictedTo, fieldIterator, dejavu) } else { log.Printf("failed to resolve symbol for identifier %+v in %s", ident.Name, pkgname) } @@ -539,14 +541,14 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa } for _, seclField := range fields { - handleNonEmbedded(module, seclField, prefixedFieldName, event, restrictedTo, fieldType, isPointer, isArray) + handleNonEmbedded(module, seclField, prefixedFieldName, fieldEvent, restrictedTo, fieldType, isPointer, isArray) if seclFieldIterator := seclField.iterator; seclFieldIterator != "" { - fieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, event, restrictedTo, fieldCommentText, opOverrides, isPointer, isArray) + fieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, fieldEvent, restrictedTo, fieldCommentText, opOverrides, isPointer, 
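// Editor's note: the new gen_getters option (parsed by parseFieldDef and copied
// onto common.StructField as GenGetters) makes the typed Get<Field>() accessors
// opt-in: field_accessors.tmpl now skips every field where it is false. The loop
// below only illustrates that filter; it is not part of the generator.
package example

import "github.com/DataDog/datadog-agent/pkg/security/generators/accessors/common"

func fieldsWithGetters(fields map[string]*common.StructField) []string {
	var names []string
	for name, field := range fields {
		if field.GenGetters { // only opt-in fields get a generated accessor
			names = append(names, name)
		}
	}
	return names
}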
isArray) } if handler := seclField.handler; handler != "" { - handleFieldWithHandler(module, seclField, aliasPrefix, prefix, prefixedFieldName, fieldType, seclField.containerStructName, event, restrictedTo, fieldCommentText, opOverrides, handler, isPointer, isArray, fieldIterator) + handleFieldWithHandler(module, seclField, aliasPrefix, prefix, prefixedFieldName, fieldType, seclField.containerStructName, fieldEvent, restrictedTo, fieldCommentText, opOverrides, handler, isPointer, isArray, fieldIterator) delete(dejavu, fieldBasename) continue @@ -562,15 +564,9 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa continue } - if isNetType((fieldType)) { - if !slices.Contains(module.Imports, "net") { - module.Imports = append(module.Imports, "net") - } - } - alias := seclField.name if isBasicType(fieldType) { - handleBasic(module, seclField, fieldBasename, alias, aliasPrefix, prefix, fieldType, event, restrictedTo, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray) + handleBasic(module, seclField, fieldBasename, alias, aliasPrefix, prefix, fieldType, fieldEvent, restrictedTo, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray) } else { spec := astFiles.LookupSymbol(fieldType) if spec != nil { @@ -584,7 +580,7 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa newAliasPrefix = aliasPrefix + "." + alias } - handleSpecRecursive(module, astFiles, spec.Decl, newPrefix, newAliasPrefix, event, restrictedTo, fieldIterator, dejavu) + handleSpecRecursive(module, astFiles, spec.Decl, newPrefix, newAliasPrefix, fieldEvent, restrictedTo, fieldIterator, dejavu) } else { log.Printf("failed to resolve symbol for type %+v in %s", fieldType, pkgname) } @@ -595,14 +591,14 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa } } for _, seclField := range gettersOnlyFields { - handleNonEmbedded(module, seclField, prefixedFieldName, event, restrictedTo, fieldType, isPointer, isArray) + handleNonEmbedded(module, seclField, prefixedFieldName, fieldEvent, restrictedTo, fieldType, isPointer, isArray) if seclFieldIterator := seclField.iterator; seclFieldIterator != "" { - fieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, event, restrictedTo, fieldCommentText, opOverrides, isPointer, isArray) + fieldIterator = handleIterator(module, seclField, fieldType, seclFieldIterator, aliasPrefix, prefixedFieldName, fieldEvent, restrictedTo, fieldCommentText, opOverrides, isPointer, isArray) } if handler := seclField.handler; handler != "" { - handleFieldWithHandler(module, seclField, aliasPrefix, prefix, prefixedFieldName, fieldType, seclField.containerStructName, event, restrictedTo, fieldCommentText, opOverrides, handler, isPointer, isArray, fieldIterator) + handleFieldWithHandler(module, seclField, aliasPrefix, prefix, prefixedFieldName, fieldType, seclField.containerStructName, fieldEvent, restrictedTo, fieldCommentText, opOverrides, handler, isPointer, isArray, fieldIterator) delete(dejavu, fieldBasename) continue @@ -620,7 +616,7 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa alias := seclField.name if isBasicTypeForGettersOnly(fieldType) { - handleBasic(module, seclField, fieldBasename, alias, aliasPrefix, prefix, fieldType, event, restrictedTo, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray) + handleBasic(module, seclField, 
fieldBasename, alias, aliasPrefix, prefix, fieldType, fieldEvent, restrictedTo, opOverrides, fieldCommentText, seclField.containerStructName, fieldIterator, isArray) } else { spec := astFiles.LookupSymbol(fieldType) if spec != nil { @@ -634,7 +630,7 @@ func handleSpecRecursive(module *common.Module, astFiles *AstFiles, spec interfa newAliasPrefix = aliasPrefix + "." + alias } - handleSpecRecursive(module, astFiles, spec.Decl, newPrefix, newAliasPrefix, event, restrictedTo, fieldIterator, dejavu) + handleSpecRecursive(module, astFiles, spec.Decl, newPrefix, newAliasPrefix, fieldEvent, restrictedTo, fieldIterator, dejavu) } else { log.Printf("failed to resolve symbol for type %+v in %s", fieldType, pkgname) } @@ -756,9 +752,9 @@ func formatBuildTags(buildTags string) []string { return formattedBuildTags } -func newField(allFields map[string]*common.StructField, field *common.StructField) string { +func newField(allFields map[string]*common.StructField, inputField *common.StructField) string { var fieldPath, result string - for _, node := range strings.Split(field.Name, ".") { + for _, node := range strings.Split(inputField.Name, ".") { if fieldPath != "" { fieldPath += "." + node } else { @@ -768,6 +764,8 @@ func newField(allFields map[string]*common.StructField, field *common.StructFiel if field, ok := allFields[fieldPath]; ok { if field.IsOrigTypePtr { result += fmt.Sprintf("if ev.%s == nil { ev.%s = &%s{} }\n", field.Name, field.Name, field.OrigType) + } else if field.IsArray && fieldPath != inputField.Name { + result += fmt.Sprintf("if len(ev.%s) == 0 { ev.%s = append(ev.%s, %s{}) }\n", field.Name, field.Name, field.Name, field.OrigType) } } } @@ -775,6 +773,25 @@ func newField(allFields map[string]*common.StructField, field *common.StructFiel return result } +func buildFirstAccessor(allFields map[string]*common.StructField, inputField *common.StructField) string { + var fieldPath string + for _, node := range strings.Split(inputField.Name, ".") { + if fieldPath != "" { + fieldPath += "." + node + } else { + fieldPath = node + } + + if field, ok := allFields[fieldPath]; ok { + if field.IsArray && fieldPath != inputField.Name { + fieldPath += "[0]" + } + } + } + + return "ev." 
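// Editor's illustration (hypothetical types, not generated code): for a field whose
// path crosses a slice, NewField now emits a guard that creates the first element
// and BuildFirstAccessor addresses it with [0], so SetFieldValue can write through
// an empty slice instead of failing. The standalone program below shows the shape
// of the emitted code.
package main

import "fmt"

type leaf struct{ Path string }

type event struct{ Items []leaf }

func main() {
	ev := &event{}
	// generated guard: make sure the slice has a first element before writing to it
	if len(ev.Items) == 0 {
		ev.Items = append(ev.Items, leaf{})
	}
	// generated accessor: address the first element
	ev.Items[0].Path = "/tmp/example"
	fmt.Println(ev.Items[0].Path)
}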
+ fieldPath +} + func generatePrefixNilChecks(allFields map[string]*common.StructField, returnType string, field *common.StructField) string { var fieldPath, result string for _, node := range strings.Split(field.Name, ".") { @@ -845,7 +862,7 @@ func getDefaultValueOfType(returnType string) string { return "false" } else if baseType == "net.IPNet" { if isArray { - return "&eval.CIDRValues{}" + return "[]net.IPNet{}" } return "net.IPNet{}" } else if baseType == "time.Time" { @@ -990,11 +1007,26 @@ func getFieldRestrictions(field *common.StructField) string { return fmt.Sprintf(`[]eval.EventType{"%s"}`, strings.Join(field.RestrictedTo, `", "`)) } +func getFieldReflectType(field *common.StructField) string { + switch field.ReturnType { + case "string": + return "reflect.String" + case "int": + return "reflect.Int" + case "bool": + return "reflect.Bool" + case "net.IPNet": + return "reflect.Struct" + } + return "" +} + var funcMap = map[string]interface{}{ "TrimPrefix": strings.TrimPrefix, "TrimSuffix": strings.TrimSuffix, "HasPrefix": strings.HasPrefix, "NewField": newField, + "BuildFirstAccessor": buildFirstAccessor, "GeneratePrefixNilChecks": generatePrefixNilChecks, "GetFieldHandler": getFieldHandler, "FieldADPrint": fieldADPrint, @@ -1005,6 +1037,7 @@ var funcMap = map[string]interface{}{ "NeedScrubbed": needScrubbed, "AddSuffixToFuncPrototype": addSuffixToFuncPrototype, "GetFieldRestrictions": getFieldRestrictions, + "GetFieldReflectType": getFieldReflectType, } //go:embed accessors.tmpl diff --git a/pkg/security/secl/compiler/generators/accessors/accessors.tmpl b/pkg/security/generators/accessors/accessors.tmpl similarity index 62% rename from pkg/security/secl/compiler/generators/accessors/accessors.tmpl rename to pkg/security/generators/accessors/accessors.tmpl index 4829db9985f8a..5845cc4931c8b 100644 --- a/pkg/security/secl/compiler/generators/accessors/accessors.tmpl +++ b/pkg/security/generators/accessors/accessors.tmpl @@ -9,9 +9,7 @@ package {{.Name}} import ( - {{range .Imports }} - "{{.}}" - {{end}} + "net" "reflect" "math" @@ -22,8 +20,9 @@ import ( // to always require the math package var _ = math.MaxUint16 +var _ = net.IP{} -func (m *Model) GetEventTypes() []eval.EventType { +func (_ *Model) GetEventTypes() []eval.EventType { return []eval.EventType{ {{range $Name, $Exists := .EventTypes}} {{- if ne $Name ""}} @@ -33,7 +32,7 @@ func (m *Model) GetEventTypes() []eval.EventType { } } -func (m *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { +func (_ *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { switch field { {{range $Name, $Field := .Fields}} {{- if $Field.RestrictedTo }} @@ -46,7 +45,7 @@ func (m *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { return nil } -func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Evaluator, error) { +func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Evaluator, error) { switch field { {{range $Name, $Field := .Fields}} {{- if $Field.GettersOnly }} @@ -74,24 +73,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{$Checks := $Field | GetChecks $.AllFields}} - if result, ok := ctx.{{$Field.GetCacheName}}[field]; ok { - return result - } - - var results []{{$Field.ReturnType}} - iterator := &{{$Field.Iterator.ReturnType}}{} if regID != "" { + {{if $Field.Iterator.IsOrigTypePtr}} + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil + } + {{else}} value := 
iterator.At(ctx, regID, ctx.Registers[regID]) if value == nil { - return results + return nil } - - {{if $Field.Iterator.IsOrigTypePtr}} - element := value - {{else}} - element := *value + element := *value {{end}} {{range $Check := $Checks}} @@ -99,7 +94,8 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{$SubName := $Field.Iterator.Name | TrimPrefix $Check}} {{$Check = $SubName | printf "element%s"}} if !{{$Check}}() { - return append(results, {{$Field.GetDefaultScalarReturnValue}}) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []{{$Field.ReturnType}}{ {{$Field.GetDefaultScalarReturnValue}} } } {{end}} {{end}} @@ -124,12 +120,14 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{end}} {{if not $Field.GetArrayPrefix}} - results = append(results, result) + return []{{$Field.ReturnType}}{result} {{else}} - results = append(results, result...) + return result {{end}} + } - return results + if result, ok := ctx.{{$Field.GetCacheName}}[field]; ok { + return result } {{$Event := "nil"}} @@ -140,12 +138,13 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{if $Field.GetArrayPrefix}} {{$AncestorFunc = "newAncestorsIteratorArray"}} {{end}} - results = {{$AncestorFunc}}(iterator, ctx, {{$Event}}, func(ev *Event, pce *ProcessCacheEntry) {{$Field.GetArrayPrefix}}{{$Field.ReturnType}} { + results := {{$AncestorFunc}}(iterator, field, ctx, {{$Event}}, func(ev *Event, current *{{$Field.Iterator.OrigType}}) {{$Field.GetArrayPrefix}}{{$Field.ReturnType}} { {{range $Check := $Checks}} {{if $Field.Iterator.Name | HasPrefix $Check}} {{$SubName := $Field.Iterator.Name | TrimPrefix $Check}} - {{$Check = $SubName | printf "pce%s"}} + {{$Check = $SubName | printf "current%s"}} if !{{$Check}}() { + ctx.Error = &eval.ErrNotSupported{Field: field} {{if $Field.GetArrayPrefix}} return nil {{else}} @@ -157,11 +156,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{$SubName := $Field.Iterator.Name | TrimPrefix $Field.Name}} - {{$Return := $SubName | printf "pce%s"}} + {{$Return := $SubName | printf "current%s"}} {{if $Field.Handler }} {{$SubName = $Field.Iterator.Name | TrimPrefix $Field.Prefix}} {{$Handler := $Field.Iterator.Name | TrimPrefix $Field.Handler}} - {{$Return = print "ev.FieldHandlers." $Handler "(ev, &pce" $SubName ")"}} + {{$Return = print "ev.FieldHandlers." 
$Handler "(ev, ¤t" $SubName ")"}} {{end}} {{if eq $Field.ReturnType "int"}} @@ -181,7 +180,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval }, {{- else}} {{- $ReturnType := $Field.ReturnType}} - EvalFnc: func(ctx *eval.Context) {{$Field.GetArrayPrefix}}{{$ReturnType}} { + EvalFnc: func(ctx *eval.Context) {{- if not $Field.IsIterator}}{{$Field.GetArrayPrefix}}{{end}}{{$ReturnType}} { ctx.AppendResolvedField(field) {{- if not (and $Field.IsLength $Field.IsIterator)}} ev := ctx.Event.(*Event) @@ -193,6 +192,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval {{range $Check := $Checks}} {{$Check = $Check | printf "ev.%s"}} if !{{$Check}}() { + ctx.Error = &eval.ErrNotSupported{Field: field} return {{$Field.GetDefaultReturnValue}} } {{end}} @@ -275,132 +275,21 @@ func (ev *Event) GetFields() []eval.Field { } func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { - switch field { - {{range $Name, $Field := .Fields}} - {{- if $Field.GettersOnly }} - {{continue}} - {{end}} - - {{if $Field.Ref}} - {{$Ref := index $.Fields $Field.Ref}} - {{if $Ref}} - {{$Field = $Ref}} - {{end}} - {{end}} - - case "{{$Name}}": - {{- if and $Field.Iterator (not $Field.IsLength)}} - var values []{{$Field.ReturnType}} - - ctx := eval.NewContext(ev) - - iterator := &{{$Field.Iterator.ReturnType}}{} - ptr := iterator.Front(ctx) - - for ptr != nil { - {{if $Field.Iterator.IsOrigTypePtr}} - element := ptr - {{else}} - element := *ptr - {{end}} - - {{$SubName := $Field.Iterator.Name | TrimPrefix $Field.Name}} - - {{$Return := $SubName | printf "element%s"}} - {{if $Field.Handler}} - {{$SubName = $Field.Iterator.Name | TrimPrefix $Field.Prefix}} - {{$Handler := $Field.Iterator.Name | TrimPrefix $Field.Handler}} - {{$Return = print "ev.FieldHandlers." $Handler "(ev, &element" $SubName ")"}} - {{end}} - - {{if $Field.IsLength}} - {{$Return = ".length" | TrimSuffix $Return}} - {{end}} - - {{if and (eq $Field.ReturnType "int") (ne $Field.OrigType "int")}} - result := int({{$Return}}) - {{else}} - {{if $Field.IsLength}} - result := len({{$Return}}) - {{else}} - result := {{$Return}} - {{end}} - {{end}} - - {{if not $Field.GetArrayPrefix}} - values = append(values, result) - {{else}} - values = append(values, result...) - {{end}} - - ptr = iterator.Next() - } - - return values, nil - {{else}} - {{$Return := $Field.Name | printf "ev.%s"}} - - {{$Checks := $Field | GetChecks $.AllFields}} - {{range $Check := $Checks}} - {{$Check = $Check | printf "ev.%s"}} - if !{{$Check}}() { - return {{$Field.GetDefaultReturnValue}}, &eval.ErrNotSupported{Field: field} - } - {{end}} - - {{if $Field.IsLength}} - {{- if $Field.IsIterator}} - ctx := eval.NewContext(ev) - iterator := &{{$Field.Iterator.ReturnType}}{} - {{$Return = "iterator.Len(ctx)"}} - {{else}} - {{$Return = ".length" | TrimSuffix $Return | printf "len(%s)"}} - {{end}} - {{end}} - {{if $Field.Handler}} - {{$Ptr := "&"}} - {{$Parent := index $.AllFields $Field.Prefix}} - {{- if or (not $Parent) $Parent.IsOrigTypePtr}} - {{$Ptr = ""}} - {{end}} - - {{$Prefix := $Field.Prefix}} - {{ if not $Prefix }} - {{$Return = print "ev.FieldHandlers." $Field.Handler "(ev)"}} - {{else}} - {{$Return = print "ev.FieldHandlers." $Field.Handler "(ev, " $Ptr "ev." 
$Prefix ")"}} - {{end}} - {{end}} - - {{if eq $Field.ReturnType "string"}} - return {{$Return}}, nil - {{else if eq $Field.ReturnType "int"}} - {{- if and ($Field.IsArray) (ne $Field.OrigType "int") }} - result := make([]int, len({{$Return}})) - for i, v := range {{$Return}} { - result[i] = int(v) - } - return result, nil - {{- else}} - {{- if ne $Field.OrigType "int"}} - return int({{$Return}}), nil - {{- else}} - return {{$Return}}, nil - {{end -}} - {{end -}} - {{else if eq $Field.ReturnType "bool"}} - return {{$Return}}, nil - {{else if eq $Field.ReturnType "net.IPNet"}} - return {{$Return}}, nil - {{end}} - {{end}} - {{end}} + m := &Model{} + evaluator, err := m.GetEvaluator(field, "") + if err != nil { + return nil, err } - return nil, &eval.ErrFieldNotFound{Field: field} + ctx := eval.NewContext(ev) + value := evaluator.Eval(ctx) + if ctx.Error != nil { + return nil, ctx.Error + } + return value, nil } -func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { +func (ev *Event) GetFieldMetadata(field eval.Field) (eval.EventType, reflect.Kind, error) { switch field { {{range $Name, $Field := .Fields}} {{- if $Field.GettersOnly }} @@ -408,34 +297,11 @@ func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { {{end}} case "{{$Name}}": - return "{{$Field.Event}}", nil + return "{{$Field.Event}}", {{$Field | GetFieldReflectType}}, nil {{end}} } - return "", &eval.ErrFieldNotFound{Field: field} -} - -func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { - switch field { - {{range $Name, $Field := .Fields}} - {{- if $Field.GettersOnly }} - {{continue}} - {{end}} - - case "{{$Name}}": - {{if eq $Field.ReturnType "string"}} - return reflect.String, nil - {{else if eq $Field.ReturnType "int"}} - return reflect.Int, nil - {{else if eq $Field.ReturnType "bool"}} - return reflect.Bool, nil - {{else if eq $Field.ReturnType "net.IPNet"}} - return reflect.Struct, nil - {{end}} - {{end}} - } - - return reflect.Invalid, &eval.ErrFieldNotFound{Field: field} + return "", reflect.Invalid, &eval.ErrFieldNotFound{Field: field} } func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { @@ -452,7 +318,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { {{end}} {{end}} - {{$FieldName := $Field.Name | printf "ev.%s"}} + {{$FieldName := $Field | BuildFirstAccessor $.AllFields}} case "{{$Name}}": {{- $Field | NewField $.AllFields}} {{if $Field.IsLength}} @@ -466,12 +332,12 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: {{$FieldName}} = append({{$FieldName}}, rv...) 
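// Editor's sketch (hypothetical caller): GetFieldValue is now a thin wrapper over
// GetEvaluator, and the former GetFieldEventType/GetFieldType pair is merged into
// GetFieldMetadata, which returns the event type and the reflect.Kind in a single
// lookup. The field string and helper below are only examples.
package example

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/security/secl/model"
)

func inspectField(ev *model.Event, field string) error {
	eventType, kind, err := ev.GetFieldMetadata(field)
	if err != nil {
		return err
	}
	value, err := ev.GetFieldValue(field)
	if err != nil {
		return err
	}
	fmt.Printf("%s: event=%s kind=%s value=%v\n", field, eventType, kind, value)
	return nil
}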
default: - return &eval.ErrValueTypeMismatch{Field: "{{$Field.Name}}"} + return &eval.ErrValueTypeMismatch{Field: "{{$Name}}"} } {{else}} rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "{{$Field.Name}}"} + return &eval.ErrValueTypeMismatch{Field: "{{$Name}}"} } {{- if ne $Field.OrigType "string" }} {{$FieldName}} = {{$Field.OrigType}}(rv) @@ -490,16 +356,16 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { {{$FieldName}} = append({{$FieldName}}, {{$Field.OrigType}}(i)) } default: - return &eval.ErrValueTypeMismatch{Field: "{{$Field.Name}}"} + return &eval.ErrValueTypeMismatch{Field: "{{$Name}}"} } {{else}} rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "{{$Field.Name}}"} + return &eval.ErrValueTypeMismatch{Field: "{{$Name}}"} } {{- if eq $Field.OrigType "uint16" }} if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "{{$Field.Name}}"} + return &eval.ErrValueOutOfRange{Field: "{{$Name}}"} } {{- end }} {{$FieldName}} = {{$Field.OrigType}}(rv) @@ -513,12 +379,12 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []bool: {{$FieldName}} = append({{$FieldName}}, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "{{$Field.Name}}"} + return &eval.ErrValueTypeMismatch{Field: "{{$Name}}"} } {{else}} rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "{{$Field.Name}}"} + return &eval.ErrValueTypeMismatch{Field: "{{$Name}}"} } {{$FieldName}} = rv {{end}} @@ -531,12 +397,12 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []net.IPNet: {{$FieldName}} = append({{$FieldName}}, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "{{$Field.Name}}"} + return &eval.ErrValueTypeMismatch{Field: "{{$Name}}"} } {{else}} rv, ok := value.(net.IPNet) if !ok { - return &eval.ErrValueTypeMismatch{Field: "{{$Field.Name}}"} + return &eval.ErrValueTypeMismatch{Field: "{{$Name}}"} } {{$FieldName}} = rv {{end}} diff --git a/pkg/security/secl/compiler/generators/accessors/common/types.go b/pkg/security/generators/accessors/common/types.go similarity index 96% rename from pkg/security/secl/compiler/generators/accessors/common/types.go rename to pkg/security/generators/accessors/common/types.go index 3e9f3ebef05f6..b942e90f89271 100644 --- a/pkg/security/secl/compiler/generators/accessors/common/types.go +++ b/pkg/security/generators/accessors/common/types.go @@ -43,7 +43,6 @@ type Module struct { Iterators map[string]*StructField EventTypes map[string]*EventTypeMetadata Mock bool - Imports []string } // StructField represents a structure field for which an accessor will be generated @@ -69,6 +68,7 @@ type StructField struct { Alias string AliasPrefix string GettersOnly bool + GenGetters bool Ref string RestrictedTo []string IsIterator bool @@ -91,8 +91,8 @@ func (sf *StructField) GetEvaluatorType() string { } } else if sf.ReturnType == "net.IPNet" { evaluatorType = "eval.CIDREvaluator" - if sf.IsArray { - evaluatorType = "eval.CIDRValuesEvaluator" + if sf.Iterator != nil || sf.IsArray { + evaluatorType = "eval.CIDRArrayEvaluator" } } else { evaluatorType = "eval.StringEvaluator" @@ -155,6 +155,8 @@ func (sf *StructField) GetCacheName() string { return "IntCache" case "bool": return "BoolCache" + case "net.IPNet": + return "IPNetCache" default: panic(fmt.Sprintf("no cache name defined for return type '%s'", sf.ReturnType)) } diff --git a/pkg/security/secl/compiler/generators/accessors/doc/doc.go 
b/pkg/security/generators/accessors/doc/doc.go similarity index 99% rename from pkg/security/secl/compiler/generators/accessors/doc/doc.go rename to pkg/security/generators/accessors/doc/doc.go index 2e53708e2fffc..74443c799e51a 100644 --- a/pkg/security/secl/compiler/generators/accessors/doc/doc.go +++ b/pkg/security/generators/accessors/doc/doc.go @@ -18,7 +18,7 @@ import ( "golang.org/x/tools/go/packages" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/generators/accessors/common" + "github.com/DataDog/datadog-agent/pkg/security/generators/accessors/common" ) const ( diff --git a/pkg/security/secl/compiler/generators/accessors/field_accessors.tmpl b/pkg/security/generators/accessors/field_accessors.tmpl similarity index 94% rename from pkg/security/secl/compiler/generators/accessors/field_accessors.tmpl rename to pkg/security/generators/accessors/field_accessors.tmpl index 02eff112541d7..7fa3da492c2c2 100644 --- a/pkg/security/secl/compiler/generators/accessors/field_accessors.tmpl +++ b/pkg/security/generators/accessors/field_accessors.tmpl @@ -9,16 +9,22 @@ package {{.Name}} import ( - {{range .Imports }} - "{{.}}" - {{end}} + "net" "time" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" ) +var _ = time.Time{} +var _ = net.IP{} +var _ = eval.NewContext + {{range $Name, $Field := .Fields}} +{{if not $Field.GenGetters }} +{{continue}} +{{end}} + {{if $Field.Ref}} {{$Ref := index $.Fields $Field.Ref}} {{if $Ref}} @@ -33,7 +39,7 @@ import ( {{$accessorReturnType = $Field.ReturnType}} {{ end }} -{{ if or (and $Field.Iterator (not $Field.IsIterator)) ($Field.IsArray) }} +{{ if or (and $Field.Iterator (not $Field.IsIterator)) (and $Field.IsArray (not $Field.IsIterator)) }} {{$accessorReturnType = $accessorReturnType | printf "[]%s" }} {{ end }} @@ -92,7 +98,7 @@ func (ev *Event) Get{{$pascalCaseName}}() {{ $accessorReturnType }} { values = append(values, result...) 
{{end}} - ptr = iterator.Next() + ptr = iterator.Next(ctx) } return values diff --git a/pkg/security/secl/compiler/generators/accessors/field_handlers.tmpl b/pkg/security/generators/accessors/field_handlers.tmpl similarity index 100% rename from pkg/security/secl/compiler/generators/accessors/field_handlers.tmpl rename to pkg/security/generators/accessors/field_handlers.tmpl diff --git a/pkg/security/secl/compiler/generators/operators/operators.go b/pkg/security/generators/operators/operators.go similarity index 100% rename from pkg/security/secl/compiler/generators/operators/operators.go rename to pkg/security/generators/operators/operators.go diff --git a/pkg/security/secl/model/syscall_table_generator/syscall_table_generator.go b/pkg/security/generators/syscall_table_generator/syscall_table_generator.go similarity index 100% rename from pkg/security/secl/model/syscall_table_generator/syscall_table_generator.go rename to pkg/security/generators/syscall_table_generator/syscall_table_generator.go diff --git a/pkg/security/metrics/metrics.go b/pkg/security/metrics/metrics.go index 8bbc1b5546dba..a4df3a7c34dda 100644 --- a/pkg/security/metrics/metrics.go +++ b/pkg/security/metrics/metrics.go @@ -68,6 +68,8 @@ var ( // MetricDentryERPC is the counter of eRPC dentry resolution errors by error type // Tags: ret MetricDentryERPC = newRuntimeMetric(".dentry_resolver.erpc") + // MetricDentryCacheSize is the size of the cache + MetricDentryCacheSize = newRuntimeMetric(".dentry_resolver.cache_size") // filtering metrics @@ -266,6 +268,18 @@ var ( // Tags: - MetricSBOMResolverSBOMCacheMiss = newRuntimeMetric(".sbom_resolver.sbom_cache.miss") + // CGroup resolver metrics + + // MetricCGroupResolverActiveCGroups is the name of the metric used to report the count of cgroups kept in memory + // Tags: - + MetricCGroupResolverActiveCGroups = newRuntimeMetric(".cgroup_resolver.active_cgroups") + // MetricCGroupResolverActiveContainerWorkloads is the name of the metric used to report the count of active cgroups corresponding to a container kept in memory + // Tags: - + MetricCGroupResolverActiveContainerWorkloads = newRuntimeMetric(".cgroup_resolver.active_containers") + // MetricCGroupResolverActiveHostWorkloads is the name of the metric used to report the count of active cgroups not corresponding to a container kept in memory + // Tags: - + MetricCGroupResolverActiveHostWorkloads = newRuntimeMetric(".cgroup_resolver.active_non_containers") + // Security Profile metrics // MetricSecurityProfileProfiles is the name of the metric used to report the count of Security Profiles per category diff --git a/pkg/security/module/cws.go b/pkg/security/module/cws.go index 3f65c6378a99c..227ccfa1dea70 100644 --- a/pkg/security/module/cws.go +++ b/pkg/security/module/cws.go @@ -18,6 +18,7 @@ import ( "go.uber.org/atomic" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/eventmonitor" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/events" @@ -64,8 +65,12 @@ type CWSConsumer struct { } // NewCWSConsumer initializes the module with options -func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityConfig, wmeta workloadmeta.Component, opts Opts) (*CWSConsumer, error) { - crtelemetry, err := telemetry.NewContainersRunningTelemetry(cfg, evm.StatsdClient, wmeta) +func NewCWSConsumer(evm 
*eventmonitor.EventMonitor, cfg *config.RuntimeSecurityConfig, wmeta workloadmeta.Component, opts Opts, compression compression.Component) (*CWSConsumer, error) { + crtelemcfg := telemetry.ContainersRunningTelemetryConfig{ + RuntimeEnabled: cfg.RuntimeEnabled, + FIMEnabled: cfg.FIMEnabled, + } + crtelemetry, err := telemetry.NewContainersRunningTelemetry(crtelemcfg, evm.StatsdClient, wmeta) if err != nil { return nil, err } @@ -80,7 +85,7 @@ func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityC family, address := config.GetFamilyAddress(cfg.SocketPath) - apiServer, err := NewAPIServer(cfg, evm.Probe, opts.MsgSender, evm.StatsdClient, selfTester) + apiServer, err := NewAPIServer(cfg, evm.Probe, opts.MsgSender, evm.StatsdClient, selfTester, compression) if err != nil { return nil, err } @@ -146,6 +151,20 @@ func NewCWSConsumer(evm *eventmonitor.EventMonitor, cfg *config.RuntimeSecurityC return c, nil } +func (c *CWSConsumer) onAPIConnectionEstablished() { + seclog.Infof("api client connected, starts sending events") + c.startRunningMetrics() +} + +func (c *CWSConsumer) startRunningMetrics() { + c.ruleEngine.StartRunningMetrics(c.ctx) + + if c.crtelemetry != nil { + // Send containers running telemetry + go c.crtelemetry.Run(c.ctx) + } +} + // ID returns id for CWS func (c *CWSConsumer) ID() string { return "CWS" @@ -164,18 +183,13 @@ func (c *CWSConsumer) Start() error { // start api server c.apiServer.Start(c.ctx) - if err := c.ruleEngine.Start(c.ctx, c.reloader.Chan(), &c.wg); err != nil { + if err := c.ruleEngine.Start(c.ctx, c.reloader.Chan()); err != nil { return err } c.wg.Add(1) go c.statsSender() - if c.crtelemetry != nil { - // Send containers running telemetry - go c.crtelemetry.Run(c.ctx) - } - seclog.Infof("runtime security started") // we can now wait for self test events @@ -201,6 +215,11 @@ func (c *CWSConsumer) Start() error { go c.selfTester.WaitForResult(cb) } + // do not wait external api connection, send directly running metrics + if c.config.SendEventFromSystemProbe { + c.startRunningMetrics() + } + return nil } @@ -268,9 +287,10 @@ func (c *CWSConsumer) Stop() { c.apiServer.Stop() } + c.cancelFnc() + c.ruleEngine.Stop() - c.cancelFnc() c.wg.Wait() c.grpcServer.Stop() diff --git a/pkg/security/module/msg_sender.go b/pkg/security/module/msg_sender.go index eddd3ebf67f0d..a2a4292b9849f 100644 --- a/pkg/security/module/msg_sender.go +++ b/pkg/security/module/msg_sender.go @@ -9,6 +9,7 @@ package module import ( "fmt" + compression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/proto/api" @@ -72,7 +73,7 @@ func (ds *DirectMsgSender) Send(msg *api.SecurityEventMessage, _ func(*api.Secur } // NewDirectMsgSender returns a new direct sender -func NewDirectMsgSender(stopper startstop.Stopper) (*DirectMsgSender, error) { +func NewDirectMsgSender(stopper startstop.Stopper, compression compression.Component) (*DirectMsgSender, error) { useSecRuntimeTrack := pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.use_secruntime_track") endpoints, destinationsCtx, err := common.NewLogContextRuntime(useSecRuntimeTrack) @@ -86,7 +87,7 @@ func NewDirectMsgSender(stopper startstop.Stopper) (*DirectMsgSender, error) { // we set the hostname to the empty string to take advantage of the out of the box message hostname // resolution - reporter, err := 
reporter.NewCWSReporter("", stopper, endpoints, destinationsCtx) + reporter, err := reporter.NewCWSReporter("", stopper, endpoints, destinationsCtx, compression) if err != nil { return nil, fmt.Errorf("failed to create direct reporter: %w", err) } diff --git a/pkg/security/module/server.go b/pkg/security/module/server.go index 3f2e2faf75c36..75b0137a8c7d3 100644 --- a/pkg/security/module/server.go +++ b/pkg/security/module/server.go @@ -22,7 +22,7 @@ import ( "go.uber.org/atomic" "github.com/DataDog/datadog-agent/comp/core/tagger/types" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" + compression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/security/common" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/events" @@ -127,6 +127,7 @@ type APIServer struct { policiesStatusLock sync.RWMutex policiesStatus []*api.PolicyStatus msgSender MsgSender + connEstablished *atomic.Bool // os release data kernelVersion string @@ -177,6 +178,13 @@ func (a *APIServer) SendActivityDump(dump *api.ActivityDumpStreamMessage) { // GetEvents waits for security events func (a *APIServer) GetEvents(_ *api.GetEventParams, stream api.SecurityModule_GetEventsServer) error { + if prev := a.connEstablished.Swap(true); !prev { + // should always be non nil + if a.cwsConsumer != nil { + a.cwsConsumer.onAPIConnectionEstablished() + } + } + for { select { case <-stream.Context().Done(): @@ -290,7 +298,7 @@ func (a *APIServer) start(ctx context.Context) { return true }) case <-ctx.Done(): - a.stopChan <- struct{}{} + close(a.stopChan) return } } @@ -357,7 +365,6 @@ func (a *APIServer) SendEvent(rule *rules.Rule, event events.Event, extTagsCb fu // model event or custom event ? 
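// Editor's note (generic Go illustration, not code from this patch): closing the
// stop channel broadcasts shutdown to every receiver and never blocks, whereas a
// plain send wakes a single receiver and blocks when nobody is listening, which is
// why the ctx.Done() branch above now calls close(a.stopChan).
package main

import (
	"fmt"
	"sync"
)

func main() {
	stop := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-stop // every goroutine observes the close
			fmt.Println("worker", id, "stopping")
		}(i)
	}
	close(stop)
	wg.Wait()
}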
if model event use queuing so that tags and actions can be handled if ev, ok := event.(*model.Event); ok { - //return serializers.MarshalEvent(ev, opts) eventActionReports := ev.GetActionReports() actionReports := make([]model.ActionReport, 0, len(eventActionReports)) for _, ar := range eventActionReports { @@ -564,29 +571,30 @@ func (a *APIServer) getGlobalTags() []string { } // NewAPIServer returns a new gRPC event server -func NewAPIServer(cfg *config.RuntimeSecurityConfig, probe *sprobe.Probe, msgSender MsgSender, client statsd.ClientInterface, selfTester *selftests.SelfTester) (*APIServer, error) { +func NewAPIServer(cfg *config.RuntimeSecurityConfig, probe *sprobe.Probe, msgSender MsgSender, client statsd.ClientInterface, selfTester *selftests.SelfTester, compression compression.Component) (*APIServer, error) { stopper := startstop.NewSerialStopper() as := &APIServer{ - msgs: make(chan *api.SecurityEventMessage, cfg.EventServerBurst*3), - activityDumps: make(chan *api.ActivityDumpStreamMessage, model.MaxTracedCgroupsCount*2), - expiredEvents: make(map[rules.RuleID]*atomic.Int64), - expiredDumps: atomic.NewInt64(0), - statsdClient: client, - probe: probe, - retention: cfg.EventServerRetention, - cfg: cfg, - stopper: stopper, - selfTester: selfTester, - stopChan: make(chan struct{}), - msgSender: msgSender, + msgs: make(chan *api.SecurityEventMessage, cfg.EventServerBurst*3), + activityDumps: make(chan *api.ActivityDumpStreamMessage, model.MaxTracedCgroupsCount*2), + expiredEvents: make(map[rules.RuleID]*atomic.Int64), + expiredDumps: atomic.NewInt64(0), + statsdClient: client, + probe: probe, + retention: cfg.EventServerRetention, + cfg: cfg, + stopper: stopper, + selfTester: selfTester, + stopChan: make(chan struct{}), + msgSender: msgSender, + connEstablished: atomic.NewBool(false), } as.collectOSReleaseData() if as.msgSender == nil { - if pkgconfigsetup.SystemProbe().GetBool("runtime_security_config.direct_send_from_system_probe") { - msgSender, err := NewDirectMsgSender(stopper) + if cfg.SendEventFromSystemProbe { + msgSender, err := NewDirectMsgSender(stopper, compression) if err != nil { log.Errorf("failed to setup direct reporter: %v", err) } else { diff --git a/pkg/security/module/server_linux.go b/pkg/security/module/server_linux.go index 53ac089890280..9e096cd8e4656 100644 --- a/pkg/security/module/server_linux.go +++ b/pkg/security/module/server_linux.go @@ -218,6 +218,7 @@ func (a *APIServer) GetStatus(_ context.Context, _ *api.GetStatusParams) (*api.S KernelLockdown: string(kernel.GetLockdownMode()), UseMmapableMaps: p.GetKernelVersion().HaveMmapableMaps(), UseRingBuffer: p.UseRingBuffers(), + UseFentry: p.GetUseFentry(), } envErrors := p.VerifyEnvironment() diff --git a/pkg/security/probe/config/config.go b/pkg/security/probe/config/config.go index c597eebe84e0e..bda439b55f2fe 100644 --- a/pkg/security/probe/config/config.go +++ b/pkg/security/probe/config/config.go @@ -9,7 +9,6 @@ package config import ( "fmt" "os" - "runtime" "strings" "time" @@ -115,6 +114,15 @@ type Config struct { // RawNetworkClassifierHandle defines the handle at which CWS should insert its Raw TC classifiers. RawNetworkClassifierHandle uint16 + // NetworkFlowMonitorEnabled defines if the network flow monitor should be enabled. + NetworkFlowMonitorEnabled bool + + // NetworkFlowMonitorPeriod defines the period at which collected flows should flushed to user space. 
+ NetworkFlowMonitorPeriod time.Duration + + // NetworkFlowMonitorSKStorageEnabled defines if the network flow monitor should use a SK_STORAGE map (higher memory footprint). + NetworkFlowMonitorSKStorageEnabled bool + // ProcessConsumerEnabled defines if the process-agent wants to receive kernel events ProcessConsumerEnabled bool @@ -150,34 +158,37 @@ func NewConfig() (*Config, error) { setEnv() c := &Config{ - Config: *ebpf.NewConfig(), - EnableAllProbes: getBool("enable_all_probes"), - EnableKernelFilters: getBool("enable_kernel_filters"), - EnableApprovers: getBool("enable_approvers"), - EnableDiscarders: getBool("enable_discarders"), - FlushDiscarderWindow: getInt("flush_discarder_window"), - PIDCacheSize: getInt("pid_cache_size"), - StatsTagsCardinality: getString("events_stats.tags_cardinality"), - CustomSensitiveWords: getStringSlice("custom_sensitive_words"), - ERPCDentryResolutionEnabled: getBool("erpc_dentry_resolution_enabled"), - MapDentryResolutionEnabled: getBool("map_dentry_resolution_enabled"), - DentryCacheSize: getInt("dentry_cache_size"), - RuntimeMonitor: getBool("runtime_monitor.enabled"), - NetworkLazyInterfacePrefixes: getStringSlice("network.lazy_interface_prefixes"), - NetworkClassifierPriority: uint16(getInt("network.classifier_priority")), - NetworkClassifierHandle: uint16(getInt("network.classifier_handle")), - RawNetworkClassifierHandle: uint16(getInt("network.raw_classifier_handle")), - EventStreamUseRingBuffer: getBool("event_stream.use_ring_buffer"), - EventStreamBufferSize: getInt("event_stream.buffer_size"), - EventStreamUseFentry: getEventStreamFentryValue(), - EnvsWithValue: getStringSlice("envs_with_value"), - NetworkEnabled: getBool("network.enabled"), - NetworkIngressEnabled: getBool("network.ingress.enabled"), - NetworkRawPacketEnabled: getBool("network.raw_packet.enabled"), - NetworkPrivateIPRanges: getStringSlice("network.private_ip_ranges"), - NetworkExtraPrivateIPRanges: getStringSlice("network.extra_private_ip_ranges"), - StatsPollingInterval: time.Duration(getInt("events_stats.polling_interval")) * time.Second, - SyscallsMonitorEnabled: getBool("syscalls_monitor.enabled"), + Config: *ebpf.NewConfig(), + EnableAllProbes: getBool("enable_all_probes"), + EnableKernelFilters: getBool("enable_kernel_filters"), + EnableApprovers: getBool("enable_approvers"), + EnableDiscarders: getBool("enable_discarders"), + FlushDiscarderWindow: getInt("flush_discarder_window"), + PIDCacheSize: getInt("pid_cache_size"), + StatsTagsCardinality: getString("events_stats.tags_cardinality"), + CustomSensitiveWords: getStringSlice("custom_sensitive_words"), + ERPCDentryResolutionEnabled: getBool("erpc_dentry_resolution_enabled"), + MapDentryResolutionEnabled: getBool("map_dentry_resolution_enabled"), + DentryCacheSize: getInt("dentry_cache_size"), + RuntimeMonitor: getBool("runtime_monitor.enabled"), + NetworkLazyInterfacePrefixes: getStringSlice("network.lazy_interface_prefixes"), + NetworkClassifierPriority: uint16(getInt("network.classifier_priority")), + NetworkClassifierHandle: uint16(getInt("network.classifier_handle")), + RawNetworkClassifierHandle: uint16(getInt("network.raw_classifier_handle")), + NetworkFlowMonitorPeriod: getDuration("network.flow_monitor.period"), + NetworkFlowMonitorEnabled: getBool("network.flow_monitor.enabled"), + NetworkFlowMonitorSKStorageEnabled: getBool("network.flow_monitor.sk_storage.enabled"), + EventStreamUseRingBuffer: getBool("event_stream.use_ring_buffer"), + EventStreamBufferSize: getInt("event_stream.buffer_size"), + 
EventStreamUseFentry: getBool("event_stream.use_fentry"), + EnvsWithValue: getStringSlice("envs_with_value"), + NetworkEnabled: getBool("network.enabled"), + NetworkIngressEnabled: getBool("network.ingress.enabled"), + NetworkRawPacketEnabled: getBool("network.raw_packet.enabled"), + NetworkPrivateIPRanges: getStringSlice("network.private_ip_ranges"), + NetworkExtraPrivateIPRanges: getStringSlice("network.extra_private_ip_ranges"), + StatsPollingInterval: time.Duration(getInt("events_stats.polling_interval")) * time.Second, + SyscallsMonitorEnabled: getBool("syscalls_monitor.enabled"), // event server SocketPath: pkgconfigsetup.SystemProbe().GetString(join(evNS, "socket")), @@ -221,11 +232,11 @@ func (c *Config) sanitize() error { return fmt.Errorf("runtime_security_config.event_stream.buffer_size must be a power of 2 and a multiple of %d", os.Getpagesize()) } - if !isSet("enable_approvers") && c.EnableKernelFilters { + if !isConfigured("enable_approvers") && c.EnableKernelFilters { c.EnableApprovers = true } - if !isSet("enable_discarders") && c.EnableKernelFilters { + if !isConfigured("enable_discarders") && c.EnableKernelFilters { c.EnableDiscarders = true } @@ -253,21 +264,6 @@ func (c *Config) sanitizeConfigNetwork() { } } -func getEventStreamFentryValue() bool { - if getBool("event_stream.use_fentry") { - return true - } - - switch runtime.GOARCH { - case "amd64": - return getBool("event_stream.use_fentry_amd64") - case "arm64": - return getBool("event_stream.use_fentry_arm64") - default: - return false - } -} - func join(pieces ...string) string { return strings.Join(pieces, ".") } @@ -278,14 +274,14 @@ func getAllKeys(key string) (string, string) { return deprecatedKey, newKey } -func isSet(key string) bool { +func isConfigured(key string) bool { deprecatedKey, newKey := getAllKeys(key) - return pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) || pkgconfigsetup.SystemProbe().IsSet(newKey) + return pkgconfigsetup.SystemProbe().IsConfigured(deprecatedKey) || pkgconfigsetup.SystemProbe().IsConfigured(newKey) } func getBool(key string) bool { deprecatedKey, newKey := getAllKeys(key) - if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsConfigured(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) return pkgconfigsetup.SystemProbe().GetBool(deprecatedKey) } @@ -294,16 +290,25 @@ func getBool(key string) bool { func getInt(key string) int { deprecatedKey, newKey := getAllKeys(key) - if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsConfigured(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) return pkgconfigsetup.SystemProbe().GetInt(deprecatedKey) } return pkgconfigsetup.SystemProbe().GetInt(newKey) } -func getString(key string) string { +func getDuration(key string) time.Duration { deprecatedKey, newKey := getAllKeys(key) if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { + log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) + return pkgconfigsetup.SystemProbe().GetDuration(deprecatedKey) + } + return pkgconfigsetup.SystemProbe().GetDuration(newKey) +} + +func getString(key string) string { + deprecatedKey, newKey := getAllKeys(key) + if pkgconfigsetup.SystemProbe().IsConfigured(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) return pkgconfigsetup.SystemProbe().GetString(deprecatedKey) } @@ -312,7 +317,7 @@ func getString(key 
string) string { func getStringSlice(key string) []string { deprecatedKey, newKey := getAllKeys(key) - if pkgconfigsetup.SystemProbe().IsSet(deprecatedKey) { + if pkgconfigsetup.SystemProbe().IsConfigured(deprecatedKey) { log.Warnf("%s has been deprecated: please set %s instead", deprecatedKey, newKey) return pkgconfigsetup.SystemProbe().GetStringSlice(deprecatedKey) } diff --git a/pkg/security/probe/constantfetch/available.go b/pkg/security/probe/constantfetch/available.go index 1035fb497e600..cbe122d50a50e 100644 --- a/pkg/security/probe/constantfetch/available.go +++ b/pkg/security/probe/constantfetch/available.go @@ -94,3 +94,13 @@ func GetHasVFSRenameStructArgs() (bool, error) { return false, nil } + +// GetBTFFunctionArgCount returns the number of arguments of a BTF function +func GetBTFFunctionArgCount(funcName string) (int, error) { + proto, err := getBTFFuncProto(funcName) + if err != nil { + return 0, err + } + + return len(proto.Params), nil +} diff --git a/pkg/security/probe/constantfetch/available_unsupported.go b/pkg/security/probe/constantfetch/available_unsupported.go index 6195452074bbc..f6f0fa746d10c 100644 --- a/pkg/security/probe/constantfetch/available_unsupported.go +++ b/pkg/security/probe/constantfetch/available_unsupported.go @@ -42,3 +42,8 @@ func GetHasUsernamespaceFirstArgWithBtf() (bool, error) { func GetHasVFSRenameStructArgs() (bool, error) { return false, errors.New("unsupported BTF request") } + +// GetBTFFunctionArgCount not available +func GetBTFFunctionArgCount(_ string) (int, error) { + return 0, errors.New("unsupported BTF request") +} diff --git a/pkg/security/probe/constantfetch/btfhub.go b/pkg/security/probe/constantfetch/btfhub.go index bc67299b013e1..491aba219e361 100644 --- a/pkg/security/probe/constantfetch/btfhub.go +++ b/pkg/security/probe/constantfetch/btfhub.go @@ -9,7 +9,7 @@ package constantfetch import ( - _ "embed" + _ "embed" // for go:embed "encoding/json" "fmt" "runtime" @@ -18,14 +18,11 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" ) -//go:embed btfhub/constants.json -var btfhubConstants []byte - // BTFHubConstantFetcher is a constant fetcher based on BTFHub constants type BTFHubConstantFetcher struct { - kernelVersion *kernel.Version - inStore map[string]uint64 - res map[string]uint64 + currentKernelInfos *kernelInfos + inStore map[string]uint64 + requests []string } var archMapping = map[string]string{ @@ -33,64 +30,73 @@ var archMapping = map[string]string{ "arm64": "arm64", } -// NewBTFHubConstantFetcher returns a new BTFHubConstantFetcher -func NewBTFHubConstantFetcher(kv *kernel.Version) (*BTFHubConstantFetcher, error) { - fetcher := &BTFHubConstantFetcher{ - kernelVersion: kv, - inStore: make(map[string]uint64), - res: make(map[string]uint64), - } - - currentKernelInfos, err := newKernelInfos(kv) - if err != nil { - return nil, fmt.Errorf("failed to collect current kernel infos: %w", err) +func (f *BTFHubConstantFetcher) fillStore() error { + if len(f.inStore) != 0 { + return nil } var constantsInfos BTFHubConstants if err := json.Unmarshal(btfhubConstants, &constantsInfos); err != nil { - return nil, err + return err } for _, kernel := range constantsInfos.Kernels { - if kernel.Distribution == currentKernelInfos.distribution && kernel.DistribVersion == currentKernelInfos.distribVersion && kernel.Arch == currentKernelInfos.arch && kernel.UnameRelease == currentKernelInfos.unameRelease { - fetcher.inStore = constantsInfos.Constants[kernel.ConstantsIndex] + if kernel.Distribution == 
f.currentKernelInfos.distribution && kernel.DistribVersion == f.currentKernelInfos.distribVersion && kernel.Arch == f.currentKernelInfos.arch && kernel.UnameRelease == f.currentKernelInfos.unameRelease { + f.inStore = constantsInfos.Constants[kernel.ConstantsIndex] break } } - return fetcher, nil + return nil } -func (f *BTFHubConstantFetcher) String() string { - return "btfhub" -} +// NewBTFHubConstantFetcher returns a new BTFHubConstantFetcher +func NewBTFHubConstantFetcher(kv *kernel.Version) (*BTFHubConstantFetcher, error) { + currentKernelInfos, err := newKernelInfos(kv) + if err != nil { + return nil, fmt.Errorf("failed to collect current kernel infos: %w", err) + } -// HasConstantsInStore returns true if there is constants in store in BTFHub -func (f *BTFHubConstantFetcher) HasConstantsInStore() bool { - return len(f.inStore) != 0 + return &BTFHubConstantFetcher{ + currentKernelInfos: currentKernelInfos, + inStore: make(map[string]uint64), + }, nil } -func (f *BTFHubConstantFetcher) appendRequest(id string) { - if value, ok := f.inStore[id]; ok { - f.res[id] = value - } else { - f.res[id] = ErrorSentinel - } +func (f *BTFHubConstantFetcher) String() string { + return "btfhub" } // AppendSizeofRequest appends a sizeof request func (f *BTFHubConstantFetcher) AppendSizeofRequest(id, _ string) { - f.appendRequest(id) + f.requests = append(f.requests, id) } // AppendOffsetofRequest appends an offset request func (f *BTFHubConstantFetcher) AppendOffsetofRequest(id, _ string, _ ...string) { - f.appendRequest(id) + f.requests = append(f.requests, id) } // FinishAndGetResults returns the results func (f *BTFHubConstantFetcher) FinishAndGetResults() (map[string]uint64, error) { - return f.res, nil + if len(f.requests) == 0 { + return nil, nil + } + + if err := f.fillStore(); err != nil { + return nil, err + } + + res := make(map[string]uint64) + for _, id := range f.requests { + if value, ok := f.inStore[id]; ok { + res[id] = value + } else { + res[id] = ErrorSentinel + } + } + + return res, nil } type kernelInfos struct { @@ -135,7 +141,6 @@ func newKernelInfos(kv *kernel.Version) (*kernelInfos, error) { // BTFHubConstants represents all the information required for identifying // a unique btf file from BTFHub type BTFHubConstants struct { - Commit string `json:"commit"` Constants []map[string]uint64 `json:"constants"` Kernels []BTFHubKernel `json:"kernels"` } diff --git a/pkg/security/probe/constantfetch/btfhub/.gitignore b/pkg/security/probe/constantfetch/btfhub/.gitignore new file mode 100644 index 0000000000000..8cabdc2b5de70 --- /dev/null +++ b/pkg/security/probe/constantfetch/btfhub/.gitignore @@ -0,0 +1 @@ +constants.json diff --git a/pkg/security/probe/constantfetch/btfhub/constants.json b/pkg/security/probe/constantfetch/btfhub/constants_amd64.json similarity index 65% rename from pkg/security/probe/constantfetch/btfhub/constants.json rename to pkg/security/probe/constantfetch/btfhub/constants_amd64.json index 90238609bab82..3127031428ccd 100644 --- a/pkg/security/probe/constantfetch/btfhub/constants.json +++ b/pkg/security/probe/constantfetch/btfhub/constants_amd64.json @@ -1,5 +1,4 @@ { - "commit": "", "constants": [ { "binprm_file_offset": 168, @@ -11,9 +10,11 @@ "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 
16, "flowi4_saddr_offset": 40, @@ -25,14 +26,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -40,16 +46,22 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 600, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1880, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2328, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -62,9 +74,11 @@ "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -76,14 +90,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -91,16 +110,22 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 608, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1880, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2328, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -113,9 +138,11 @@ "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -127,14 +154,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + 
"mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -142,16 +174,22 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2008, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1384, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -164,9 +202,11 @@ "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 44, "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -178,14 +218,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -193,16 +238,22 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 608, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2008, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2328, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -215,9 +266,11 @@ "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -229,14 +282,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -244,67 +302,144 @@ "pipe_inode_info_bufs_offset": 120, 
"pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 600, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1880, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1320, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 296, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 136, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 584, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2328, - "tty_name_offset": 368, - "tty_offset": 368, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1240, + "tty_name_offset": 400, + "tty_offset": 384, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_type_offset": 8, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, + "inode_ctime_offset": 120, + 
"inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 296, + "net_device_name_offset": 0, + "net_ns_offset": 136, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1240, + "tty_name_offset": 400, + "tty_offset": 384, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -317,6 +452,8 @@ "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -331,14 +468,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -346,16 +488,22 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 608, + "sizeof_inode": 600, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2328, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -368,6 +516,8 @@ "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -382,14 +532,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, 
"pid_numbers_offset": 48, @@ -397,16 +552,22 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 608, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1384, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -417,8 +578,10 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -433,14 +596,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -448,16 +616,22 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 608, + "sizeof_inode": 600, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2328, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1376, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -468,8 +642,10 @@ "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, + "creds_cap_inheritable_offset": 44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -484,14 +660,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -499,95 +680,128 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 608, "sizeof_upid": 32, 
"sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1320, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 296, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 136, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, + "sizeof_inode": 600, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1240, - "tty_name_offset": 400, - "tty_offset": 384, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1312, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 8, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 112, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 8, + "bpf_prog_aux_name_offset": 144, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1000, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + 
"flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 296, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 192, "net_device_name_offset": 0, - "net_ns_offset": 136, - "nf_conn_ct_net_offset": 144, + "net_proc_inum_offset": 72, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -595,79 +809,104 @@ "pipe_inode_info_bufs_offset": 128, "pipe_inode_info_curbuf_offset": 68, "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 96, + "sb_magic_offset": 88, "sizeof_inode": 584, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1240, - "tty_name_offset": 400, - "tty_offset": 384, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1296, + "tty_name_offset": 312, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_aux_id_offset": 8, + "bpf_prog_aux_name_offset": 80, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1000, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 192, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_proc_inum_offset": 72, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, + "sb_magic_offset": 88, + "sizeof_inode": 584, 
"sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 368, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1296, + "tty_name_offset": 312, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -682,43 +921,56 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 608, - "sizeof_upid": 32, + "sizeof_inode": 592, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1424, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -733,43 +985,56 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 56, 
"pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, - "sizeof_upid": 32, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1376, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1424, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -784,231 +1049,325 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 608, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 296, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 
104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1288, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 32, + "sizeof_inode": 648, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1312, + "super_block_s_type_offset": 40, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1128, + "device_nd_net_net_offset": 1288, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 256, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_proc_inum_offset": 72, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - 
"pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 88, - "sizeof_inode": 576, - "sizeof_upid": 32, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 896, - "tty_name_offset": 400, - "tty_offset": 416, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2408, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1128, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 256, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 88, - "sizeof_inode": 560, - "sizeof_upid": 32, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 896, - "tty_name_offset": 400, - "tty_offset": 416, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1328, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, + 
"bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1128, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 288, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 240, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 88, - "sizeof_inode": 544, - "sizeof_upid": 32, + "sb_magic_offset": 96, + "sizeof_inode": 752, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 944, - "tty_name_offset": 400, - "tty_offset": 416, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1488, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 12, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1192, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -1020,44 +1379,61 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 268, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + 
"mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 576, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1808, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1328, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -1069,44 +1445,61 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 248, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 760, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2008, - "tty_name_offset": 368, - "tty_offset": 376, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1480, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, 
"bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -1118,299 +1511,392 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, - "sizeof_upid": 32, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1528, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1336, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 248, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_numbers_offset": 56, - 
"pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 760, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "tty_name_offset": 368, - "tty_offset": 392, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1488, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 200, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, + "creds_cap_inheritable_offset": 44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1336, "tty_name_offset": 368, - "tty_offset": 392, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 464, + "bpf_prog_aux_id_offset": 16, + 
"bpf_prog_aux_name_offset": 160, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "creds_cap_inheritable_offset": 44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 248, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 760, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "tty_name_offset": 368, - "tty_offset": 392, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1488, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 80, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 32, - "bpf_prog_aux_name_offset": 520, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + 
"inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 248, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 752, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "tty_name_offset": 368, - "tty_offset": 392, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1488, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 64, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, + "bpf_map_name_offset": 88, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 496, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, 
"sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1960, - "tty_name_offset": 368, - "tty_offset": 392, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2336, + "tty_name_offset": 360, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 64, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 88, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 512, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "dentry_d_inode_offset": 64, + "dentry_d_name_offset": 48, + "dentry_d_sb_offset": 168, + "dentry_sb_offset": 168, + "device_nd_net_net_offset": 1376, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -1422,46 +1908,62 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, + "net_ns_offset": 264, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_numbers_offset": 176, + "pipe_inode_info_bufs_offset": 240, + "pipe_inode_info_head_offset": 168, + "pipe_inode_info_ring_size_offset": 180, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 760, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1384, - "tty_name_offset": 368, - "tty_offset": 400, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2480, + "tty_name_offset": 488, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 64, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 88, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 496, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, + "creds_cap_inheritable_offset": 44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + 
"dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -1473,57 +1975,147 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1384, - "tty_name_offset": 368, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2336, + "tty_name_offset": 360, "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 512, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 64, + "dentry_d_name_offset": 48, + "dentry_d_sb_offset": 168, + "dentry_sb_offset": 168, + "device_nd_net_net_offset": 1376, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 264, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 176, + "pipe_inode_info_bufs_offset": 240, + "pipe_inode_info_head_offset": 168, + "pipe_inode_info_ring_size_offset": 180, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 760, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + 
"sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2480, + "tty_name_offset": 488, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 4, + "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 288, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 296, "net_device_name_offset": 0, + "net_ns_offset": 136, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -1531,28 +2123,36 @@ "pipe_inode_info_bufs_offset": 128, "pipe_inode_info_curbuf_offset": 68, "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, + "sizeof_inode": 592, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1464, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1272, "tty_name_offset": 400, - "tty_offset": 416, + "tty_offset": 384, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 4, + "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1240, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 32, @@ -1564,47 +2164,57 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 288, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 296, "net_device_name_offset": 0, + "net_ns_offset": 272, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - 
"pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, + "sizeof_inode": 744, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1696, - "tty_name_offset": 400, - "tty_offset": 408, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1352, + "tty_name_offset": 496, + "tty_offset": 464, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 496, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -1616,44 +2226,117 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 256, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, + "sizeof_inode": 584, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1912, - "tty_name_offset": 360, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2264, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + 
"dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1176, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, + "net_device_name_offset": 0, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 560, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1400, + "tty_name_offset": 400, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "binprm_file_offset": 168, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -1668,65 +2351,80 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, + "sizeof_inode": 592, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1872, + "sock_common_skc_num_offset": 14, + 
"socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2408, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 8, - "bpf_prog_aux_name_offset": 144, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1000, + "device_nd_net_net_offset": 1224, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 268, - "net_device_ifindex_offset": 192, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 296, "net_device_name_offset": 0, - "net_proc_inum_offset": 72, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 208, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -1734,86 +2432,103 @@ "pipe_inode_info_bufs_offset": 128, "pipe_inode_info_curbuf_offset": 68, "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 88, - "sizeof_inode": 584, + "sb_magic_offset": 96, + "sizeof_inode": 576, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1296, - "tty_name_offset": 312, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1392, + "tty_name_offset": 400, "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 8, - "bpf_prog_aux_name_offset": 80, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1000, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 
104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 268, - "net_device_ifindex_offset": 192, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_proc_inum_offset": 72, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 88, + "sb_magic_offset": 96, "sizeof_inode": 584, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1296, - "tty_name_offset": 312, - "tty_offset": 416, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2264, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -1825,28 +2540,42 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1424, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1424, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ 
-1856,15 +2585,17 @@ "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_name_offset": 152, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -1876,46 +2607,61 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1424, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1424, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -1927,36 +2673,48 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, 
"pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1488, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 176, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 20, @@ -1966,166 +2724,209 @@ "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1424, "tty_name_offset": 368, - "tty_offset": 392, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1256, 
"file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1960, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1432, "tty_name_offset": 368, - "tty_offset": 392, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, + "bpf_map_name_offset": 88, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 256, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - 
"pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_numbers_offset": 80, + "pipe_inode_info_bufs_offset": 144, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, "tty_name_offset": 368, - "tty_offset": 392, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -2140,154 +2941,184 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1360, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1432, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 8, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + 
"dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1000, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 192, "net_device_name_offset": 0, - "net_ns_offset": 240, - "nf_conn_ct_net_offset": 192, + "nf_conn_ct_net_offset": 240, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 752, - "sizeof_upid": 16, + "sb_magic_offset": 88, + "sizeof_inode": 584, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1456, - "tty_name_offset": 496, - "tty_offset": 440, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1296, + "tty_name_offset": 312, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 
144, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, + "sb_magic_offset": 88, + "sizeof_inode": 544, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1360, - "tty_name_offset": 368, - "tty_offset": 400, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1984, + "tty_name_offset": 400, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -2299,45 +3130,56 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 248, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 760, - "sizeof_upid": 16, + "sizeof_inode": 592, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1448, - "tty_name_offset": 496, - "tty_offset": 440, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2200, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, 
"vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -2352,48 +3194,62 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, - "sizeof_upid": 16, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1368, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2392, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -2405,45 +3261,60 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 248, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 
112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 760, + "sizeof_inode": 576, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1456, - "tty_name_offset": 496, - "tty_offset": 440, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2320, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -2458,316 +3329,396 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1368, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2320, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 200, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, 
"bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 248, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 760, + "sizeof_inode": 632, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1456, - "tty_name_offset": 496, - "tty_offset": 440, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2384, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 200, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, 
"inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 248, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 752, + "sizeof_inode": 632, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1456, - "tty_name_offset": 496, - "tty_offset": 440, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2384, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 64, + "binprm_file_offset": 296, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 208, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 496, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1288, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, + 
"pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 648, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1408, - "tty_name_offset": 360, - "tty_offset": 400, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 64, + "binprm_file_offset": 296, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 208, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 512, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 168, - "dentry_sb_offset": 168, - "device_nd_net_net_offset": 1376, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, "mount_id_offset": 284, - "net_device_ifindex_offset": 256, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 264, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 176, - "pipe_inode_info_bufs_offset": 240, - "pipe_inode_info_head_offset": 168, - "pipe_inode_info_ring_size_offset": 180, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 760, + "sizeof_inode": 648, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1480, - "tty_name_offset": 488, - "tty_offset": 440, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2408, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, 
"vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 64, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 200, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 496, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1288, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, "linux_binprm_p_offset": 24, "mount_id_offset": 284, - "net_device_ifindex_offset": 256, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 632, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 24, - "task_struct_pid_offset": 1408, - "tty_name_offset": 360, - "tty_offset": 400, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2384, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 512, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "binprm_file_offset": 168, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, - "dentry_d_sb_offset": 168, - "dentry_sb_offset": 168, - "device_nd_net_net_offset": 1376, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 
1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -2779,49 +3730,59 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 264, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 176, - "pipe_inode_info_bufs_offset": 240, - "pipe_inode_info_head_offset": 168, - "pipe_inode_info_ring_size_offset": 180, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 760, - "sizeof_upid": 16, + "sizeof_inode": 592, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1480, - "tty_name_offset": 488, - "tty_offset": 440, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1336, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1120, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -2833,48 +3794,60 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 592, - "sizeof_upid": 
16, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1328, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1336, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -2886,48 +3859,60 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 240, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 752, - "sizeof_upid": 16, + "sizeof_inode": 592, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1488, - "tty_name_offset": 496, - "tty_offset": 440, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1312, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1128, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -2939,48 +3924,62 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, 
"linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, + "sizeof_inode": 592, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1328, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1312, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 88, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 256, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -2992,48 +3991,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 248, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 760, + "sizeof_inode": 592, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1480, - "tty_name_offset": 496, - "tty_offset": 440, + 
"sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1376, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 88, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 256, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1056, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -3045,48 +4059,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 592, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1336, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1376, "tty_name_offset": 368, "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 88, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 424, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -3098,48 +4127,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 
192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 248, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 760, + "sizeof_inode": 592, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1488, - "tty_name_offset": 496, - "tty_offset": 440, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2336, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 88, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 424, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1128, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -3151,364 +4195,319 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - 
"sizeof_inode": 600, + "sizeof_inode": 592, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1336, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2336, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 8, + "bpf_prog_aux_name_offset": 144, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1000, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 192, "net_device_name_offset": 0, - "net_ns_offset": 248, - "nf_conn_ct_net_offset": 192, + "net_proc_inum_offset": 72, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 760, - "sizeof_upid": 16, + "sb_magic_offset": 88, + "sizeof_inode": 584, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1488, - "tty_name_offset": 496, - "tty_offset": 440, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1296, + "tty_name_offset": 312, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_id_offset": 8, + 
"bpf_prog_aux_name_offset": 80, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1000, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 192, "net_device_name_offset": 0, - "net_ns_offset": 248, - "nf_conn_ct_net_offset": 192, + "net_proc_inum_offset": 72, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 752, - "sizeof_upid": 16, + "sb_magic_offset": 88, + "sizeof_inode": 584, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1488, - "tty_name_offset": 496, - "tty_offset": 440, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1296, + "tty_name_offset": 312, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 64, + "binprm_file_offset": 296, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 208, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 496, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1288, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - 
"iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, "mount_id_offset": 284, - "net_device_ifindex_offset": 256, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 648, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2336, - "tty_name_offset": 360, - "tty_offset": 400, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 64, + "binprm_file_offset": 296, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 208, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 512, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 168, - "dentry_sb_offset": 168, - "device_nd_net_net_offset": 1376, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 264, - "nf_conn_ct_net_offset": 192, - "pid_level_offset": 4, - "pid_numbers_offset": 176, - "pipe_inode_info_bufs_offset": 240, - "pipe_inode_info_head_offset": 168, - "pipe_inode_info_ring_size_offset": 180, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 760, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2480, - "tty_name_offset": 488, - "tty_offset": 440, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 496, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - 
"creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1288, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, "mount_id_offset": 284, - "net_device_ifindex_offset": 256, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2336, - "tty_name_offset": 360, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 512, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 44, - "creds_uid_offset": 8, - "dentry_d_sb_offset": 168, - "dentry_sb_offset": 168, - "device_nd_net_net_offset": 1376, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 264, - "nf_conn_ct_net_offset": 192, - "pid_level_offset": 4, - "pid_numbers_offset": 176, - "pipe_inode_info_bufs_offset": 240, - "pipe_inode_info_head_offset": 168, - "pipe_inode_info_ring_size_offset": 180, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 760, + "sizeof_inode": 648, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2480, - "tty_name_offset": 488, - "tty_offset": 440, + 
"sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2408, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 8, + "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 32, @@ -3520,14 +4519,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 296, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 136, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -3535,28 +4539,36 @@ "pipe_inode_info_bufs_offset": 128, "pipe_inode_info_curbuf_offset": 68, "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 560, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1304, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 400, - "tty_offset": 384, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 8, + "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 32, @@ -3568,14 +4580,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 296, + "mount_id_offset": 276, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 136, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -3583,74 +4600,100 @@ "pipe_inode_info_bufs_offset": 128, "pipe_inode_info_curbuf_offset": 68, "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 
592, + "sizeof_inode": 560, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1272, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 400, - "tty_offset": 384, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 8, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, - "dentry_d_sb_offset": 152, - "dentry_sb_offset": 152, - "device_nd_net_net_offset": 1368, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 296, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 272, - "nf_conn_ct_net_offset": 192, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 112, - "pipe_inode_info_bufs_offset": 168, - "pipe_inode_info_curbuf_offset": 108, - "pipe_inode_info_nrbufs_offset": 104, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 744, + "sizeof_inode": 592, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1352, - "tty_name_offset": 496, - "tty_offset": 464, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2296, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, + "bpf_map_id_offset": 28, "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 12, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -3665,14 +4708,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 
40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -3680,76 +4728,104 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, + "sizeof_inode": 592, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2264, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2288, "tty_name_offset": 368, "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1176, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 288, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, + "sizeof_inode": 592, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1400, - "tty_name_offset": 400, - "tty_offset": 408, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2296, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + 
"vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 256, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -3764,79 +4840,111 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2336, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 424, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1224, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - 
"mount_id_offset": 268, - "net_device_ifindex_offset": 296, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 208, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 576, - "sizeof_upid": 32, + "sizeof_inode": 592, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1392, - "tty_name_offset": 400, - "tty_offset": 416, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2336, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -3847,9 +4955,11 @@ "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1280, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -3861,14 +4971,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 136, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -3876,33 +4991,37 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 584, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2264, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1328, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, + "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 24, - 
"bpf_prog_tag_offset": 16, + "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, + "device_nd_net_net_offset": 1280, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -3914,14 +5033,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 136, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -3929,34 +5053,37 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, + "sizeof_inode": 584, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1424, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2288, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -3968,14 +5095,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -3983,33 +5115,37 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, + "sizeof_inode": 584, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1424, + "super_block_s_type_offset": 40, 
+ "task_struct_pid_link_offset": 2256, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, + "bpf_map_type_offset": 4, "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_tag_offset": 12, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1312, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -4021,14 +5157,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -4036,69 +5177,86 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, + "sizeof_inode": 584, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1512, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2264, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 176, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "binprm_file_offset": 168, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, 
"net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, + "sizeof_inode": 592, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1384, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2408, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { @@ -4107,12 +5265,14 @@ "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_name_offset": 128, "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1248, @@ -4127,14 +5287,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -4142,84 +5307,104 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 608, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1488, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 176, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, 
"file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_offset": 1424, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2384, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1248, @@ -4234,14 +5419,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -4249,34 +5439,41 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1384, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, 
"vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -4288,47 +5485,58 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1392, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -4343,50 +5551,61 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 
16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1432, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 256, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -4398,49 +5617,61 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 256, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_bufs_offset": 144, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 608, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1872, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 
48, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 256, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -4452,49 +5683,62 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 256, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_bufs_offset": 144, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 608, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -4506,336 +5750,385 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + 
"linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1392, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, + "sizeof_inode": 560, + "sizeof_upid": 32, "sock_common_skc_family_offset": 
16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1432, - "tty_name_offset": 368, - "tty_offset": 408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1184, + "tty_name_offset": 400, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 560, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2008, - "tty_name_offset": 368, - "tty_offset": 376, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1168, + "tty_name_offset": 400, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + 
"device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, + "sizeof_inode": 560, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1872, - "tty_name_offset": 368, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1168, + "tty_name_offset": 400, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 200, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, 
"mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 632, - "sizeof_upid": 16, + "sizeof_inode": 560, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1936, - "tty_name_offset": 368, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1200, + "tty_name_offset": 400, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 200, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + 
"pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 632, - "sizeof_upid": 16, + "sizeof_inode": 560, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1936, - "tty_name_offset": 368, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1168, + "tty_name_offset": 400, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 8, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1000, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 192, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "nf_conn_ct_net_offset": 240, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -4843,47 +6136,60 @@ "pipe_inode_info_bufs_offset": 128, "pipe_inode_info_curbuf_offset": 68, "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 88, - "sizeof_inode": 584, + "sb_magic_offset": 96, + "sizeof_inode": 560, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1296, - "tty_name_offset": 312, - "tty_offset": 416, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1176, + "tty_name_offset": 400, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_type_offset": 4, + "bpf_map_type_offset": 8, "bpf_prog_aux_offset": 16, "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + 
"flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 268, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 128, "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -4891,588 +6197,673 @@ "pipe_inode_info_bufs_offset": 128, "pipe_inode_info_curbuf_offset": 68, "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 88, - "sizeof_inode": 544, + "sb_magic_offset": 96, + "sizeof_inode": 560, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1984, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1176, "tty_name_offset": 400, - "tty_offset": 416, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, + "bpf_map_type_offset": 8, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 568, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2200, - "tty_name_offset": 368, - 
"tty_offset": 376, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1176, + "tty_name_offset": 400, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, + "bpf_map_type_offset": 8, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1128, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 184, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 568, "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2392, - "tty_name_offset": 368, - "tty_offset": 376, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1152, + "tty_name_offset": 400, + "tty_offset": 384, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + 
"flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 576, - "sizeof_upid": 16, + "sizeof_inode": 560, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2320, - "tty_name_offset": 368, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1208, + "tty_name_offset": 400, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 8, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + 
"net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, + "sizeof_inode": 560, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2320, - "tty_name_offset": 368, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1208, + "tty_name_offset": 400, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 200, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 8, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 632, - "sizeof_upid": 16, + "sizeof_inode": 568, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - 
"socket_sock_offset": 24, - "task_struct_pid_offset": 2384, - "tty_name_offset": 368, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1208, + "tty_name_offset": 400, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 200, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 632, - "sizeof_upid": 16, + "sizeof_inode": 560, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2384, - "tty_name_offset": 368, - "tty_offset": 408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1176, + "tty_name_offset": 400, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + 
"binprm_file_offset": 168, + "bpf_map_type_offset": 8, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1128, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 184, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, - "sizeof_upid": 16, + "sizeof_inode": 568, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "tty_name_offset": 368, - "tty_offset": 392, - "vm_area_struct_flags_offset": 80 - }, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1184, + "tty_name_offset": 400, + "tty_offset": 384, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, { - "binprm_file_offset": 296, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1160, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - 
"linux_binprm_p_offset": 280, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 216, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, - "sizeof_upid": 16, + "sizeof_inode": 560, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1960, - "tty_name_offset": 368, - "tty_offset": 392, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1200, + "tty_name_offset": 400, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 200, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1224, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 296, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 208, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 
16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 632, - "sizeof_upid": 16, + "sizeof_inode": 584, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1936, - "tty_name_offset": 368, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1296, + "tty_name_offset": 400, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1224, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 296, "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, + "net_ns_offset": 128, + "nf_conn_ct_net_offset": 208, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, - "sizeof_upid": 16, + "sizeof_inode": 584, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "tty_name_offset": 368, - "tty_offset": 392, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1328, + "tty_name_offset": 400, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, 
- "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, + "binprm_file_offset": 168, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -5480,86 +6871,107 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, - "sizeof_upid": 16, + "sizeof_inode": 592, + "sizeof_upid": 32, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, + "super_block_s_type_offset": 40, "task_struct_pid_link_offset": 2408, "tty_name_offset": 368, - "tty_offset": 392, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 200, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - 
"linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 632, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2384, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2384, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 112, + "bpf_map_type_offset": 24, "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -5571,14 +6983,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -5586,31 +7003,41 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 608, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1456, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 112, + "bpf_map_type_offset": 24, "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, "bpf_prog_aux_offset": 24, 
"bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -5622,14 +7049,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -5637,31 +7069,41 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1336, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 112, + "bpf_map_type_offset": 24, "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, "bpf_prog_aux_offset": 24, "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1120, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -5673,14 +7115,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -5688,32 +7135,41 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1336, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 
0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -5725,14 +7181,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -5740,32 +7201,41 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 608, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1344, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 112, "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -5777,14 +7247,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -5792,32 +7267,42 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - 
"sizeof_upid": 32, + "sizeof_inode": 608, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1312, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 168, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1128, + "device_nd_net_net_offset": 1312, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -5829,14 +7314,19 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, + "inode_sb_offset": 40, "linux_binprm_argc_offset": 192, "linux_binprm_envc_offset": 196, "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_link_pid_offset": 16, "pid_numbers_offset": 48, @@ -5844,34 +7334,42 @@ "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1312, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 256, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -5883,50 +7381,62 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 292, - "net_device_ifindex_offset": 256, + "inode_sb_offset": 
40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1472, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 168, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 256, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1248, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -5938,314 +7448,392 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 292, - "net_device_ifindex_offset": 256, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1376, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2320, "tty_name_offset": 368, - "tty_offset": 400, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, 
+ "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 176, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 256, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1056, + "device_nd_net_net_offset": 1320, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 292, - "net_device_ifindex_offset": 256, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1376, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2320, "tty_name_offset": 368, "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 176, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 424, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, 
"inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 292, - "net_device_ifindex_offset": 256, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1472, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2320, "tty_name_offset": 368, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 176, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 424, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1320, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 292, - "net_device_ifindex_offset": 256, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, 
"sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2336, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2320, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 176, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 176, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 424, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1128, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 292, - "net_device_ifindex_offset": 256, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, + "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2336, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2320, "tty_name_offset": 368, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 12, + "binprm_file_offset": 176, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + 
"dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1192, + "device_nd_net_net_offset": 1320, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, "linux_binprm_p_offset": 152, - "mount_id_offset": 268, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 56, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 576, - "sizeof_upid": 32, + "sizeof_inode": 592, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1808, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2320, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -6257,401 +7845,536 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, 
"pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 584, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2008, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 8, - "bpf_prog_aux_name_offset": 144, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1000, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 192, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_proc_inum_offset": 72, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 88, + "sb_magic_offset": 96, "sizeof_inode": 584, - "sizeof_upid": 32, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1296, - "tty_name_offset": 312, - "tty_offset": 416, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - 
"binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 8, - "bpf_prog_aux_name_offset": 80, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1000, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 192, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_proc_inum_offset": 72, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, - "sb_magic_offset": 88, + "sb_magic_offset": 96, "sizeof_inode": 584, - "sizeof_upid": 32, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1296, - "tty_name_offset": 312, - "tty_offset": 416, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_id_offset": 24, "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - 
"flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 56, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, "tty_name_offset": 368, - "tty_offset": 392, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 
16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1960, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 392, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 296, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 208, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1288, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 56, - "flowi4_uli_offset": 64, - "flowi6_saddr_offset": 72, - "flowi6_uli_offset": 92, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 320, - "linux_binprm_envc_offset": 324, - "linux_binprm_p_offset": 280, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 648, + "sizeof_inode": 592, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 392, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, 
"creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 292, - "net_device_ifindex_offset": 288, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, + "sizeof_inode": 592, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 400, - "tty_offset": 408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - 
"mount_id_offset": 276, - "net_device_ifindex_offset": 288, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, + "sizeof_inode": 592, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 400, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, + "tty_name_offset": 368, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -6666,43 +8389,60 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 292, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + 
"sizeof_inode": 584, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2296, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -6717,47 +8457,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 584, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2288, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -6769,46 +8525,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, 
"inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2296, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -6820,47 +8593,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1472, + 
"sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -6872,49 +8661,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 292, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1376, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 64, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 80, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 256, + "bpf_prog_aux_name_offset": 504, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -6926,50 +8729,62 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, 
- "linux_binprm_envc_offset": 76, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, "linux_binprm_p_offset": 24, - "mount_id_offset": 292, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 256, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 24, - "task_struct_pid_offset": 1472, - "tty_name_offset": 368, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2432, + "tty_name_offset": 360, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 64, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 80, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 256, + "bpf_prog_aux_name_offset": 504, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, + "device_nd_net_net_offset": 1328, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -6981,50 +8796,62 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, "linux_binprm_p_offset": 24, - "mount_id_offset": 292, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 256, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 24, - "task_struct_pid_offset": 2336, - "tty_name_offset": 368, - "tty_offset": 408, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2432, + "tty_name_offset": 360, + "tty_offset": 400, + 
"vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 48, + "binprm_file_offset": 64, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 80, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 424, + "bpf_prog_aux_name_offset": 504, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1328, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7036,50 +8863,62 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, "linux_binprm_p_offset": 24, - "mount_id_offset": 292, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 256, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 24, - "task_struct_pid_offset": 1472, - "tty_name_offset": 368, - "tty_offset": 408, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2416, + "tty_name_offset": 360, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 424, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7091,45 +8930,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, "kernel_clone_args_exit_signal_offset": 32, "linux_binprm_argc_offset": 72, "linux_binprm_envc_offset": 76, "linux_binprm_p_offset": 24, - "mount_id_offset": 292, - 
"net_device_ifindex_offset": 256, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 120, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, + "sizeof_inode": 584, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, "socket_sock_offset": 24, - "task_struct_pid_offset": 2336, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 12, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1280, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7141,44 +8998,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 136, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 584, - "sizeof_upid": 32, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1328, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 12, + "binprm_file_offset": 48, + 
"bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1280, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7190,41 +9066,60 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 136, + "net_ns_offset": 112, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 584, - "sizeof_upid": 32, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2288, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, "tty_name_offset": 368, - "tty_offset": 408, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 12, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -7239,41 +9134,60 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + 
"pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 584, - "sizeof_upid": 32, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2256, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 12, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -7288,43 +9202,60 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 584, - "sizeof_upid": 32, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2264, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, 
"dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -7339,48 +9270,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, + "sizeof_inode": 584, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7392,48 +9338,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 608, + "sizeof_inode": 592, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - 
"socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7445,48 +9406,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 592, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2384, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, + 
"device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7498,48 +9474,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 600, + "sizeof_inode": 592, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7551,45 +9542,60 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - 
"socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -7604,45 +9610,60 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2328, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, "device_nd_net_net_offset": 1256, @@ -7657,48 +9678,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + 
"kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 608, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2344, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7710,49 +9746,63 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 608, + "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2352, "tty_name_offset": 368, - "tty_offset": 376, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + 
"vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, + "binprm_file_offset": 48, "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, + "bpf_map_name_offset": 168, "bpf_map_type_offset": 24, "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, "bpf_prog_aux_offset": 32, "bpf_prog_tag_offset": 20, "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, "flowi4_saddr_offset": 40, @@ -7764,38800 +9814,24841 @@ "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 112, + "net_ns_offset": 120, "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, + "pid_numbers_offset": 80, "pipe_inode_info_buffers_offset": 64, "pipe_inode_info_bufs_offset": 120, "pipe_inode_info_curbuf_offset": 60, "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, "sizeof_inode": 600, "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2368, "tty_name_offset": 368, - "tty_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1256, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 20, - "flowi4_uli_offset": 28, - "flowi6_saddr_offset": 36, - "flowi6_uli_offset": 56, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - 
"net_device_ifindex_offset": 288, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1184, - "tty_name_offset": 400, - "tty_offset": 416, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2368, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 416, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 288, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - 
"pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1168, - "tty_name_offset": 400, - "tty_offset": 416, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2360, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 416, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 288, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1168, - "tty_name_offset": 400, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2376, + "tty_name_offset": 368, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + 
"vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 416, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, - "net_device_ifindex_offset": 288, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1200, - "tty_name_offset": 400, - "tty_offset": 408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2376, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 416, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 
32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, - "net_device_ifindex_offset": 288, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1168, - "tty_name_offset": 400, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 288, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1176, - "tty_name_offset": 400, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_type_offset": 8, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 
32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 288, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1176, - "tty_name_offset": 400, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_type_offset": 8, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 288, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 568, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1176, - "tty_name_offset": 400, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_type_offset": 8, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1128, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 288, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 184, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - 
"sizeof_inode": 568, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1152, - "tty_name_offset": 400, - "tty_offset": 384, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 288, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1208, - "tty_name_offset": 400, - "tty_offset": 408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2360, + "tty_name_offset": 360, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 8, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 416, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, - "net_device_ifindex_offset": 288, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 
216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1208, - "tty_name_offset": 400, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2376, + "tty_name_offset": 360, "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 8, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 416, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, "creds_cap_inheritable_offset": 40, "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, "dentry_d_sb_offset": 104, "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, + "device_nd_net_net_offset": 1264, "file_f_inode_offset": 32, "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, "inode_ctime_offset": 120, "inode_gid_offset": 8, "inode_ino_offset": 64, "inode_mtime_offset": 104, "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, "mount_id_offset": 284, - "net_device_ifindex_offset": 288, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, "sb_flags_offset": 80, "sb_magic_offset": 96, - "sizeof_inode": 568, - "sizeof_upid": 32, + "sizeof_inode": 600, + "sizeof_upid": 16, "sock_common_skc_family_offset": 16, "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1208, - "tty_name_offset": 
400, - "tty_offset": 408, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 2376, + "tty_name_offset": 360, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, "vm_area_struct_flags_offset": 80 - }, + } + ], + "kernels": [ { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 288, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1176, - "tty_name_offset": 400, - "tty_offset": 416, - "vm_area_struct_flags_offset": 80 + "distrib": "amzn", + "version": "2", + "arch": "x86_64", + "uname_release": "4.14.101-91.76.amzn2.x86_64", + "cindex": 0 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 8, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1128, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 288, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 184, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 568, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1184, - "tty_name_offset": 400, - "tty_offset": 384, - "vm_area_struct_flags_offset": 80 + "distrib": "amzn", + "version": "2", + "arch": "x86_64", + "uname_release": "4.14.104-95.84.amzn2.x86_64", + "cindex": 0 }, { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - 
"creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1160, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 288, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 216, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 560, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1200, - "tty_name_offset": 400, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1224, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 296, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 208, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1296, - "tty_name_offset": 400, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_type_offset": 4, - "bpf_prog_aux_offset": 16, - "bpf_prog_type_offset": 8, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1224, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 268, - "net_device_ifindex_offset": 296, - "net_device_name_offset": 0, - "net_ns_offset": 128, - "nf_conn_ct_net_offset": 208, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - 
"pipe_inode_info_buffers_offset": 72, - "pipe_inode_info_bufs_offset": 128, - "pipe_inode_info_curbuf_offset": 68, - "pipe_inode_info_nrbufs_offset": 64, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1328, - "tty_name_offset": 400, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1544, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1312, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1520, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - 
"bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1312, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 608, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1520, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1312, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 1392, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 176, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 
1320, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1392, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 176, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1392, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 176, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - 
"net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 1392, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1416, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - 
"pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1416, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1416, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1416, - "tty_name_offset": 368, - "tty_offset": 400, - 
"vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1416, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 28, - "bpf_map_type_offset": 4, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 32, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2408, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - 
"device_nd_net_net_offset": 1248, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2384, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 608, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - 
"mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 608, - 
"sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 112, - "bpf_map_type_offset": 24, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 128, - "bpf_prog_aux_offset": 24, - "bpf_prog_tag_offset": 16, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 608, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 376, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1312, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - 
"bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 368, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 168, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 16, - "bpf_prog_aux_name_offset": 152, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1248, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 192, - "linux_binprm_envc_offset": 196, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_link_pid_offset": 16, - "pid_numbers_offset": 48, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_link_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 376, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 176, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 
32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 176, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 176, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - 
"nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 176, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - "task_struct_pid_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 176, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 176, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 20, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1320, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 32, - "flowi4_uli_offset": 40, - "flowi6_saddr_offset": 48, - "flowi6_uli_offset": 68, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "linux_binprm_argc_offset": 200, - "linux_binprm_envc_offset": 204, - "linux_binprm_p_offset": 152, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 56, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 32, - 
"task_struct_pid_offset": 2320, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - 
"bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - 
"file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - 
"kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, 
- "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, 
- "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 80, - "bpf_map_type_offset": 24, - 
"bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 504, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1328, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1440, - "tty_name_offset": 360, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 80, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 504, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1440, - "tty_name_offset": 360, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 
16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1416, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1416, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - 
"kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1416, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1416, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, 
- "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1440, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 416, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 1448, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 416, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - 
"task_struct_pid_offset": 1448, - "tty_name_offset": 360, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 80, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 504, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2432, - "tty_name_offset": 360, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 80, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 504, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1328, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2432, - "tty_name_offset": 360, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 80, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 504, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 
20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1328, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2416, - "tty_name_offset": 360, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 
76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 112, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - 
"mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 88, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, 
- "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 584, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, - "tty_name_offset": 368, - "tty_offset": 400, 
- "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 592, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 
20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2328, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2344, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - 
"flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2352, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - "linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2368, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 48, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 168, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 24, - "bpf_prog_aux_name_offset": 176, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1256, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 96, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 72, - 
"linux_binprm_envc_offset": 76, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 264, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 80, - "pipe_inode_info_buffers_offset": 64, - "pipe_inode_info_bufs_offset": 120, - "pipe_inode_info_curbuf_offset": 60, - "pipe_inode_info_nrbufs_offset": 56, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2368, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 416, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2360, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 416, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - 
"pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2376, - "tty_name_offset": 368, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 416, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2376, - "tty_name_offset": 368, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 416, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2360, - "tty_name_offset": 360, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - 
"bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 416, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2376, - "tty_name_offset": 360, - "tty_offset": 408, - "vm_area_struct_flags_offset": 80 - }, - { - "binprm_file_offset": 64, - "bpf_map_id_offset": 48, - "bpf_map_name_offset": 88, - "bpf_map_type_offset": 24, - "bpf_prog_attach_type_offset": 8, - "bpf_prog_aux_id_offset": 28, - "bpf_prog_aux_name_offset": 416, - "bpf_prog_aux_offset": 32, - "bpf_prog_tag_offset": 20, - "bpf_prog_type_offset": 4, - "creds_cap_inheritable_offset": 40, - "creds_uid_offset": 4, - "dentry_d_sb_offset": 104, - "dentry_sb_offset": 104, - "device_nd_net_net_offset": 1264, - "file_f_inode_offset": 32, - "file_f_path_offset": 16, - "flowi4_saddr_offset": 40, - "flowi4_uli_offset": 48, - "flowi6_saddr_offset": 56, - "flowi6_uli_offset": 76, - "inode_ctime_offset": 120, - "inode_gid_offset": 8, - "inode_ino_offset": 64, - "inode_mtime_offset": 104, - "inode_nlink_offset": 72, - "iokiocb_ctx_offset": 80, - "kernel_clone_args_exit_signal_offset": 32, - "linux_binprm_argc_offset": 88, - "linux_binprm_envc_offset": 92, - "linux_binprm_p_offset": 24, - "mount_id_offset": 284, - "net_device_ifindex_offset": 256, - "net_device_name_offset": 0, - "net_ns_offset": 120, - "nf_conn_ct_net_offset": 144, - "pid_level_offset": 4, - "pid_numbers_offset": 96, - "pipe_inode_info_bufs_offset": 152, - "pipe_inode_info_head_offset": 80, - "pipe_inode_info_ring_size_offset": 92, - "sb_flags_offset": 80, - "sb_magic_offset": 96, - "sizeof_inode": 600, - "sizeof_upid": 16, - "sock_common_skc_family_offset": 16, - "sock_common_skc_net_offset": 48, - "socket_sock_offset": 24, - "task_struct_pid_offset": 2376, - "tty_name_offset": 360, - "tty_offset": 400, - "vm_area_struct_flags_offset": 80 - } - ], - "kernels": [ - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.101-91.76.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.104-95.84.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.106-97.85.amzn2.aarch64", - "cindex": 0 - }, - { - 
"distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.109-99.92.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.114-103.97.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.114-105.126.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.121-109.96.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.123-111.109.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.128-112.105.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.133-113.105.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.133-113.112.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.138-114.102.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.143-118.123.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.146-119.123.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.146-120.181.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.152-124.171.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.152-127.182.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.154-128.181.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.158-129.185.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.165-131.185.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.165-133.209.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.171-136.231.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.173-137.228.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.173-137.229.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.177-139.253.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.177-139.254.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.181-140.257.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.181-142.260.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.186-146.268.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": 
"4.14.192-147.314.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.193-149.317.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.198-152.320.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.200-155.322.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.203-156.332.amzn2.aarch64", - "cindex": 1 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.209-160.335.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.209-160.339.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.214-160.339.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.219-161.340.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.219-164.354.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.225-168.357.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.225-169.362.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.231-173.360.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.231-173.361.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.232-176.381.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.232-177.418.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.238-182.421.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.238-182.422.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.241-184.433.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.243-185.433.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.246-187.474.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.248-189.473.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.252-195.481.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.252-195.483.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.256-197.484.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.262-200.489.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.268-205.500.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": 
"amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.273-207.502.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.275-207.503.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.276-211.499.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.281-212.502.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.285-215.501.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.287-215.504.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.290-217.505.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.291-218.527.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.294-220.533.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.296-222.539.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.299-223.520.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.301-224.520.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.301-225.528.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.304-226.531.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.305-227.531.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.309-231.529.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.311-233.529.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.313-235.533.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.314-237.533.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.314-238.539.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.318-240.529.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.318-241.531.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.320-242.534.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.320-243.544.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.322-244.536.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.322-244.539.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": 
"4.14.322-246.539.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.326-245.539.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.327-246.539.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.328-248.540.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.330-250.540.amzn2.aarch64", - "cindex": 2 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.334-252.552.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.336-253.554.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.336-255.557.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.336-256.557.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.336-256.559.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.336-257.562.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.336-257.566.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.336-257.568.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.343-259.562.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.343-260.564.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.343-261.564.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.344-262.563.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.345-262.561.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.348-265.562.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.348-265.565.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.349-266.564.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.350-266.564.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.352-267.564.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.352-268.568.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.352-268.569.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.353-270.569.amzn2.aarch64", - "cindex": 3 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.77-80.57.amzn2.aarch64", - "cindex": 4 - }, - { - "distrib": 
"amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.77-81.59.amzn2.aarch64", - "cindex": 4 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.77-86.82.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.88-88.73.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.88-88.76.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.94-89.73.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "arm64", - "uname_release": "4.14.97-90.72.amzn2.aarch64", - "cindex": 0 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.101-91.76.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.104-95.84.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.106-97.85.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.109-99.92.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.114-103.97.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.114-105.126.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.121-109.96.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.123-111.109.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.128-112.105.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.133-113.105.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.133-113.112.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.138-114.102.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.143-118.123.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.146-119.123.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.146-120.181.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.152-124.171.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.152-127.182.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.154-128.181.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.158-129.185.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.165-131.185.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.165-133.209.amzn2.x86_64", 
- "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.171-136.231.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.173-137.228.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.173-137.229.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.177-139.253.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.177-139.254.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.181-140.257.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.181-142.260.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.186-146.268.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.192-147.314.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.193-149.317.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.198-152.320.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.200-155.322.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.203-156.332.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.209-160.335.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.209-160.339.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.214-160.339.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.219-161.340.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.219-164.354.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.225-168.357.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.225-169.362.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.231-173.360.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.231-173.361.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.232-176.381.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.232-177.418.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.238-182.421.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.238-182.422.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - 
"arch": "x86_64", - "uname_release": "4.14.241-184.433.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.243-185.433.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.246-187.474.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.248-189.473.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.252-195.481.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.252-195.483.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.256-197.484.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.26-54.32.amzn2.x86_64", - "cindex": 7 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.262-200.489.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.268-205.500.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.273-207.502.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.275-207.503.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.276-211.499.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.281-212.502.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.285-215.501.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.287-215.504.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.290-217.505.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.291-218.527.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.294-220.533.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.296-222.539.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.299-223.520.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.301-224.520.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.301-225.528.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.304-226.531.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.305-227.531.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.309-231.529.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.311-233.529.amzn2.x86_64", - 
"cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.313-235.533.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.314-237.533.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.314-238.539.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.318-240.529.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.318-241.531.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.320-242.534.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.320-243.544.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.322-244.536.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.322-244.539.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.322-246.539.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.326-245.539.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.327-246.539.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.328-248.540.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.33-59.34.amzn2.x86_64", - "cindex": 7 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.33-59.37.amzn2.x86_64", - "cindex": 7 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.330-250.540.amzn2.x86_64", - "cindex": 6 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.334-252.552.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.336-253.554.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.336-255.557.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.336-256.557.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.336-256.559.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.336-257.562.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.336-257.566.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.336-257.568.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.343-259.562.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.343-260.564.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": 
"x86_64", - "uname_release": "4.14.343-261.564.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.344-262.563.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.345-262.561.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.348-265.562.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.348-265.565.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.349-266.564.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.350-266.564.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.352-267.564.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.352-268.568.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.352-268.569.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.353-270.569.amzn2.x86_64", - "cindex": 8 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.42-61.37.amzn2.x86_64", - "cindex": 9 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.47-63.37.amzn2.x86_64", - "cindex": 9 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.47-64.38.amzn2.x86_64", - "cindex": 9 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.51-66.38.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.55-68.37.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.59-68.43.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.62-70.117.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.67-71.56.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.70-72.55.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.72-73.55.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.77-80.57.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.77-81.59.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.77-86.82.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.88-88.73.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.88-88.76.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.14.94-89.73.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - 
"version": "2", - "arch": "x86_64", - "uname_release": "4.14.97-90.72.amzn2.x86_64", - "cindex": 5 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.9.62-10.57.amzn2.x86_64", - "cindex": 10 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.9.70-2.243.amzn2.x86_64", - "cindex": 10 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.9.75-1.56.amzn2.x86_64", - "cindex": 10 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.9.76-38.79.amzn2.x86_64", - "cindex": 10 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.9.77-41.59.amzn2.x86_64", - "cindex": 10 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.9.81-44.57.amzn2.x86_64", - "cindex": 11 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.9.85-46.56.amzn2.x86_64", - "cindex": 11 - }, - { - "distrib": "amzn", - "version": "2", - "arch": "x86_64", - "uname_release": "4.9.85-47.59.amzn2.x86_64", - "cindex": 11 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.101-75.76.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.104-78.84.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.106-79.86.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.109-80.92.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.114-82.97.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.114-83.126.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.121-85.96.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.123-86.109.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.128-87.105.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.133-88.105.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.133-88.112.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.138-89.102.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.143-91.122.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.146-93.123.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.152-98.182.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.154-99.181.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.158-101.185.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - 
"uname_release": "4.14.165-102.185.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.165-103.209.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.171-105.231.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.173-106.229.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.177-107.254.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.181-108.257.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.186-110.268.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.193-113.317.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.200-116.320.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.203-116.332.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.209-117.337.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.214-118.339.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.219-119.340.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.225-121.357.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.225-121.362.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.232-123.381.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.238-125.421.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.238-125.422.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.248-129.473.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.252-131.483.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.26-46.32.amzn1.x86_64", - "cindex": 14 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.262-135.486.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.262-135.489.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.268-139.500.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.273-140.502.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.275-142.503.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": 
"2018", - "arch": "x86_64", - "uname_release": "4.14.281-144.502.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.285-147.501.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.287-148.504.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.294-150.533.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.299-152.520.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.301-153.528.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.305-155.531.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.309-159.529.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.311-161.529.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.313-162.533.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.314-164.533.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.314-164.539.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.318-166.529.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.318-167.530.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.320-168.534.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.320-169.544.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.322-170.535.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.322-170.538.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.326-171.539.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.328-174.540.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.33-51.34.amzn1.x86_64", - "cindex": 14 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.33-51.37.amzn1.x86_64", - "cindex": 14 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.330-176.540.amzn1.x86_64", - "cindex": 13 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.334-177.552.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.336-178.554.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.336-179.557.amzn1.x86_64", - "cindex": 15 - }, - { - 
"distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.336-179.559.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.336-180.562.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.336-180.566.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.343-183.564.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.348-187.562.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.348-187.565.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.349-188.564.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.350-188.564.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.352-190.568.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.352-190.569.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.353-190.569.amzn1.x86_64", - "cindex": 15 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.42-52.37.amzn1.x86_64", - "cindex": 16 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.47-56.37.amzn1.x86_64", - "cindex": 16 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.51-60.38.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.55-62.37.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.59-64.43.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.62-65.117.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.67-66.56.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.70-67.55.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.72-68.55.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.77-69.57.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.77-70.59.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.77-70.82.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.88-72.73.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.88-72.76.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.94-73.73.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": 
"amzn", - "version": "2018", - "arch": "x86_64", - "uname_release": "4.14.97-74.72.amzn1.x86_64", - "cindex": 12 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "3.18.9-200.el7.aarch64", - "cindex": 17 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "3.19.0-0.80.aa7a.aarch64", - "cindex": 18 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.0.0-0.rc7.git1.1.el7.aarch64", - "cindex": 19 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.0.0-1.el7.aarch64", - "cindex": 19 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.11.0-22.el7.2.aarch64", - "cindex": 20 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.11.0-22.el7a.aarch64", - "cindex": 20 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.11.0-45.4.1.el7a.aarch64", - "cindex": 20 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.11.0-45.6.1.el7a.aarch64", - "cindex": 20 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.11.0-45.el7.aarch64", - "cindex": 20 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.10.1.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.2.2.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.5.1.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.6.1.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.7.1.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.8.1.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.8.2.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.el7a.0.1.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.10.1.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.13.1.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.2.2.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.8.1.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.el7a.aarch64", - "cindex": 21 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.101-200.el7.aarch64", - "cindex": 22 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.71-201.el7.aarch64", - "cindex": 22 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.78-201.el7.aarch64", - "cindex": 22 - }, - { 
- "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.82-201.el7.aarch64", - "cindex": 22 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.94-200.el7.aarch64", - "cindex": 22 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.18.0-147.0.3.el7.aarch64", - "cindex": 23 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.18.0-147.8.1.el7.aarch64", - "cindex": 23 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.18.0-193.1.2.el7.aarch64", - "cindex": 24 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.18.0-193.28.1.el7.aarch64", - "cindex": 24 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.18.0-305.10.2.el7.aarch64", - "cindex": 25 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.18.0-348.20.1.el7.aarch64", - "cindex": 26 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.18.0-80.7.1.el7.aarch64", - "cindex": 27 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.18.0-80.7.2.el7.aarch64", - "cindex": 27 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.19.104-300.el7.aarch64", - "cindex": 28 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.19.110-300.el7.aarch64", - "cindex": 28 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.19.113-300.el7.aarch64", - "cindex": 29 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.19.23-300.el7.aarch64", - "cindex": 28 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.19.84-300.el7.aarch64", - "cindex": 28 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.19.94-300.el7.aarch64", - "cindex": 28 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.2.0-0.22.el7.1.aarch64", - "cindex": 30 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.2.0-0.24.el7.1.aarch64", - "cindex": 30 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.2.0-0.25.el7.1.aarch64", - "cindex": 30 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.2.0-0.26.el7.1.aarch64", - "cindex": 30 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.2.0-0.27.el7.1.aarch64", - "cindex": 30 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.2.0-0.28.el7.1.aarch64", - "cindex": 30 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.2.0-0.29.el7.1.aarch64", - "cindex": 30 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.2.0-0.30.el7.1.aarch64", - "cindex": 30 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.2.0-0.31.el7.1.aarch64", - "cindex": 30 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.5.0-19.el7.aarch64", - "cindex": 31 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.5.0-20.el7.aarch64", - "cindex": 31 - }, - { - "distrib": 
"centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.5.0-21.el7.aarch64", - "cindex": 31 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.5.0-22.el7.aarch64", - "cindex": 31 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.5.0-23.el7.aarch64", - "cindex": 31 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.5.0-25.el7.aarch64", - "cindex": 31 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.5.0-27.el7.aarch64", - "cindex": 31 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "4.5.0-29.el7.aarch64", - "cindex": 31 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "5.10.109-200.el7.aarch64", - "cindex": 32 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.28-200.el7.aarch64", - "cindex": 33 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.42-200.el7.aarch64", - "cindex": 33 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.49-200.el7.aarch64", - "cindex": 33 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.88-200.el7.aarch64", - "cindex": 33 - }, - { - "distrib": "centos", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.96-200.el7.aarch64", - "cindex": 33 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.1.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.1.2.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.12.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.18.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.4.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.4.2.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.4.3.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.7.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.9.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.10.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.13.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.18.2.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.19.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.8.2.el7.x86_64", - "cindex": 34 - }, - { - "distrib": 
"centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.102.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.105.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.108.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.11.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.114.2.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.118.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.119.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.15.2.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.2.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.2.2.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.21.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.24.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.25.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.31.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.36.2.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.41.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.42.2.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.45.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.49.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.53.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.59.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.6.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.62.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.66.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.71.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": 
"x86_64", - "uname_release": "3.10.0-1160.76.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.80.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.81.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.83.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.88.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.90.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.92.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.95.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.99.1.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.el7.x86_64", - "cindex": 34 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.1.3.el7.x86_64", - "cindex": 35 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.10.1.el7.x86_64", - "cindex": 35 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.12.1.el7.x86_64", - "cindex": 35 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.12.2.el7.x86_64", - "cindex": 35 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.21.2.el7.x86_64", - "cindex": 35 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.21.3.el7.x86_64", - "cindex": 35 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.27.2.el7.x86_64", - "cindex": 35 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.5.1.el7.x86_64", - "cindex": 35 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.el7.x86_64", - "cindex": 35 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "4.19.104-300.el7.x86_64", - "cindex": 36 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "4.19.110-300.el7.x86_64", - "cindex": 36 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "4.19.113-300.el7.x86_64", - "cindex": 37 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "4.19.84-300.el7.x86_64", - "cindex": 36 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "4.19.94-300.el7.x86_64", - "cindex": 36 - }, - { - "distrib": "centos", - "version": "7", - "arch": "x86_64", - "uname_release": "5.4.28-200.el7.x86_64", - "cindex": 38 - }, - { - "distrib": "centos", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.0.3.el8_1.aarch64", - "cindex": 39 - }, - { - "distrib": "centos", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.3.1.el8_1.aarch64", - "cindex": 39 - }, - { - 
"distrib": "centos", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.5.1.el8_1.aarch64", - "cindex": 39 - }, - { - "distrib": "centos", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.8.1.el8_1.aarch64", - "cindex": 39 - }, - { - "distrib": "centos", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.el8.aarch64", - "cindex": 39 - }, - { - "distrib": "centos", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.7.1.el8_0.aarch64", - "cindex": 40 - }, - { - "distrib": "centos", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.el8.aarch64", - "cindex": 40 - }, - { - "distrib": "centos", - "version": "8", - "arch": "x86_64", - "uname_release": "4.18.0-147.0.3.el8_1.x86_64", - "cindex": 39 - }, - { - "distrib": "centos", - "version": "8", - "arch": "x86_64", - "uname_release": "4.18.0-147.3.1.el8_1.x86_64", - "cindex": 39 - }, - { - "distrib": "centos", - "version": "8", - "arch": "x86_64", - "uname_release": "4.18.0-147.5.1.el8_1.x86_64", - "cindex": 39 - }, - { - "distrib": "centos", - "version": "8", - "arch": "x86_64", - "uname_release": "4.18.0-147.8.1.el8_1.x86_64", - "cindex": 39 - }, - { - "distrib": "centos", - "version": "8", - "arch": "x86_64", - "uname_release": "4.18.0-147.el8.x86_64", - "cindex": 39 - }, - { - "distrib": "centos", - "version": "8", - "arch": "x86_64", - "uname_release": "4.18.0-80.7.1.el8_0.x86_64", - "cindex": 41 - }, - { - "distrib": "centos", - "version": "8", - "arch": "x86_64", - "uname_release": "4.18.0-80.el8.x86_64", - "cindex": 41 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-1-arm64", - "cindex": 42 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-1-rt-arm64", - "cindex": 43 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-10-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-10-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-11-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-11-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-12-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-12-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-13-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-13-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-14-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-14-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-15-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-15-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-16-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": 
"4.19.0-16-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-17-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-17-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-18-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-18-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-19-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-19-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-2-arm64", - "cindex": 42 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-2-rt-arm64", - "cindex": 43 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-20-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-20-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-21-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-21-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-22-arm64", - "cindex": 46 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-22-rt-arm64", - "cindex": 47 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-23-arm64", - "cindex": 46 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-23-rt-arm64", - "cindex": 47 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-24-arm64", - "cindex": 46 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-24-rt-arm64", - "cindex": 47 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-25-arm64", - "cindex": 46 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-25-rt-arm64", - "cindex": 47 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-26-arm64", - "cindex": 48 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-26-rt-arm64", - "cindex": 49 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-27-arm64", - "cindex": 48 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-27-rt-arm64", - "cindex": 49 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-3-arm64", - "cindex": 42 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-3-rt-arm64", - "cindex": 43 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-4-arm64", - "cindex": 42 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-4-rt-arm64", - "cindex": 43 - }, - { - "distrib": "debian", - 
"version": "10", - "arch": "arm64", - "uname_release": "4.19.0-5-arm64", - "cindex": 42 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-5-rt-arm64", - "cindex": 50 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-6-arm64", - "cindex": 42 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-6-rt-arm64", - "cindex": 50 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-7-arm64", - "cindex": 42 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-7-rt-arm64", - "cindex": 50 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-8-arm64", - "cindex": 42 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-8-rt-arm64", - "cindex": 50 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-9-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "4.19.0-9-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.17-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.17-cloud-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.17-rt-arm64", - "cindex": 52 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.19-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.19-cloud-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.19-rt-arm64", - "cindex": 52 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.20-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.20-cloud-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.20-rt-arm64", - "cindex": 52 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.21-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.21-cloud-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.21-rt-arm64", - "cindex": 52 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.22-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.22-cloud-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.22-rt-arm64", - "cindex": 52 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.23-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.23-cloud-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - 
"uname_release": "5.10.0-0.deb10.23-rt-arm64", - "cindex": 52 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.24-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.24-cloud-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.24-rt-arm64", - "cindex": 52 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.26-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.26-cloud-arm64", - "cindex": 51 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.26-rt-arm64", - "cindex": 52 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.27-arm64", - "cindex": 53 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.27-cloud-arm64", - "cindex": 53 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.27-rt-arm64", - "cindex": 54 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.28-arm64", - "cindex": 53 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.28-cloud-arm64", - "cindex": 53 - }, - { - "distrib": "debian", - "version": "10", - "arch": "arm64", - "uname_release": "5.10.0-0.deb10.28-rt-arm64", - "cindex": 54 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-1-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-1-cloud-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-1-rt-amd64", - "cindex": 56 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-10-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-10-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-10-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-11-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-11-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-11-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-12-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-12-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-12-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-13-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-13-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": 
"4.19.0-13-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-14-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-14-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-14-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-15-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-15-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-15-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-16-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-16-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-16-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-17-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-17-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-17-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-18-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-18-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-18-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-19-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-19-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-19-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-2-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-2-cloud-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-2-rt-amd64", - "cindex": 56 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-20-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-20-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-20-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-21-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-21-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-21-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": 
"4.19.0-22-amd64", - "cindex": 59 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-22-cloud-amd64", - "cindex": 59 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-22-rt-amd64", - "cindex": 60 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-23-amd64", - "cindex": 59 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-23-cloud-amd64", - "cindex": 59 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-23-rt-amd64", - "cindex": 60 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-24-amd64", - "cindex": 59 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-24-cloud-amd64", - "cindex": 59 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-24-rt-amd64", - "cindex": 60 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-25-amd64", - "cindex": 59 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-25-cloud-amd64", - "cindex": 59 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-25-rt-amd64", - "cindex": 60 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-26-amd64", - "cindex": 61 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-26-cloud-amd64", - "cindex": 61 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-26-rt-amd64", - "cindex": 62 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-27-amd64", - "cindex": 61 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-27-cloud-amd64", - "cindex": 61 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-27-rt-amd64", - "cindex": 62 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-3-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-3-cloud-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-3-rt-amd64", - "cindex": 56 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-4-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-4-cloud-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-4-rt-amd64", - "cindex": 56 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-5-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-5-cloud-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-5-rt-amd64", - "cindex": 63 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-6-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": 
"4.19.0-6-cloud-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-6-rt-amd64", - "cindex": 63 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-7-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-7-cloud-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-7-rt-amd64", - "cindex": 63 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-8-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-8-cloud-amd64", - "cindex": 55 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-8-rt-amd64", - "cindex": 63 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-9-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-9-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "4.19.0-9-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.17-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.17-cloud-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.17-rt-amd64", - "cindex": 65 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.19-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.19-cloud-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.19-rt-amd64", - "cindex": 65 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.20-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.20-cloud-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.20-rt-amd64", - "cindex": 65 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.21-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.21-cloud-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.21-rt-amd64", - "cindex": 65 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.22-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.22-cloud-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.22-rt-amd64", - "cindex": 65 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.23-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": 
"5.10.0-0.deb10.23-cloud-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.23-rt-amd64", - "cindex": 65 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.24-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.24-cloud-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.24-rt-amd64", - "cindex": 65 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.26-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.26-cloud-amd64", - "cindex": 64 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.26-rt-amd64", - "cindex": 65 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.27-amd64", - "cindex": 66 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.27-cloud-amd64", - "cindex": 66 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.27-rt-amd64", - "cindex": 67 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.28-amd64", - "cindex": 66 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.28-cloud-amd64", - "cindex": 66 - }, - { - "distrib": "debian", - "version": "10", - "arch": "x86_64", - "uname_release": "5.10.0-0.deb10.28-rt-amd64", - "cindex": 67 - }, - { - "distrib": "debian", - "version": "9", - "arch": "arm64", - "uname_release": "4.19.0-0.bpo.19-arm64", - "cindex": 44 - }, - { - "distrib": "debian", - "version": "9", - "arch": "arm64", - "uname_release": "4.19.0-0.bpo.19-rt-arm64", - "cindex": 45 - }, - { - "distrib": "debian", - "version": "9", - "arch": "arm64", - "uname_release": "4.9.0-13-arm64", - "cindex": 68 - }, - { - "distrib": "debian", - "version": "9", - "arch": "arm64", - "uname_release": "4.9.0-18-arm64", - "cindex": 68 - }, - { - "distrib": "debian", - "version": "9", - "arch": "arm64", - "uname_release": "4.9.0-19-arm64", - "cindex": 68 - }, - { - "distrib": "debian", - "version": "9", - "arch": "x86_64", - "uname_release": "4.19.0-0.bpo.19-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "9", - "arch": "x86_64", - "uname_release": "4.19.0-0.bpo.19-cloud-amd64", - "cindex": 57 - }, - { - "distrib": "debian", - "version": "9", - "arch": "x86_64", - "uname_release": "4.19.0-0.bpo.19-rt-amd64", - "cindex": 58 - }, - { - "distrib": "debian", - "version": "9", - "arch": "x86_64", - "uname_release": "4.9.0-13-amd64", - "cindex": 69 - }, - { - "distrib": "debian", - "version": "9", - "arch": "x86_64", - "uname_release": "4.9.0-13-rt-amd64", - "cindex": 70 - }, - { - "distrib": "debian", - "version": "9", - "arch": "x86_64", - "uname_release": "4.9.0-18-amd64", - "cindex": 69 - }, - { - "distrib": "debian", - "version": "9", - "arch": "x86_64", - "uname_release": "4.9.0-18-rt-amd64", - "cindex": 70 - }, - { - "distrib": "debian", - "version": "9", - "arch": "x86_64", - "uname_release": "4.9.0-19-amd64", - "cindex": 69 - }, - { - "distrib": "debian", - "version": "9", - "arch": "x86_64", - "uname_release": "4.9.0-19-rt-amd64", - 
"cindex": 70 - }, - { - "distrib": "fedora", - "version": "24", - "arch": "x86_64", - "uname_release": "4.11.12-100.fc24.x86_64", - "cindex": 71 - }, - { - "distrib": "fedora", - "version": "24", - "arch": "x86_64", - "uname_release": "4.5.5-300.fc24.x86_64", - "cindex": 72 - }, - { - "distrib": "fedora", - "version": "25", - "arch": "x86_64", - "uname_release": "4.13.16-100.fc25.x86_64", - "cindex": 73 - }, - { - "distrib": "fedora", - "version": "25", - "arch": "x86_64", - "uname_release": "4.8.6-300.fc25.x86_64", - "cindex": 74 - }, - { - "distrib": "fedora", - "version": "26", - "arch": "x86_64", - "uname_release": "4.11.8-300.fc26.x86_64", - "cindex": 75 - }, - { - "distrib": "fedora", - "version": "26", - "arch": "x86_64", - "uname_release": "4.16.11-100.fc26.x86_64", - "cindex": 76 - }, - { - "distrib": "fedora", - "version": "27", - "arch": "x86_64", - "uname_release": "4.13.9-300.fc27.x86_64", - "cindex": 73 - }, - { - "distrib": "fedora", - "version": "27", - "arch": "x86_64", - "uname_release": "4.18.19-100.fc27.x86_64", - "cindex": 77 - }, - { - "distrib": "fedora", - "version": "28", - "arch": "arm64", - "uname_release": "4.16.3-301.fc28.aarch64", - "cindex": 78 - }, - { - "distrib": "fedora", - "version": "28", - "arch": "arm64", - "uname_release": "5.0.16-100.fc28.aarch64", - "cindex": 79 - }, - { - "distrib": "fedora", - "version": "28", - "arch": "x86_64", - "uname_release": "4.16.3-301.fc28.x86_64", - "cindex": 80 - }, - { - "distrib": "fedora", - "version": "28", - "arch": "x86_64", - "uname_release": "5.0.16-100.fc28.x86_64", - "cindex": 81 - }, - { - "distrib": "fedora", - "version": "29", - "arch": "arm64", - "uname_release": "4.18.16-300.fc29.aarch64", - "cindex": 82 - }, - { - "distrib": "fedora", - "version": "29", - "arch": "arm64", - "uname_release": "5.3.11-100.fc29.aarch64", - "cindex": 83 - }, - { - "distrib": "fedora", - "version": "29", - "arch": "x86_64", - "uname_release": "4.18.16-300.fc29.x86_64", - "cindex": 77 - }, - { - "distrib": "fedora", - "version": "29", - "arch": "x86_64", - "uname_release": "5.3.11-100.fc29.x86_64", - "cindex": 84 - }, - { - "distrib": "fedora", - "version": "30", - "arch": "arm64", - "uname_release": "5.0.9-301.fc30.aarch64", - "cindex": 79 - }, - { - "distrib": "fedora", - "version": "30", - "arch": "arm64", - "uname_release": "5.6.13-100.fc30.aarch64", - "cindex": 85 - }, - { - "distrib": "fedora", - "version": "30", - "arch": "x86_64", - "uname_release": "5.0.9-301.fc30.x86_64", - "cindex": 81 - }, - { - "distrib": "fedora", - "version": "30", - "arch": "x86_64", - "uname_release": "5.6.13-100.fc30.x86_64", - "cindex": 86 - }, - { - "distrib": "fedora", - "version": "31", - "arch": "arm64", - "uname_release": "5.3.7-301.fc31.aarch64", - "cindex": 87 - }, - { - "distrib": "fedora", - "version": "31", - "arch": "x86_64", - "uname_release": "5.3.7-301.fc31.x86_64", - "cindex": 88 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1818.0.10.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1818.0.15.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1818.0.9.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1818.1.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1818.2.1.el7uek.aarch64", - 
"cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1818.3.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1818.4.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1818.4.7.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1818.5.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.0.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.0.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.0.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.0.7.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.1.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.2.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.3.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.4.5.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.4.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1844.5.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1846.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1847.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1848.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1849.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1850a.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1851.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1901.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.10.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.11.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.12.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.13.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.14.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - 
"uname_release": "4.14.35-1902.0.15.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.18.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.7.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.0.9.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.1.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.10.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.10.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.10.4.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.10.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.10.7.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.10.8.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.11.3.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.11.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.12.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.3.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.3.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.300.11.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.301.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.302.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.302.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.302.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.303.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": 
"4.14.35-1902.303.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.303.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.303.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.303.5.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.304.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.304.6.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.304.6.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.304.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.305.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.305.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.305.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.305.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.10.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.12.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.13.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.14.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.7.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.8.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.306.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.4.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.4.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.4.8.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - 
"uname_release": "4.14.35-1902.5.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.5.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.5.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.5.2.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.5.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.6.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.6.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.6.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.6.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.6.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.7.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.7.3.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.7.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.8.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.9.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1902.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1903.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1904.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1905.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1906.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1907.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1908.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1909.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1910a.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1911.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1912.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1915.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": 
"7", - "arch": "arm64", - "uname_release": "4.14.35-1916.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1917.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1923.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1929.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1933.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-1941.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2013.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2015.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2016.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2017.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2018.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2019.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2020.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.400.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.400.8.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.400.9.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.400.9.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.401.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.402.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.402.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.403.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.403.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.403.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.403.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.403.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.404.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.404.1.1.el7uek.aarch64", - 
"cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.404.1.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.405.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.405.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2025.405.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2039.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2040.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2041.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.500.10.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.500.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.500.9.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.500.9.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.501.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.501.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.501.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.502.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.502.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.502.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.502.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.503.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.503.1.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.503.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.504.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.504.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.504.2.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.504.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.505.0.el7uek.aarch64", - "cindex": 89 - }, - { - 
"distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.505.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.505.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.505.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.505.4.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.505.4.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.505.4.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.505.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.506.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.506.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.506.10.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.506.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.506.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.506.8.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.506.8.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.507.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.507.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.507.7.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.507.7.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.507.7.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.508.3.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.508.3.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.508.3.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.508.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.509.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.509.2.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.509.2.3.el7uek.aarch64", - "cindex": 89 - }, 
- { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.5.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.5.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.5.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.5.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.510.5.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.5.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.5.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.7.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.8.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.511.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.512.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.512.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.512.2.el7uek.aarch64", - 
"cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.512.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.512.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.512.6.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.513.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.513.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.513.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.513.2.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.513.2.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.513.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.514.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.514.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.514.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.514.5.1.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.514.5.1.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.514.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.515.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.516.1.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.516.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.516.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.516.2.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.516.2.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.516.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.517.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.517.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.517.3.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": 
"4.14.35-2047.517.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.518.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.518.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.518.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.518.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.518.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.518.4.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.518.4.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.518.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.519.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.519.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.519.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.519.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.520.0.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.520.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.520.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.520.3.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.521.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.521.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.521.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.522.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.522.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.522.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.523.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.523.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.523.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": 
"4.14.35-2047.523.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.523.4.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.523.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.524.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.524.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.524.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.524.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.524.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.525.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.526.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.526.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.526.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.527.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.527.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.528.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.528.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.528.2.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.528.2.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.528.2.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.528.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.529.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.529.3.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.529.3.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.529.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.530.5.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.531.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - 
"uname_release": "4.14.35-2047.532.3.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.532.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.533.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.534.3.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.534.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.535.2.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.536.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.537.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.537.4.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.538.5.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.538.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.539.5.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.540.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.540.4.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.541.4.1.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.542.2.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2047.543.3.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2048.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2049.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2050.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2051.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2052.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2102.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2103.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2104.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2105.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2106.el7uek.aarch64", - 
"cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2108.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2109.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2110.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2111.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2112.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2113.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2114.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2115.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2116.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2118.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2120.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2121.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2122.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "4.14.35-2124.el7uek.aarch64", - "cindex": 89 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.0-1948.3.el7uek.aarch64", - "cindex": 90 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2006.5.el7uek.aarch64", - "cindex": 90 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2011.4.6.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2011.6.2.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2028.2.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.100.1.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.100.3.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.100.6.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.101.0.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.101.1.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.101.2.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.102.0.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": 
"5.4.17-2036.103.2.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.104.0.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.104.2.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.105.1.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2036.105.3.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2040.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2041.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2051.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.200.7.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.200.9.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.202.4.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.202.5.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.203.3.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.203.4.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.204.0.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.204.1.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.204.2.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.204.3.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.204.4.3.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.205.2.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.205.7.2.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.205.7.3.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2102.206.1.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2106.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2108.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2109.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2111.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - 
"version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2114.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2118.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2120.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2122.303.5.el7uek.aarch64", - "cindex": 92 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2122.el7uek.aarch64", - "cindex": 91 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2136.300.7.el7uek.aarch64", - "cindex": 92 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.17-2136.301.0.el7uek.aarch64", - "cindex": 92 - }, - { - "distrib": "ol", - "version": "7", - "arch": "arm64", - "uname_release": "5.4.2-1950.2.el7uek.aarch64", - "cindex": 90 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.0.0.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.1.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.1.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.1.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.1.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.12.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.12.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.18.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.18.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.4.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.4.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.4.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.4.3.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.4.3.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.7.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.7.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.9.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.9.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1062.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", 
- "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.0.0.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.10.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.10.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.13.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.13.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.18.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.18.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.19.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.19.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.19.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.8.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1127.8.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.102.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.102.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.105.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.105.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.108.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.108.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.11.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.11.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.114.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.114.2.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.118.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.118.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.119.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.119.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": 
"3.10.0-1160.119.1.0.3.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.119.1.0.4.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.119.1.0.5.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.15.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.15.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.2.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.2.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.2.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.2.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.21.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.21.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.24.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.24.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.25.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.25.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.31.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.31.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.36.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.36.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.41.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.41.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.42.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.42.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.45.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.45.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.45.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.49.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - 
"version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.49.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.53.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.53.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.59.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.59.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.6.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.6.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.62.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.62.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.62.1.0.3.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.62.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.66.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.66.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.66.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.71.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.71.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.76.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.76.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.80.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.80.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.81.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.81.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.83.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.83.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.88.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.88.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": 
"3.10.0-1160.90.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.90.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.92.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.92.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.95.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.95.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.99.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.99.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-1160.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.0.0.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.0.0.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.0.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.1.3.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.1.3.0.3.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.1.3.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.10.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.10.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.12.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.12.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.12.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.12.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.21.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.21.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.21.3.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.21.3.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.27.2.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.27.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": 
"x86_64", - "uname_release": "3.10.0-957.5.1.0.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.5.1.0.2.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.5.1.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "3.10.0-957.el7.x86_64", - "cindex": 93 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.10.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.3.8.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.3.8.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.6.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.7.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.7.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.7.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.9.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.9.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.9.6.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-103.9.7.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.14.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.14.10.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.14.11.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.14.13.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.14.14.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.14.15.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.14.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.14.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.16.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.16.7.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-112.17.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.14.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - 
"version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.14.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.14.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.14.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.15.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.15.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.15.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.16.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.16.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.16.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.16.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.17.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.17.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.18.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.18.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.18.6.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.18.9.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.19.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.19.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.19.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.19.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.19.6.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.19.7.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.20.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.20.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.20.7.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.21.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.22.1.el7uek.x86_64", - 
"cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.22.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.22.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.23.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.23.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.23.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.24.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.24.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.24.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.25.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.26.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.26.10.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.26.12.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.26.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.26.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.26.7.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.27.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.27.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.28.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.28.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.28.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.28.6.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.29.3.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.29.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.29.4.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.30.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.31.1.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - 
"uname_release": "4.1.12-124.31.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.32.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.32.3.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.32.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.33.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.34.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.35.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.35.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.35.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.36.1.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.36.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.36.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.36.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.37.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.38.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.39.2.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.39.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.39.5.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.39.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.40.6.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.40.6.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.41.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.41.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.42.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.42.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.43.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.44.4.1.el7uek.x86_64", - "cindex": 94 - }, - { - 
"distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.44.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.45.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.45.6.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.46.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.46.4.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.47.3.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.48.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.48.3.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.48.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.48.6.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.49.3.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.50.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.51.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.52.4.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.52.5.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.52.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.53.3.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.53.5.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.53.5.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.53.5.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.54.6.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.54.6.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.56.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.57.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.58.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.59.1.2.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": 
"4.1.12-124.59.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-124.60.1.el7uek.x86_64", - "cindex": 94 - }, - { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.61.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.106-97.85.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.62.3.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.109-99.92.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.62.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.114-103.97.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.63.2.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.114-105.126.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.63.3.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.121-109.96.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.64.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.123-111.109.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.65.1.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.128-112.105.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.65.1.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.133-113.105.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.65.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.133-113.112.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.66.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.138-114.102.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.67.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.143-118.123.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.68.3.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.146-119.123.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.68.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.146-120.181.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.69.5.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.152-124.171.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - 
"uname_release": "4.1.12-124.69.5.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.152-127.182.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.70.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.154-128.181.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.71.3.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.158-129.185.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.71.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.165-131.185.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.72.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.165-133.209.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.73.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.171-136.231.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.74.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.173-137.228.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.75.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.173-137.229.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.76.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.177-139.253.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.77.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.177-139.254.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.78.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.181-140.257.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.78.4.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.181-142.260.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.78.4.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.186-146.268.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.79.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.192-147.314.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.80.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.193-149.317.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.81.2.el7uek.x86_64", - "cindex": 94 + 
"uname_release": "4.14.198-152.320.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.82.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.200-155.322.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.83.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.203-156.332.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.84.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.209-160.335.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.85.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.209-160.339.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.86.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.214-160.339.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.87.2.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.219-161.340.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.87.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.219-164.354.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.88.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.225-168.357.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.89.4.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.225-169.362.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.90.3.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.231-173.360.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.90.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.231-173.361.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.91.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.232-176.381.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-124.92.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.232-177.418.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-32.1.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.238-182.421.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-32.2.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.238-182.422.amzn2.x86_64", + "cindex": 1 }, { - 
"distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-32.2.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.241-184.433.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-32.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.243-185.433.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-37.2.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.246-187.474.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-37.2.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.248-189.473.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-37.3.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.252-195.481.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-37.4.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.252-195.483.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-37.5.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.256-197.484.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-37.6.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.26-54.32.amzn2.x86_64", + "cindex": 2 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-37.6.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.262-200.489.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-37.6.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.268-205.500.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.10.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.273-207.502.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.13.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.275-207.503.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.14.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.276-211.499.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.16.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.281-212.502.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.17.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.285-215.501.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - 
"uname_release": "4.1.12-61.1.18.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.287-215.504.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.19.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.290-217.505.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.22.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.291-218.527.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.23.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.294-220.533.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", - "arch": "x86_64", - "uname_release": "4.1.12-61.1.24.el7uek.x86_64", - "cindex": 94 + "distrib": "amzn", + "version": "2", + "arch": "x86_64", + "uname_release": "4.14.296-222.539.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.25.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.299-223.520.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.27.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.301-224.520.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.28.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.301-225.528.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.33.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.304-226.531.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.34.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.305-227.531.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.1.6.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.309-231.529.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.51.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.311-233.529.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.63.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.313-235.533.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-61.64.1.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.314-237.533.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.1.8.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.314-238.539.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.2.1.el7uek.x86_64", - "cindex": 94 + 
"uname_release": "4.14.318-240.529.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.3.4.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.318-241.531.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.3.5.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.320-242.534.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.3.6.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.320-243.544.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.3.7.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.322-244.536.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.3.8.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.322-244.539.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.3.9.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.322-246.539.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.5.7.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.326-245.539.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.5.9.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.327-246.539.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.7.8.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.328-248.540.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.8.2.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.33-59.34.amzn2.x86_64", + "cindex": 2 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.8.3.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.33-59.37.amzn2.x86_64", + "cindex": 2 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.1.12-94.8.5.el7uek.x86_64", - "cindex": 94 + "uname_release": "4.14.330-250.540.amzn2.x86_64", + "cindex": 1 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.14-11.el7uek.x86_64", - "cindex": 95 + "uname_release": "4.14.334-252.552.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.32-2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-253.554.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-255.557.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": 
"amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.0.14.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-256.557.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.0.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-256.559.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.0.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-257.562.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.0.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-257.566.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.0.7.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-257.568.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.0.8.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.343-259.562.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.0.9.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.343-260.564.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.1.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.343-261.564.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.344-262.563.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.3.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.345-262.561.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.4.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.348-265.562.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.4.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.348-265.565.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.4.7.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.349-266.564.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.5.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.350-266.564.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1818.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.352-267.564.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": 
"4.14.35-1820.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.352-268.568.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1821.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.352-268.569.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1822.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.353-270.569.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1823.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.355-271.569.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1824.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.355-275.570.amzn2.x86_64", + "cindex": 3 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1825.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.42-61.37.amzn2.x86_64", + "cindex": 4 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1826.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.47-63.37.amzn2.x86_64", + "cindex": 4 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1827.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.47-64.38.amzn2.x86_64", + "cindex": 4 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1828.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.51-66.38.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1829.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.55-68.37.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1830.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.59-68.43.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1831.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.62-70.117.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1833.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.67-71.56.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1836.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.70-72.55.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1837.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.72-73.55.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1838.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.77-80.57.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": 
"7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1841.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.77-81.59.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1842.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.77-86.82.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1843.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.88-88.73.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.0.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.88-88.76.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.0.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.94-89.73.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.0.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.97-90.72.amzn2.x86_64", + "cindex": 0 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.0.7.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.62-10.57.amzn2.x86_64", + "cindex": 5 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.1.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.70-2.243.amzn2.x86_64", + "cindex": 5 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.2.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.75-1.56.amzn2.x86_64", + "cindex": 5 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.3.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.76-38.79.amzn2.x86_64", + "cindex": 5 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.4.5.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.77-41.59.amzn2.x86_64", + "cindex": 5 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.4.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.81-44.57.amzn2.x86_64", + "cindex": 6 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1844.5.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.85-46.56.amzn2.x86_64", + "cindex": 6 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2", "arch": "x86_64", - "uname_release": "4.14.35-1845.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.85-47.59.amzn2.x86_64", + "cindex": 6 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1846.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.101-75.76.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1847.el7uek.x86_64", - "cindex": 
96 + "uname_release": "4.14.104-78.84.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1848.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.106-79.86.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1849.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.109-80.92.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1850a.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.114-82.97.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1851.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.114-83.126.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1901.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.121-85.96.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.10.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.123-86.109.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.11.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.128-87.105.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.12.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.133-88.105.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.13.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.133-88.112.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.14.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.138-89.102.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.15.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.143-91.122.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.18.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.146-93.123.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.152-98.182.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.154-99.181.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.5.el7uek.x86_64", - "cindex": 96 + "uname_release": 
"4.14.158-101.185.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.165-102.185.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.7.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.165-103.209.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.0.9.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.171-105.231.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.1.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.173-106.229.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.10.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.177-107.254.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.10.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.181-108.257.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.10.4.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.186-110.268.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.10.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.193-113.317.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.10.7.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.200-116.320.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.10.8.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.203-116.332.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.11.3.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.209-117.337.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.11.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.214-118.339.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.12.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.219-119.340.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.3.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.225-121.357.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.3.2.el7uek.x86_64", - "cindex": 96 + 
"uname_release": "4.14.225-121.362.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.300.11.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.232-123.381.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.301.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.238-125.421.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.302.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.238-125.422.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.302.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.248-129.473.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.302.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.252-131.483.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.303.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.26-46.32.amzn1.x86_64", + "cindex": 9 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.303.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.262-135.486.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.303.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.262-135.489.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.303.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.268-139.500.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.303.5.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.273-140.502.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.304.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.275-142.503.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.304.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.281-144.502.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.304.6.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.285-147.501.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.304.6.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.287-148.504.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": 
"4.14.35-1902.304.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.294-150.533.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.305.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.299-152.520.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.305.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.301-153.528.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.305.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.305-155.531.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.305.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.309-159.529.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.311-161.529.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.12.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.313-162.533.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.13.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.314-164.533.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.14.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.314-164.539.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.318-166.529.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.318-167.530.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.320-168.534.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.7.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.320-169.544.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.8.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.322-170.535.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.306.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.322-170.538.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": 
"2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.4.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.326-171.539.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.4.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.328-174.540.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.4.8.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.33-51.34.amzn1.x86_64", + "cindex": 9 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.5.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.33-51.37.amzn1.x86_64", + "cindex": 9 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.5.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.330-176.540.amzn1.x86_64", + "cindex": 8 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.5.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.334-177.552.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.5.2.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-178.554.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.5.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-179.557.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.6.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-179.559.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.6.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-180.562.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.6.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.336-180.566.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.6.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.343-183.564.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.6.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.348-187.562.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.7.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.348-187.565.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.7.3.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.349-188.564.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": 
"2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.7.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.350-188.564.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.8.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.352-190.568.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.9.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.352-190.569.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1902.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.353-190.569.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1903.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.355-192.569.amzn1.x86_64", + "cindex": 10 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1904.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.42-52.37.amzn1.x86_64", + "cindex": 11 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1905.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.47-56.37.amzn1.x86_64", + "cindex": 11 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1906.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.51-60.38.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1907.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.55-62.37.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1908.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.59-64.43.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1909.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.62-65.117.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1910a.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.67-66.56.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1911.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.70-67.55.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1912.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.72-68.55.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1915.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.77-69.57.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1916.el7uek.x86_64", - 
"cindex": 96 + "uname_release": "4.14.77-70.59.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1917.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.77-70.82.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1923.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.88-72.73.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1929.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.88-72.76.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1933.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.94-73.73.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", - "version": "7", + "distrib": "amzn", + "version": "2018", "arch": "x86_64", - "uname_release": "4.14.35-1941.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.14.97-74.72.amzn1.x86_64", + "cindex": 7 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2013.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.1.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2015.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.1.2.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2016.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.12.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2017.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.18.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2018.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.4.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2019.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.4.2.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2020.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.4.3.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.400.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.7.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.400.8.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.9.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.400.9.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.400.9.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.10.1.el7.x86_64", + "cindex": 12 }, { - 
"distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.401.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.13.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.402.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.18.2.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.402.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.19.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.403.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.8.2.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.403.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.403.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.102.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.403.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.105.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.403.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.108.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.404.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.11.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.404.1.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.114.2.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.404.1.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.118.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.405.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.119.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.405.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.15.2.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2025.405.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.2.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2039.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.2.2.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2040.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.21.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": 
"4.14.35-2041.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.24.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.500.10.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.25.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.500.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.31.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.500.9.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.36.2.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.500.9.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.41.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.501.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.42.2.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.501.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.45.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.501.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.49.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.502.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.53.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.502.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.59.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.502.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.6.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.502.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.62.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.503.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.66.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.503.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.71.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.504.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.76.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.504.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.80.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.504.2.3.el7uek.x86_64", - "cindex": 96 + "uname_release": 
"3.10.0-1160.81.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.504.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.83.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.505.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.88.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.505.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.90.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.505.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.92.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.505.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.95.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.505.4.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.99.1.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.505.4.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.el7.x86_64", + "cindex": 12 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.505.4.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-957.1.3.el7.x86_64", + "cindex": 13 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.505.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-957.10.1.el7.x86_64", + "cindex": 13 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.506.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-957.12.1.el7.x86_64", + "cindex": 13 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.506.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-957.12.2.el7.x86_64", + "cindex": 13 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.506.10.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-957.21.2.el7.x86_64", + "cindex": 13 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.506.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-957.21.3.el7.x86_64", + "cindex": 13 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.506.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-957.27.2.el7.x86_64", + "cindex": 13 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.506.8.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-957.5.1.el7.x86_64", + "cindex": 13 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.506.8.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-957.el7.x86_64", + "cindex": 13 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", 
"arch": "x86_64", - "uname_release": "4.14.35-2047.507.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.104-300.el7.x86_64", + "cindex": 14 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.507.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.110-300.el7.x86_64", + "cindex": 14 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.507.7.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.113-300.el7.x86_64", + "cindex": 15 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.507.7.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.84-300.el7.x86_64", + "cindex": 14 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.507.7.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.94-300.el7.x86_64", + "cindex": 14 }, { - "distrib": "ol", + "distrib": "centos", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.508.3.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.4.28-200.el7.x86_64", + "cindex": 16 }, { - "distrib": "ol", - "version": "7", + "distrib": "centos", + "version": "8", "arch": "x86_64", - "uname_release": "4.14.35-2047.508.3.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.18.0-147.0.3.el8_1.x86_64", + "cindex": 17 }, { - "distrib": "ol", - "version": "7", + "distrib": "centos", + "version": "8", "arch": "x86_64", - "uname_release": "4.14.35-2047.508.3.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.18.0-147.3.1.el8_1.x86_64", + "cindex": 17 }, { - "distrib": "ol", - "version": "7", + "distrib": "centos", + "version": "8", "arch": "x86_64", - "uname_release": "4.14.35-2047.508.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.18.0-147.5.1.el8_1.x86_64", + "cindex": 17 }, { - "distrib": "ol", - "version": "7", + "distrib": "centos", + "version": "8", "arch": "x86_64", - "uname_release": "4.14.35-2047.509.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.18.0-147.8.1.el8_1.x86_64", + "cindex": 17 }, { - "distrib": "ol", - "version": "7", + "distrib": "centos", + "version": "8", "arch": "x86_64", - "uname_release": "4.14.35-2047.509.2.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.18.0-147.el8.x86_64", + "cindex": 17 }, { - "distrib": "ol", - "version": "7", + "distrib": "centos", + "version": "8", "arch": "x86_64", - "uname_release": "4.14.35-2047.509.2.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.18.0-80.7.1.el8_0.x86_64", + "cindex": 18 }, { - "distrib": "ol", - "version": "7", + "distrib": "centos", + "version": "8", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.18.0-80.el8.x86_64", + "cindex": 18 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-1-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-1-cloud-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-1-rt-amd64", + "cindex": 20 }, { - 
"distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-10-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.5.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-10-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.5.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-10-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.5.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-11-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.5.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-11-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.510.5.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-11-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-12-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-12-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-12-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.5.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-13-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.5.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-13-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.5.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-13-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.5.5.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-14-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.5.5.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-14-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.5.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-14-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - 
"uname_release": "4.14.35-2047.511.5.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-15-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.5.7.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-15-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.5.8.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-15-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.511.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-16-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.512.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-16-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.512.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-16-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.512.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-17-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.512.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-17-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.512.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-17-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.512.6.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-18-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.513.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-18-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.513.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-18-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.513.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-19-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.513.2.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-19-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.513.2.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-19-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.513.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-2-amd64", + "cindex": 19 }, 
{ - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.514.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-2-cloud-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.514.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-2-rt-amd64", + "cindex": 20 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.514.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-20-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.514.5.1.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-20-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.514.5.1.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-20-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.514.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-21-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.515.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-21-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.516.1.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-21-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.516.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-22-amd64", + "cindex": 23 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.516.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-22-cloud-amd64", + "cindex": 23 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.516.2.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-22-rt-amd64", + "cindex": 24 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.516.2.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-23-amd64", + "cindex": 23 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.516.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-23-cloud-amd64", + "cindex": 23 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.517.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-23-rt-amd64", + "cindex": 24 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.517.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-24-amd64", + "cindex": 23 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": 
"4.14.35-2047.517.3.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-24-cloud-amd64", + "cindex": 23 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.517.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-24-rt-amd64", + "cindex": 24 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.518.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-25-amd64", + "cindex": 23 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.518.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-25-cloud-amd64", + "cindex": 23 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.518.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-25-rt-amd64", + "cindex": 24 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.518.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-26-amd64", + "cindex": 25 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.518.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-26-cloud-amd64", + "cindex": 25 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.518.4.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-26-rt-amd64", + "cindex": 26 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.518.4.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-27-amd64", + "cindex": 25 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.518.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-27-cloud-amd64", + "cindex": 25 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.519.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-27-rt-amd64", + "cindex": 26 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.519.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-3-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.519.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-3-cloud-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.519.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-3-rt-amd64", + "cindex": 20 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.520.0.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-4-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.520.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-4-cloud-amd64", + "cindex": 19 }, { - "distrib": 
"ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.520.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-4-rt-amd64", + "cindex": 20 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.520.3.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-5-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.521.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-5-cloud-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.521.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-5-rt-amd64", + "cindex": 27 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.521.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-6-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.522.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-6-cloud-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.522.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-6-rt-amd64", + "cindex": 27 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.522.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-7-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.523.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-7-cloud-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.523.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-7-rt-amd64", + "cindex": 27 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.523.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-8-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.523.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-8-cloud-amd64", + "cindex": 19 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.523.4.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-8-rt-amd64", + "cindex": 27 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.523.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-9-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.524.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-9-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.524.2.el7uek.x86_64", - 
"cindex": 96 + "uname_release": "4.19.0-9-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.524.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.17-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.524.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.17-cloud-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.524.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.17-rt-amd64", + "cindex": 29 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.525.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.19-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.526.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.19-cloud-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.526.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.19-rt-amd64", + "cindex": 29 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.526.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.20-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.527.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.20-cloud-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.527.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.20-rt-amd64", + "cindex": 29 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.528.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.21-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.528.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.21-cloud-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.528.2.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.21-rt-amd64", + "cindex": 29 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.528.2.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.22-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.528.2.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.22-cloud-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.528.2.el7uek.x86_64", - "cindex": 96 
+ "uname_release": "5.10.0-0.deb10.22-rt-amd64", + "cindex": 29 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.529.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.23-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.529.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.23-cloud-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.529.3.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.23-rt-amd64", + "cindex": 29 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.529.3.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.24-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.529.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.24-cloud-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.530.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.24-rt-amd64", + "cindex": 29 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.530.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.26-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.530.5.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.26-cloud-amd64", + "cindex": 28 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.530.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.26-rt-amd64", + "cindex": 29 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.531.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.27-amd64", + "cindex": 30 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.531.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.27-cloud-amd64", + "cindex": 30 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.531.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.27-rt-amd64", + "cindex": 31 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.532.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.28-amd64", + "cindex": 30 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.532.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.10.0-0.deb10.28-cloud-amd64", + "cindex": 30 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "10", "arch": "x86_64", - "uname_release": "4.14.35-2047.532.3.1.el7uek.x86_64", - "cindex": 96 + 
"uname_release": "5.10.0-0.deb10.28-rt-amd64", + "cindex": 31 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "9", "arch": "x86_64", - "uname_release": "4.14.35-2047.532.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-0.bpo.19-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "9", "arch": "x86_64", - "uname_release": "4.14.35-2047.533.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-0.bpo.19-cloud-amd64", + "cindex": 21 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "9", "arch": "x86_64", - "uname_release": "4.14.35-2047.533.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.19.0-0.bpo.19-rt-amd64", + "cindex": 22 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "9", "arch": "x86_64", - "uname_release": "4.14.35-2047.533.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.0-13-amd64", + "cindex": 32 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "9", "arch": "x86_64", - "uname_release": "4.14.35-2047.534.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.0-13-rt-amd64", + "cindex": 33 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "9", "arch": "x86_64", - "uname_release": "4.14.35-2047.534.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.0-18-amd64", + "cindex": 32 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "9", "arch": "x86_64", - "uname_release": "4.14.35-2047.534.3.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.0-18-rt-amd64", + "cindex": 33 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "9", "arch": "x86_64", - "uname_release": "4.14.35-2047.534.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.0-19-amd64", + "cindex": 32 }, { - "distrib": "ol", - "version": "7", + "distrib": "debian", + "version": "9", "arch": "x86_64", - "uname_release": "4.14.35-2047.535.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.9.0-19-rt-amd64", + "cindex": 33 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "24", "arch": "x86_64", - "uname_release": "4.14.35-2047.535.2.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.11.12-100.fc24.x86_64", + "cindex": 34 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "24", "arch": "x86_64", - "uname_release": "4.14.35-2047.535.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.5.5-300.fc24.x86_64", + "cindex": 35 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "25", "arch": "x86_64", - "uname_release": "4.14.35-2047.536.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.13.16-100.fc25.x86_64", + "cindex": 36 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "25", "arch": "x86_64", - "uname_release": "4.14.35-2047.536.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.8.6-300.fc25.x86_64", + "cindex": 37 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "26", "arch": "x86_64", - "uname_release": "4.14.35-2047.536.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.11.8-300.fc26.x86_64", + "cindex": 38 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "26", "arch": "x86_64", - "uname_release": "4.14.35-2047.536.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.16.11-100.fc26.x86_64", + "cindex": 39 }, { - "distrib": "ol", - "version": "7", + 
"distrib": "fedora", + "version": "27", "arch": "x86_64", - "uname_release": "4.14.35-2047.537.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.13.9-300.fc27.x86_64", + "cindex": 36 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "27", "arch": "x86_64", - "uname_release": "4.14.35-2047.537.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.18.19-100.fc27.x86_64", + "cindex": 40 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "28", "arch": "x86_64", - "uname_release": "4.14.35-2047.537.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.16.3-301.fc28.x86_64", + "cindex": 41 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "28", "arch": "x86_64", - "uname_release": "4.14.35-2047.537.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.0.16-100.fc28.x86_64", + "cindex": 42 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "29", "arch": "x86_64", - "uname_release": "4.14.35-2047.538.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "4.18.16-300.fc29.x86_64", + "cindex": 40 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "29", "arch": "x86_64", - "uname_release": "4.14.35-2047.538.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.3.11-100.fc29.x86_64", + "cindex": 43 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "30", "arch": "x86_64", - "uname_release": "4.14.35-2047.538.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.0.9-301.fc30.x86_64", + "cindex": 42 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "30", "arch": "x86_64", - "uname_release": "4.14.35-2047.538.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.6.13-100.fc30.x86_64", + "cindex": 44 }, { - "distrib": "ol", - "version": "7", + "distrib": "fedora", + "version": "31", "arch": "x86_64", - "uname_release": "4.14.35-2047.538.5.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "5.3.7-301.fc31.x86_64", + "cindex": 45 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.538.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.0.0.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.539.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.1.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.539.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.1.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.539.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.1.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.539.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.1.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.539.5.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.12.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.540.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.12.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.540.2.el7uek.x86_64", - "cindex": 96 + "uname_release": 
"3.10.0-1062.18.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.540.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.18.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.540.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.4.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.540.4.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.4.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.540.4.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.4.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.541.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.4.3.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.541.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.4.3.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.541.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.7.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.541.4.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.7.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.542.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.9.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.542.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.9.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.543.1.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1062.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.543.2.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.0.0.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2047.543.3.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.10.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2048.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.10.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2049.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.13.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2050.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.13.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2051.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.18.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2052.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.18.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - 
"uname_release": "4.14.35-2102.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.19.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2103.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.19.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2104.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.19.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2105.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.8.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2106.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1127.8.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2108.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.102.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2109.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.102.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2110.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.105.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2111.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.105.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2112.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.108.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2113.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.108.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2114.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.11.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2115.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.11.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2116.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.114.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2118.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.114.2.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2120.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.118.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2121.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.118.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2122.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.119.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "4.14.35-2124.el7uek.x86_64", - "cindex": 96 + "uname_release": "3.10.0-1160.119.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", 
"version": "7", "arch": "x86_64", - "uname_release": "5.4.0-1948.3.el7uek.x86_64", - "cindex": 97 + "uname_release": "3.10.0-1160.119.1.0.3.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2006.5.el7uek.x86_64", - "cindex": 98 + "uname_release": "3.10.0-1160.119.1.0.4.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.0.7.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.119.1.0.5.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.1.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.15.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.2.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.15.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.3.2.1.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.2.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.4.4.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.2.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.4.6.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.2.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.5.3.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.2.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.6.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.21.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.7.4.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.21.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2028.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.24.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.100.1.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.24.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.100.3.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.25.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.100.6.1.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.25.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.100.6.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.31.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.101.0.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.31.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.101.1.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.36.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.101.2.el7uek.x86_64", - "cindex": 99 + 
"uname_release": "3.10.0-1160.36.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.102.0.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.41.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.102.0.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.41.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.103.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.42.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.103.3.1.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.42.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.103.3.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.45.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.104.0.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.45.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.104.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.45.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.104.4.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.49.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.104.5.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.49.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.105.1.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.53.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.105.3.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.53.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2040.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.59.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2041.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.59.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2051.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.6.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.200.13.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.6.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.200.7.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.62.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.200.9.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.62.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.201.3.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.62.1.0.3.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", 
"arch": "x86_64", - "uname_release": "5.4.17-2102.202.4.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.62.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.202.5.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.66.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.203.3.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.66.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.203.4.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.66.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.203.5.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.71.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.203.6.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.71.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.0.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.76.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.1.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.76.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.80.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.3.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.80.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.4.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.81.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.4.3.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.81.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.4.4.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.83.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.83.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.7.2.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.88.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.7.3.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.88.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.206.1.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.90.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2106.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.90.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2108.el7uek.x86_64", 
- "cindex": 99 + "uname_release": "3.10.0-1160.92.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2109.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.92.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2111.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.95.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2114.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.95.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2118.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.99.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2120.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-1160.99.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2122.303.5.el7uek.x86_64", - "cindex": 100 + "uname_release": "3.10.0-1160.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2122.el7uek.x86_64", - "cindex": 99 + "uname_release": "3.10.0-957.0.0.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2136.300.7.el7uek.x86_64", - "cindex": 100 + "uname_release": "3.10.0-957.0.0.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2136.301.0.el7uek.x86_64", - "cindex": 100 + "uname_release": "3.10.0-957.0.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "5.4.2-1950.2.el7uek.x86_64", - "cindex": 98 + "uname_release": "3.10.0-957.1.3.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.0.2.el8_1.aarch64", - "cindex": 101 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.1.3.0.3.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.0.3.el8_1.aarch64", - "cindex": 101 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.1.3.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.3.1.el8_1.aarch64", - "cindex": 101 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.10.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.5.1.el8_1.aarch64", - "cindex": 101 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.10.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.8.1.el8_1.aarch64", - "cindex": 101 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.12.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.el8.aarch64", - "cindex": 101 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.12.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.1.2.el8_0.aarch64", - "cindex": 102 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.12.2.0.1.el7.x86_64", + 
"cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.11.1.el8_0.aarch64", - "cindex": 102 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.12.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.11.2.el8_0.aarch64", - "cindex": 102 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.21.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.4.2.el8_0.aarch64", - "cindex": 102 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.21.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.7.1.el8_0.aarch64", - "cindex": 102 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.21.3.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.7.2.el8_0.aarch64", - "cindex": 102 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.21.3.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.el8.aarch64", - "cindex": 102 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.27.2.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2011.0.7.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.27.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2011.1.2.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.5.1.0.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2011.2.2.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.5.1.0.2.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2011.3.2.1.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.5.1.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2011.4.4.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "3.10.0-957.el7.x86_64", + "cindex": 46 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2011.4.6.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.10.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2011.5.3.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.3.8.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2011.6.2.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.3.8.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2011.7.4.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.6.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": 
"5.4.17-2036.100.6.1.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.7.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2036.101.2.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.7.3.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2036.102.0.2.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.7.4.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2036.103.3.1.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.9.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2036.103.3.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.9.4.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2036.104.4.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.9.6.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2036.104.5.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-103.9.7.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2102.200.13.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-112.14.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2102.201.3.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-112.14.10.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2102.202.5.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-112.14.11.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2102.203.5.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-112.14.13.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2102.203.6.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-112.14.14.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", - "arch": "arm64", - "uname_release": "5.4.17-2102.204.4.2.el8uek.aarch64", - "cindex": 103 + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-112.14.15.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.0.2.el8_1.x86_64", - "cindex": 104 + "uname_release": "4.1.12-112.14.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.0.3.el8_1.x86_64", - "cindex": 104 + "uname_release": "4.1.12-112.14.5.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.3.1.el8_1.x86_64", - "cindex": 104 + 
"uname_release": "4.1.12-112.16.4.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.5.1.el8_1.x86_64", - "cindex": 104 + "uname_release": "4.1.12-112.16.7.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.8.1.el8_1.x86_64", - "cindex": 104 + "uname_release": "4.1.12-112.17.3.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.el8.x86_64", - "cindex": 104 + "uname_release": "4.1.12-124.14.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.1.2.el8_0.x86_64", - "cindex": 105 + "uname_release": "4.1.12-124.14.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.11.1.el8_0.x86_64", - "cindex": 105 + "uname_release": "4.1.12-124.14.3.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.11.2.el8_0.x86_64", - "cindex": 105 + "uname_release": "4.1.12-124.14.5.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.4.2.el8_0.x86_64", - "cindex": 105 + "uname_release": "4.1.12-124.15.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.7.1.el8_0.x86_64", - "cindex": 105 + "uname_release": "4.1.12-124.15.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.7.2.el8_0.x86_64", - "cindex": 105 + "uname_release": "4.1.12-124.15.4.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.el8.x86_64", - "cindex": 105 + "uname_release": "4.1.12-124.16.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.0.7.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.16.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.1.2.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.16.3.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.2.2.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.16.4.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.3.2.1.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.17.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.4.4.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.17.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.4.6.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.18.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.5.3.el8uek.x86_64", - "cindex": 106 + 
"uname_release": "4.1.12-124.18.5.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.6.2.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.18.6.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2011.7.4.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.18.9.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.100.6.1.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.19.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.101.2.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.19.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.102.0.2.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.19.4.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.103.3.1.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.19.5.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.103.3.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.19.6.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.104.4.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.19.7.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2036.104.5.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.20.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.200.13.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.20.3.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.201.3.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.20.7.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.202.4.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.21.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.202.5.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.22.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.203.3.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.22.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.203.4.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.22.4.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.203.5.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.23.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - 
"uname_release": "5.4.17-2102.203.6.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.23.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.0.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.23.4.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.1.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.24.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.2.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.24.3.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.3.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.24.5.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.4.2.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.25.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.4.3.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.26.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.204.4.4.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.26.10.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.3.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.26.12.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.5.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.26.3.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.6.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.26.5.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.7.1.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.26.7.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.7.2.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.27.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.7.3.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.27.2.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.205.7.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.28.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.206.0.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.28.3.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2102.206.1.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.28.5.el7uek.x86_64", + "cindex": 
47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2114.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.28.6.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2118.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.29.3.1.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2120.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.29.3.el7uek.x86_64", + "cindex": 47 }, { "distrib": "ol", - "version": "8", + "version": "7", "arch": "x86_64", - "uname_release": "5.4.17-2122.el8uek.x86_64", - "cindex": 106 + "uname_release": "4.1.12-124.29.4.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-lp150.11-default", - "cindex": 107 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-124.30.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.11-default", - "cindex": 108 + "uname_release": "4.1.12-124.31.1.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.11-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.31.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.10-default", - "cindex": 108 + "uname_release": "4.1.12-124.32.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.10-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.32.3.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.13-default", - "cindex": 108 + "uname_release": "4.1.12-124.32.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.13-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.33.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.16-default", - "cindex": 108 + "uname_release": "4.1.12-124.34.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.16-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.35.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.19-default", - "cindex": 108 + "uname_release": "4.1.12-124.35.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.19-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.35.4.el7uek.x86_64", + "cindex": 47 }, { - 
"distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.22-default", - "cindex": 108 + "uname_release": "4.1.12-124.36.1.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.22-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.36.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.25-default", - "cindex": 108 + "uname_release": "4.1.12-124.36.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.25-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.36.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.28-default", - "cindex": 108 + "uname_release": "4.1.12-124.37.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.28-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.38.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.4-default", - "cindex": 108 + "uname_release": "4.1.12-124.39.2.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.4-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.39.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.45-default", - "cindex": 108 + "uname_release": "4.1.12-124.39.5.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.45-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.39.5.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.48-default", - "cindex": 108 + "uname_release": "4.1.12-124.40.6.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.48-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.40.6.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.58-default", - "cindex": 108 + "uname_release": "4.1.12-124.41.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.58-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.41.5.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": 
"4.12.14-lp150.12.61-default", - "cindex": 108 + "uname_release": "4.1.12-124.42.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.61-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.42.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.64-default", - "cindex": 108 + "uname_release": "4.1.12-124.43.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.64-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.44.4.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.67-default", - "cindex": 108 + "uname_release": "4.1.12-124.44.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.67-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.45.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.7-default", - "cindex": 108 + "uname_release": "4.1.12-124.45.6.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.7-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.46.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.70-default", - "cindex": 108 + "uname_release": "4.1.12-124.46.4.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.70-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.47.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.73-default", - "cindex": 108 + "uname_release": "4.1.12-124.48.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.73-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.48.3.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.76-default", - "cindex": 108 + "uname_release": "4.1.12-124.48.5.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.76-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.48.6.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.79-default", - "cindex": 108 + "uname_release": "4.1.12-124.49.3.1.el7uek.x86_64", + "cindex": 47 }, { - 
"distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.79-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.50.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.82-default", - "cindex": 108 + "uname_release": "4.1.12-124.51.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp150.12.82-kvmsmall", - "cindex": 109 + "uname_release": "4.1.12-124.52.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-lp151.27-default", - "cindex": 110 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-124.52.5.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.26-default", - "cindex": 111 + "uname_release": "4.1.12-124.52.5.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.26-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.53.3.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.10-default", - "cindex": 111 + "uname_release": "4.1.12-124.53.5.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.10-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.53.5.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.13-default", - "cindex": 111 + "uname_release": "4.1.12-124.53.5.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.13-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.54.6.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.16-default", - "cindex": 111 + "uname_release": "4.1.12-124.54.6.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.16-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.56.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.20-default", - "cindex": 111 + "uname_release": "4.1.12-124.57.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.20-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.58.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - 
"uname_release": "4.12.14-lp151.28.25-default", - "cindex": 111 + "uname_release": "4.1.12-124.59.1.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.25-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.59.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.32-default", - "cindex": 111 + "uname_release": "4.1.12-124.60.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.32-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.61.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.36-default", - "cindex": 111 + "uname_release": "4.1.12-124.62.3.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.36-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.62.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.4-default", - "cindex": 111 + "uname_release": "4.1.12-124.63.2.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.4-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.63.3.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.40-default", - "cindex": 111 + "uname_release": "4.1.12-124.64.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.40-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.65.1.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.44-default", - "cindex": 111 + "uname_release": "4.1.12-124.65.1.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.44-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.65.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.48-default", - "cindex": 111 + "uname_release": "4.1.12-124.66.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.48-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.67.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.52-default", - "cindex": 111 + "uname_release": "4.1.12-124.68.3.1.el7uek.x86_64", + 
"cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.52-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.68.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.59-default", - "cindex": 111 + "uname_release": "4.1.12-124.69.5.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.59-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.69.5.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.63-default", - "cindex": 111 + "uname_release": "4.1.12-124.70.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.63-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.71.3.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.67-default", - "cindex": 111 + "uname_release": "4.1.12-124.71.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.67-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.72.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.7-default", - "cindex": 111 + "uname_release": "4.1.12-124.73.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.7-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.74.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.71-default", - "cindex": 111 + "uname_release": "4.1.12-124.75.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.71-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.76.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.75-default", - "cindex": 111 + "uname_release": "4.1.12-124.77.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.75-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.78.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.79-default", - "cindex": 111 + "uname_release": "4.1.12-124.78.4.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - 
"uname_release": "4.12.14-lp151.28.79-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.78.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.83-default", - "cindex": 111 + "uname_release": "4.1.12-124.79.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.83-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.80.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.87-default", - "cindex": 111 + "uname_release": "4.1.12-124.81.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.87-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.82.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.91-default", - "cindex": 111 + "uname_release": "4.1.12-124.83.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.1", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-lp151.28.91-kvmsmall", - "cindex": 112 + "uname_release": "4.1.12-124.84.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-lp152.19-default", - "cindex": 113 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-124.85.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.102-default", - "cindex": 114 + "uname_release": "4.1.12-124.86.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.102-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-124.87.2.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.106-default", - "cindex": 114 + "uname_release": "4.1.12-124.87.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.106-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-124.88.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.19-default", - "cindex": 114 + "uname_release": "4.1.12-124.89.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.19-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-124.90.3.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.26-default", - "cindex": 114 + "uname_release": "4.1.12-124.90.3.el7uek.x86_64", + "cindex": 47 }, 
{ - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.26-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-124.91.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.33-default", - "cindex": 114 + "uname_release": "4.1.12-124.92.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.33-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-124.93.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.36-default", - "cindex": 114 + "uname_release": "4.1.12-32.1.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.36-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-32.2.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.41-default", - "cindex": 114 + "uname_release": "4.1.12-32.2.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.41-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-32.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.44-default", - "cindex": 114 + "uname_release": "4.1.12-37.2.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.44-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-37.2.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.47-default", - "cindex": 114 + "uname_release": "4.1.12-37.3.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.47-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-37.4.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.50-default", - "cindex": 114 + "uname_release": "4.1.12-37.5.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.50-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-37.6.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.54-default", - "cindex": 114 + "uname_release": "4.1.12-37.6.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.54-kvmsmall", - "cindex": 115 + "uname_release": 
"4.1.12-37.6.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.57-default", - "cindex": 114 + "uname_release": "4.1.12-61.1.10.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.57-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-61.1.13.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.60-default", - "cindex": 114 + "uname_release": "4.1.12-61.1.14.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.60-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-61.1.16.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.63-default", - "cindex": 114 + "uname_release": "4.1.12-61.1.17.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.63-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-61.1.18.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.66-default", - "cindex": 114 + "uname_release": "4.1.12-61.1.19.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.66-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-61.1.22.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.69-default", - "cindex": 114 + "uname_release": "4.1.12-61.1.23.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.69-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-61.1.24.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.72-default", - "cindex": 114 + "uname_release": "4.1.12-61.1.25.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.72-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-61.1.27.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.75-default", - "cindex": 114 + "uname_release": "4.1.12-61.1.28.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.75-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-61.1.33.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": 
"5.3.18-lp152.78-default", - "cindex": 114 + "uname_release": "4.1.12-61.1.34.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.78-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-61.1.6.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.81-default", - "cindex": 114 + "uname_release": "4.1.12-61.51.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.81-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-61.63.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.84-default", - "cindex": 114 + "uname_release": "4.1.12-61.64.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.84-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-94.1.8.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.87-default", - "cindex": 114 + "uname_release": "4.1.12-94.2.1.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.87-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-94.3.4.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.92-default", - "cindex": 114 + "uname_release": "4.1.12-94.3.5.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.92-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-94.3.6.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.95-default", - "cindex": 114 + "uname_release": "4.1.12-94.3.7.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.95-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-94.3.8.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.98-default", - "cindex": 114 + "uname_release": "4.1.12-94.3.9.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.2", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-lp152.98-kvmsmall", - "cindex": 115 + "uname_release": "4.1.12-94.5.7.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.43-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-94.5.9.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - 
"uname_release": "5.3.18-150300.59.43-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-94.7.8.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.46-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-94.8.2.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.46-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-94.8.3.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.49-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.1.12-94.8.5.el7uek.x86_64", + "cindex": 47 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.49-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.14-11.el7uek.x86_64", + "cindex": 48 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.54-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.32-2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.54-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.60-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.0.14.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.60-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.0.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.63-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.0.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.63-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.0.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.68-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.0.7.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.68-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.0.8.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.71-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": 
"4.14.35-1818.0.9.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.71-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.1.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.76-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.76-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.3.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.81-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.4.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.81-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.4.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.87-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.4.7.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.87-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.5.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-57-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1818.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-57-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1820.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.10-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1821.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.10-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1822.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.13-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1823.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.13-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1824.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.16-64kb", - "cindex": 116 + 
"distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1825.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.16-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1826.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.19-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1827.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.19-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1828.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.24-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1829.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.24-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1830.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.27-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1831.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.27-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1833.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.30-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1836.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.30-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1837.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.34-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1838.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.34-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1841.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.37-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1842.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.37-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1843.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.40-64kb", - "cindex": 116 + 
"distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1844.0.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.40-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1844.0.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.5-64kb", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1844.0.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.5-default", - "cindex": 116 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1844.0.7.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.38.37-azure", - "cindex": 117 + "uname_release": "4.14.35-1844.1.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.38.40-azure", - "cindex": 117 + "uname_release": "4.14.35-1844.2.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.38.47-azure", - "cindex": 117 + "uname_release": "4.14.35-1844.3.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.38.50-azure", - "cindex": 117 + "uname_release": "4.14.35-1844.4.5.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.38.53-azure", - "cindex": 117 + "uname_release": "4.14.35-1844.4.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.38.56-azure", - "cindex": 117 + "uname_release": "4.14.35-1844.5.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.38.59-azure", - "cindex": 117 + "uname_release": "4.14.35-1845.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.38.62-azure", - "cindex": 117 + "uname_release": "4.14.35-1846.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.38.69-azure", - "cindex": 117 + "uname_release": "4.14.35-1847.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.43-default", - "cindex": 117 + "uname_release": "4.14.35-1848.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.43-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1849.el7uek.x86_64", + "cindex": 
49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.46-default", - "cindex": 117 + "uname_release": "4.14.35-1850a.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.46-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1851.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.49-default", - "cindex": 117 + "uname_release": "4.14.35-1901.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.49-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.0.10.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.54-default", - "cindex": 117 + "uname_release": "4.14.35-1902.0.11.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.54-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.0.12.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.60-default", - "cindex": 117 + "uname_release": "4.14.35-1902.0.13.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.60-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.0.14.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.63-default", - "cindex": 117 + "uname_release": "4.14.35-1902.0.15.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.63-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.0.18.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.68-default", - "cindex": 117 + "uname_release": "4.14.35-1902.0.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.68-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.0.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.71-default", - "cindex": 117 + "uname_release": "4.14.35-1902.0.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.71-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.0.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - 
"uname_release": "5.3.18-150300.59.76-default", - "cindex": 117 + "uname_release": "4.14.35-1902.0.7.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.76-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.0.9.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.81-default", - "cindex": 117 + "uname_release": "4.14.35-1902.1.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.81-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.10.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-1902.10.4.1.el7uek.x86_64", + "cindex": 49 + }, + { + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.87-default", - "cindex": 117 + "uname_release": "4.14.35-1902.10.4.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.87-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.10.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-36-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.10.7.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.11-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.10.8.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.14-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.11.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.17-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.11.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.22-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.12.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.25-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.28-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.3.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.3-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.300.11.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.31-azure", - 
"cindex": 117 + "uname_release": "4.14.35-1902.301.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.34-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.302.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-38.8-azure", - "cindex": 117 + "uname_release": "4.14.35-1902.302.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-57-default", - "cindex": 117 + "uname_release": "4.14.35-1902.302.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-57-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.303.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.10-default", - "cindex": 117 + "uname_release": "4.14.35-1902.303.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.10-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.303.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.13-default", - "cindex": 117 + "uname_release": "4.14.35-1902.303.4.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.13-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.303.5.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.16-default", - "cindex": 117 + "uname_release": "4.14.35-1902.304.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.16-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.304.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.19-default", - "cindex": 117 + "uname_release": "4.14.35-1902.304.6.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.19-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.304.6.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.24-default", - "cindex": 117 + "uname_release": "4.14.35-1902.304.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.24-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.305.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": 
"x86_64", - "uname_release": "5.3.18-59.27-default", - "cindex": 117 + "uname_release": "4.14.35-1902.305.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.27-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.305.4.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.30-default", - "cindex": 117 + "uname_release": "4.14.35-1902.305.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.30-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.306.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.34-default", - "cindex": 117 + "uname_release": "4.14.35-1902.306.2.12.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.34-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.306.2.13.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.37-default", - "cindex": 117 + "uname_release": "4.14.35-1902.306.2.14.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.37-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.306.2.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.40-default", - "cindex": 117 + "uname_release": "4.14.35-1902.306.2.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.40-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.306.2.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.5-default", - "cindex": 117 + "uname_release": "4.14.35-1902.306.2.7.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "opensuse-leap", - "version": "15.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "5.3.18-59.5-kvmsmall", - "cindex": 118 + "uname_release": "4.14.35-1902.306.2.8.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.11.0-44.2.1.el7a.aarch64", - "cindex": 119 + "arch": "x86_64", + "uname_release": "4.14.35-1902.306.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.11.0-44.4.1.el7a.aarch64", - "cindex": 119 + "arch": "x86_64", + "uname_release": "4.14.35-1902.4.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.11.0-44.6.1.el7a.aarch64", - "cindex": 119 + "arch": "x86_64", + "uname_release": "4.14.35-1902.4.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", 
"version": "7", - "arch": "arm64", - "uname_release": "4.11.0-44.7.1.el7a.aarch64", - "cindex": 119 + "arch": "x86_64", + "uname_release": "4.14.35-1902.4.8.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.11.0-44.el7a.aarch64", - "cindex": 119 + "arch": "x86_64", + "uname_release": "4.14.35-1902.5.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.10.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.5.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.12.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.5.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.13.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.5.2.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.14.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.5.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.16.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.6.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.17.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.6.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.18.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.6.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.19.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.6.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.2.2.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.6.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.21.2.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.7.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.26.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.7.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.29.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.7.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.32.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.8.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": 
"arm64", - "uname_release": "4.14.0-115.33.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.9.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.5.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1902.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.6.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1903.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.7.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1904.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.8.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1905.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.8.2.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1906.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-115.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1907.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.10.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1908.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.13.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1909.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.2.2.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1910a.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.8.1.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1911.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", - "arch": "arm64", - "uname_release": "4.14.0-49.el7a.aarch64", - "cindex": 120 + "arch": "x86_64", + "uname_release": "4.14.35-1912.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.1.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-1915.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.1.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-1916.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.12.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-1917.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.18.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-1923.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + 
"distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.21.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-1929.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.26.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-1933.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.30.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-1941.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.31.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2013.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.31.3.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2015.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.33.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2016.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.36.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2017.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.37.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2018.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.4.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2019.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.4.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2020.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.4.3.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.400.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.40.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.400.8.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.43.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.400.9.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.45.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.400.9.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.46.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.401.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.49.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.402.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.51.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.402.2.1.el7uek.x86_64", + 
"cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.52.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.403.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.7.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.403.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.9.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.403.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1062.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.403.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1127.10.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.403.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1127.13.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.404.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1127.18.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.404.1.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1127.19.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.404.1.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1127.8.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.405.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1127.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.405.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.102.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2025.405.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.105.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2039.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.108.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2040.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.11.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2041.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.114.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.500.10.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.118.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.500.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": 
"3.10.0-1160.119.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.500.9.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.15.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.500.9.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.2.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.501.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.2.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.501.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.21.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.501.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.24.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.502.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.25.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.502.4.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.31.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.502.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.36.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.502.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.41.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.503.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.42.2.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.503.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.45.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.504.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.49.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.504.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.53.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.504.2.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.59.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.504.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.6.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.505.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.62.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.505.1.el7uek.x86_64", + 
"cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.66.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.505.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.71.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.505.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.76.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.505.4.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.80.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.505.4.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.81.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.505.4.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.83.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.505.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.88.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.506.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.90.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.506.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.92.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.506.10.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.95.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.506.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.99.1.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.506.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-1160.el7.x86_64", - "cindex": 121 + "uname_release": "4.14.35-2047.506.8.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-957.1.3.el7.x86_64", - "cindex": 122 + "uname_release": "4.14.35-2047.506.8.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-957.10.1.el7.x86_64", - "cindex": 122 + "uname_release": "4.14.35-2047.507.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-957.12.1.el7.x86_64", - "cindex": 122 + "uname_release": "4.14.35-2047.507.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-957.12.2.el7.x86_64", - "cindex": 122 + "uname_release": "4.14.35-2047.507.7.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": 
"3.10.0-957.21.2.el7.x86_64", - "cindex": 122 + "uname_release": "4.14.35-2047.507.7.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-957.21.3.el7.x86_64", - "cindex": 122 + "uname_release": "4.14.35-2047.507.7.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-957.27.2.el7.x86_64", - "cindex": 122 + "uname_release": "4.14.35-2047.508.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-957.5.1.el7.x86_64", - "cindex": 122 + "uname_release": "4.14.35-2047.508.3.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", + "distrib": "ol", "version": "7", "arch": "x86_64", - "uname_release": "3.10.0-957.el7.x86_64", - "cindex": 122 + "uname_release": "4.14.35-2047.508.3.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.0.2.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.508.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.0.3.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.509.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.13.2.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.509.2.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.20.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.509.2.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.24.2.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.27.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.3.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.32.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.34.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.4.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.38.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.5.2.el7uek.x86_64", + 
"cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.43.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.5.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.44.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.5.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.48.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.5.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.5.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.510.5.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.51.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.51.2.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.52.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.54.2.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.56.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.57.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.8.1.el8_1.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.5.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-147.el8.aarch64", - "cindex": 123 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.5.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.1.2.el8_0.aarch64", - "cindex": 124 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.11.1.el8_0.aarch64", - "cindex": 124 + "distrib": "ol", + "version": "7", + 
"arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.11.2.el8_0.aarch64", - "cindex": 124 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.7.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.4.2.el8_0.aarch64", - "cindex": 124 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.8.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.7.1.el8_0.aarch64", - "cindex": 124 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.511.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.7.2.el8_0.aarch64", - "cindex": 124 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.512.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", - "arch": "arm64", - "uname_release": "4.18.0-80.el8.aarch64", - "cindex": 124 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "4.14.35-2047.512.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.0.2.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.512.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.0.3.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.512.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.13.2.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.512.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.20.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.512.6.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.24.2.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.513.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.27.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.513.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.3.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.513.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.32.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.513.2.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.34.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.513.2.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": 
"ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.38.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.513.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.43.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.514.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.44.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.514.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.48.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.514.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.5.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.514.5.1.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.51.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.514.5.1.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.51.2.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.514.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.52.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.515.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.54.2.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.516.1.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.56.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.516.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.57.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.516.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.8.1.el8_1.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.516.2.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-147.el8.x86_64", - "cindex": 123 + "uname_release": "4.14.35-2047.516.2.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.1.2.el8_0.x86_64", - "cindex": 125 + "uname_release": "4.14.35-2047.516.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.11.1.el8_0.x86_64", - "cindex": 125 + "uname_release": "4.14.35-2047.517.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + 
"version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.11.2.el8_0.x86_64", - "cindex": 125 + "uname_release": "4.14.35-2047.517.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.4.2.el8_0.x86_64", - "cindex": 125 + "uname_release": "4.14.35-2047.517.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.7.1.el8_0.x86_64", - "cindex": 125 + "uname_release": "4.14.35-2047.517.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.7.2.el8_0.x86_64", - "cindex": 125 + "uname_release": "4.14.35-2047.518.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "rhel", - "version": "8", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.18.0-80.el8.x86_64", - "cindex": 125 + "uname_release": "4.14.35-2047.518.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.103-6.33-default", - "cindex": 126 + "uname_release": "4.14.35-2047.518.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.103-6.38-default", - "cindex": 126 + "uname_release": "4.14.35-2047.518.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.114-94.11-default", - "cindex": 126 + "uname_release": "4.14.35-2047.518.4.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.114-94.14-default", - "cindex": 126 + "uname_release": "4.14.35-2047.518.4.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.120-94.17-default", - "cindex": 126 + "uname_release": "4.14.35-2047.518.4.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.126-94.22-default", - "cindex": 126 + "uname_release": "4.14.35-2047.518.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.131-94.29-default", - "cindex": 126 + "uname_release": "4.14.35-2047.519.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.132-94.33-default", - "cindex": 126 + "uname_release": "4.14.35-2047.519.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.138-4.7-azure", - "cindex": 126 + "uname_release": "4.14.35-2047.519.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.138-94.39-default", - "cindex": 126 + "uname_release": "4.14.35-2047.519.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": 
"4.4.140-94.42-default", - "cindex": 126 + "uname_release": "4.14.35-2047.520.0.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.143-4.13-azure", - "cindex": 126 + "uname_release": "4.14.35-2047.520.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.143-94.47-default", - "cindex": 126 + "uname_release": "4.14.35-2047.520.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.155-4.16-azure", - "cindex": 126 + "uname_release": "4.14.35-2047.520.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.155-94.50-default", - "cindex": 126 + "uname_release": "4.14.35-2047.521.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.156-94.57-default", - "cindex": 126 + "uname_release": "4.14.35-2047.521.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.156-94.61-default", - "cindex": 126 + "uname_release": "4.14.35-2047.521.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.156-94.64-default", - "cindex": 126 + "uname_release": "4.14.35-2047.522.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.162-4.19-azure", - "cindex": 126 + "uname_release": "4.14.35-2047.522.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.162-94.69-default", - "cindex": 126 + "uname_release": "4.14.35-2047.522.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.162-94.72-default", - "cindex": 126 + "uname_release": "4.14.35-2047.523.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.170-4.22-azure", - "cindex": 126 + "uname_release": "4.14.35-2047.523.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.175-94.79-default", - "cindex": 126 + "uname_release": "4.14.35-2047.523.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.176-4.25-azure", - "cindex": 126 + "uname_release": "4.14.35-2047.523.4.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.176-94.88-default", - "cindex": 126 + "uname_release": "4.14.35-2047.523.4.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.178-4.28-azure", - "cindex": 126 + "uname_release": 
"4.14.35-2047.523.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.178-94.91-default", - "cindex": 126 + "uname_release": "4.14.35-2047.524.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.180-4.31-azure", - "cindex": 126 + "uname_release": "4.14.35-2047.524.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.180-94.100-default", - "cindex": 126 + "uname_release": "4.14.35-2047.524.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.180-94.97-default", - "cindex": 126 + "uname_release": "4.14.35-2047.524.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.73-5-default", - "cindex": 127 + "uname_release": "4.14.35-2047.524.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.82-6.3-default", - "cindex": 126 + "uname_release": "4.14.35-2047.525.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.82-6.6-default", - "cindex": 126 + "uname_release": "4.14.35-2047.526.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.82-6.9-default", - "cindex": 126 + "uname_release": "4.14.35-2047.526.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.92-6.18-default", - "cindex": 126 + "uname_release": "4.14.35-2047.526.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.3", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.4.92-6.30-default", - "cindex": 126 + "uname_release": "4.14.35-2047.527.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.12-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.527.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.15-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.528.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.18-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.528.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.23-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.528.2.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.26-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.528.2.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + 
"distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.29-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.528.2.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.3-azure", - "cindex": 129 + "uname_release": "4.14.35-2047.528.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.34-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.529.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.37-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.529.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.40-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.529.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.43-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.529.3.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.6-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.529.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-6.9-azure", - "cindex": 128 + "uname_release": "4.14.35-2047.530.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-94.41-default", - "cindex": 128 + "uname_release": "4.14.35-2047.530.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.13-default", - "cindex": 128 + "uname_release": "4.14.35-2047.530.5.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.16-default", - "cindex": 128 + "uname_release": "4.14.35-2047.530.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.19-default", - "cindex": 128 + "uname_release": "4.14.35-2047.531.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.24-default", - "cindex": 128 + "uname_release": "4.14.35-2047.531.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.29-default", - "cindex": 128 + "uname_release": "4.14.35-2047.531.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.3-default", - "cindex": 128 + "uname_release": "4.14.35-2047.532.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": 
"4.12.14-95.32-default", - "cindex": 128 + "uname_release": "4.14.35-2047.532.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.37-default", - "cindex": 128 + "uname_release": "4.14.35-2047.532.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.40-default", - "cindex": 128 + "uname_release": "4.14.35-2047.532.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.45-default", - "cindex": 128 + "uname_release": "4.14.35-2047.533.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.48-default", - "cindex": 128 + "uname_release": "4.14.35-2047.533.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.51-default", - "cindex": 128 + "uname_release": "4.14.35-2047.533.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.54-default", - "cindex": 128 + "uname_release": "4.14.35-2047.534.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.4", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-95.6-default", - "cindex": 128 + "uname_release": "4.14.35-2047.534.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-120-default", - "cindex": 130 + "uname_release": "4.14.35-2047.534.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.103-default", - "cindex": 130 + "uname_release": "4.14.35-2047.534.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.106-default", - "cindex": 130 + "uname_release": "4.14.35-2047.535.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.110-default", - "cindex": 130 + "uname_release": "4.14.35-2047.535.2.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.113-default", - "cindex": 130 + "uname_release": "4.14.35-2047.535.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.116-default", - "cindex": 130 + "uname_release": "4.14.35-2047.536.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.12-default", - "cindex": 130 + "uname_release": "4.14.35-2047.536.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.121-default", - "cindex": 130 + 
"uname_release": "4.14.35-2047.536.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.124-default", - "cindex": 130 + "uname_release": "4.14.35-2047.536.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.127-default", - "cindex": 130 + "uname_release": "4.14.35-2047.537.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.130-default", - "cindex": 130 + "uname_release": "4.14.35-2047.537.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.133-default", - "cindex": 130 + "uname_release": "4.14.35-2047.537.4.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.136-default", - "cindex": 130 + "uname_release": "4.14.35-2047.537.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.139-default", - "cindex": 130 + "uname_release": "4.14.35-2047.538.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.144-default", - "cindex": 130 + "uname_release": "4.14.35-2047.538.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.147-default", - "cindex": 130 + "uname_release": "4.14.35-2047.538.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.150-default", - "cindex": 130 + "uname_release": "4.14.35-2047.538.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.153-default", - "cindex": 130 + "uname_release": "4.14.35-2047.538.5.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.156-default", - "cindex": 130 + "uname_release": "4.14.35-2047.538.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.159-default", - "cindex": 130 + "uname_release": "4.14.35-2047.539.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.162-default", - "cindex": 130 + "uname_release": "4.14.35-2047.539.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.165-default", - "cindex": 130 + "uname_release": "4.14.35-2047.539.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.17-default", - "cindex": 130 + "uname_release": 
"4.14.35-2047.539.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.173-default", - "cindex": 130 + "uname_release": "4.14.35-2047.539.5.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.176-default", - "cindex": 130 + "uname_release": "4.14.35-2047.540.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.179-default", - "cindex": 130 + "uname_release": "4.14.35-2047.540.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.183-default", - "cindex": 130 + "uname_release": "4.14.35-2047.540.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.186-default", - "cindex": 130 + "uname_release": "4.14.35-2047.540.4.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.189-default", - "cindex": 130 + "uname_release": "4.14.35-2047.540.4.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.194-default", - "cindex": 130 + "uname_release": "4.14.35-2047.540.4.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.20-default", - "cindex": 130 + "uname_release": "4.14.35-2047.541.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.201-default", - "cindex": 130 + "uname_release": "4.14.35-2047.541.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.23-default", - "cindex": 130 + "uname_release": "4.14.35-2047.541.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.26-default", - "cindex": 130 + "uname_release": "4.14.35-2047.541.4.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.29-default", - "cindex": 130 + "uname_release": "4.14.35-2047.542.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.32-default", - "cindex": 130 + "uname_release": "4.14.35-2047.542.2.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.37-default", - "cindex": 130 + "uname_release": "4.14.35-2047.543.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.41-default", - "cindex": 130 + "uname_release": "4.14.35-2047.543.2.el7uek.x86_64", + 
"cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.46-default", - "cindex": 130 + "uname_release": "4.14.35-2047.543.3.1.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.51-default", - "cindex": 130 + "uname_release": "4.14.35-2047.543.3.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.54-default", - "cindex": 130 + "uname_release": "4.14.35-2048.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.57-default", - "cindex": 130 + "uname_release": "4.14.35-2049.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.60-default", - "cindex": 130 + "uname_release": "4.14.35-2050.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.63-default", - "cindex": 130 + "uname_release": "4.14.35-2051.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.66-default", - "cindex": 130 + "uname_release": "4.14.35-2052.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.7-default", - "cindex": 130 + "uname_release": "4.14.35-2102.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.71-default", - "cindex": 130 + "uname_release": "4.14.35-2103.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.74-default", - "cindex": 130 + "uname_release": "4.14.35-2104.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.77-default", - "cindex": 130 + "uname_release": "4.14.35-2105.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.80-default", - "cindex": 130 + "uname_release": "4.14.35-2106.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.83-default", - "cindex": 130 + "uname_release": "4.14.35-2108.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.88-default", - "cindex": 130 + "uname_release": "4.14.35-2109.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-122.91-default", - "cindex": 130 + "uname_release": "4.14.35-2110.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": 
"4.12.14-122.98-default", - "cindex": 130 + "uname_release": "4.14.35-2111.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.10-azure", - "cindex": 130 + "uname_release": "4.14.35-2112.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.100-azure", - "cindex": 130 + "uname_release": "4.14.35-2113.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.103-azure", - "cindex": 130 + "uname_release": "4.14.35-2114.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.106-azure", - "cindex": 130 + "uname_release": "4.14.35-2115.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.109-azure", - "cindex": 130 + "uname_release": "4.14.35-2116.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.112-azure", - "cindex": 130 + "uname_release": "4.14.35-2118.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.115-azure", - "cindex": 130 + "uname_release": "4.14.35-2120.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.120-azure", - "cindex": 130 + "uname_release": "4.14.35-2121.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.124-azure", - "cindex": 130 + "uname_release": "4.14.35-2122.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.127-azure", - "cindex": 130 + "uname_release": "4.14.35-2124.el7uek.x86_64", + "cindex": 49 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.13-azure", - "cindex": 130 + "uname_release": "5.4.0-1948.3.el7uek.x86_64", + "cindex": 50 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.130-azure", - "cindex": 130 + "uname_release": "5.4.17-2006.5.el7uek.x86_64", + "cindex": 51 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.133-azure", - "cindex": 130 + "uname_release": "5.4.17-2011.0.7.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.136-azure", - "cindex": 130 + "uname_release": "5.4.17-2011.1.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.139-azure", - "cindex": 130 + "uname_release": "5.4.17-2011.2.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", 
+ "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.146-azure", - "cindex": 130 + "uname_release": "5.4.17-2011.3.2.1.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.149-azure", - "cindex": 130 + "uname_release": "5.4.17-2011.4.4.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.152-azure", - "cindex": 130 + "uname_release": "5.4.17-2011.4.6.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.155-azure", - "cindex": 130 + "uname_release": "5.4.17-2011.5.3.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.16-azure", - "cindex": 130 + "uname_release": "5.4.17-2011.6.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.160-azure", - "cindex": 130 + "uname_release": "5.4.17-2011.7.4.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.163-azure", - "cindex": 130 + "uname_release": "5.4.17-2028.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.168-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.100.1.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.19-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.100.3.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.22-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.100.6.1.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.25-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.100.6.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.28-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.101.0.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.31-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.101.1.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.34-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.101.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.38-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.102.0.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.41-azure", - "cindex": 130 + "uname_release": 
"5.4.17-2036.102.0.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.44-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.103.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.47-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.103.3.1.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.50-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.103.3.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.53-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.104.0.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.56-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.104.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.59-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.104.4.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.62-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.104.5.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.65-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.105.1.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.68-azure", - "cindex": 130 + "uname_release": "5.4.17-2036.105.3.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.7-azure", - "cindex": 130 + "uname_release": "5.4.17-2040.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.73-azure", - "cindex": 130 + "uname_release": "5.4.17-2041.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.76-azure", - "cindex": 130 + "uname_release": "5.4.17-2051.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.80-azure", - "cindex": 130 + "uname_release": "5.4.17-2102.200.13.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.85-azure", - "cindex": 130 + "uname_release": "5.4.17-2102.200.7.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.88-azure", - "cindex": 130 + "uname_release": "5.4.17-2102.200.9.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", 
"arch": "x86_64", - "uname_release": "4.12.14-16.91-azure", - "cindex": 130 + "uname_release": "5.4.17-2102.201.3.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.94-azure", - "cindex": 130 + "uname_release": "5.4.17-2102.202.4.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "12.5", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-16.97-azure", - "cindex": 130 + "uname_release": "5.4.17-2102.202.5.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-150.14-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.203.3.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-150.17-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.203.4.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-150.22-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.203.5.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-150.27-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.203.6.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-150.32-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.204.0.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-150.35-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.204.1.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-150.38-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.204.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-150.41-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.204.3.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-150.47-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.204.4.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-23-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.204.4.3.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-25.13-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.204.4.4.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-25.16-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + 
"arch": "x86_64", + "uname_release": "5.4.17-2102.205.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-25.19-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.205.7.2.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-25.22-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.205.7.3.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-25.25-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2102.206.1.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-25.28-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2106.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-25.3-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2108.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", - "arch": "arm64", - "uname_release": "4.12.14-25.6-default", - "cindex": 131 + "distrib": "ol", + "version": "7", + "arch": "x86_64", + "uname_release": "5.4.17-2109.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-150.14-default", - "cindex": 128 + "uname_release": "5.4.17-2111.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-150.17-default", - "cindex": 128 + "uname_release": "5.4.17-2114.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-150.22-default", - "cindex": 128 + "uname_release": "5.4.17-2118.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-150.27-default", - "cindex": 128 + "uname_release": "5.4.17-2120.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-150.32-default", - "cindex": 128 + "uname_release": "5.4.17-2122.303.5.el7uek.x86_64", + "cindex": 53 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-150.35-default", - "cindex": 128 + "uname_release": "5.4.17-2122.el7uek.x86_64", + "cindex": 52 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-150.38-default", - "cindex": 128 + "uname_release": "5.4.17-2136.300.7.el7uek.x86_64", + "cindex": 53 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": "4.12.14-150.41-default", - "cindex": 128 + "uname_release": "5.4.17-2136.301.0.el7uek.x86_64", + "cindex": 53 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "7", "arch": "x86_64", - "uname_release": 
"4.12.14-150.47-default", - "cindex": 128 + "uname_release": "5.4.2-1950.2.el7uek.x86_64", + "cindex": 51 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-23-default", - "cindex": 128 + "uname_release": "4.18.0-147.0.2.el8_1.x86_64", + "cindex": 54 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-25.13-default", - "cindex": 128 + "uname_release": "4.18.0-147.0.3.el8_1.x86_64", + "cindex": 54 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-25.16-default", - "cindex": 128 + "uname_release": "4.18.0-147.3.1.el8_1.x86_64", + "cindex": 54 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-25.19-default", - "cindex": 128 + "uname_release": "4.18.0-147.5.1.el8_1.x86_64", + "cindex": 54 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-25.22-default", - "cindex": 128 + "uname_release": "4.18.0-147.8.1.el8_1.x86_64", + "cindex": 54 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-25.25-default", - "cindex": 128 + "uname_release": "4.18.0-147.el8.x86_64", + "cindex": 54 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-25.28-default", - "cindex": 128 + "uname_release": "4.18.0-80.1.2.el8_0.x86_64", + "cindex": 55 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-25.3-default", - "cindex": 128 + "uname_release": "4.18.0-80.11.1.el8_0.x86_64", + "cindex": 55 }, { - "distrib": "sles", - "version": "15.0", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-25.6-default", - "cindex": 128 + "uname_release": "4.18.0-80.11.2.el8_0.x86_64", + "cindex": 55 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-195-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "4.18.0-80.4.2.el8_0.x86_64", + "cindex": 55 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.10-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "4.18.0-80.7.1.el8_0.x86_64", + "cindex": 55 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.15-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "4.18.0-80.7.2.el8_0.x86_64", + "cindex": 55 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.18-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "4.18.0-80.el8.x86_64", + "cindex": 55 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.21-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2011.0.7.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.26-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", 
+ "uname_release": "5.4.17-2011.1.2.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.29-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2011.2.2.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.34-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2011.3.2.1.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.37-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2011.4.4.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.4-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2011.4.6.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.40-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2011.5.3.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.45-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2011.6.2.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.48-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2011.7.4.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.51-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2036.100.6.1.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.56-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2036.101.2.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.61-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2036.102.0.2.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.64-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2036.103.3.1.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.67-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2036.103.3.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.7-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2036.104.4.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.72-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": 
"5.4.17-2036.104.5.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.75-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2102.200.13.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", - "arch": "arm64", - "uname_release": "4.12.14-197.78-default", - "cindex": 132 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2102.201.3.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-195-default", - "cindex": 130 + "uname_release": "5.4.17-2102.202.4.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.10-default", - "cindex": 130 + "uname_release": "5.4.17-2102.202.5.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.15-default", - "cindex": 130 + "uname_release": "5.4.17-2102.203.3.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.18-default", - "cindex": 130 + "uname_release": "5.4.17-2102.203.4.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.21-default", - "cindex": 130 + "uname_release": "5.4.17-2102.203.5.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.26-default", - "cindex": 130 + "uname_release": "5.4.17-2102.203.6.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.29-default", - "cindex": 130 + "uname_release": "5.4.17-2102.204.0.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.34-default", - "cindex": 130 + "uname_release": "5.4.17-2102.204.1.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.37-default", - "cindex": 130 + "uname_release": "5.4.17-2102.204.2.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.4-default", - "cindex": 130 + "uname_release": "5.4.17-2102.204.3.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.40-default", - "cindex": 130 + "uname_release": "5.4.17-2102.204.4.2.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.45-default", - "cindex": 130 + "uname_release": "5.4.17-2102.204.4.3.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.48-default", - "cindex": 130 + "uname_release": 
"5.4.17-2102.204.4.4.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.51-default", - "cindex": 130 + "uname_release": "5.4.17-2102.205.3.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.56-default", - "cindex": 130 + "uname_release": "5.4.17-2102.205.5.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.61-default", - "cindex": 130 + "uname_release": "5.4.17-2102.205.6.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.64-default", - "cindex": 130 + "uname_release": "5.4.17-2102.205.7.1.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.67-default", - "cindex": 130 + "uname_release": "5.4.17-2102.205.7.2.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.7-default", - "cindex": 130 + "uname_release": "5.4.17-2102.205.7.3.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.72-default", - "cindex": 130 + "uname_release": "5.4.17-2102.205.7.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.75-default", - "cindex": 130 + "uname_release": "5.4.17-2102.206.0.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.1", + "distrib": "ol", + "version": "8", "arch": "x86_64", - "uname_release": "4.12.14-197.78-default", - "cindex": 130 + "uname_release": "5.4.17-2102.206.1.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-22-default", - "cindex": 133 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2114.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.12-default", - "cindex": 133 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2118.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.15-default", - "cindex": 133 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2120.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.24-default", - "cindex": 133 + "distrib": "ol", + "version": "8", + "arch": "x86_64", + "uname_release": "5.4.17-2122.el8uek.x86_64", + "cindex": 56 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.29-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.11-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.34-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": 
"15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.11-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.37-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.10-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.43-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.10-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.46-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.13-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.49-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.13-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.52-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.16-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.53.4-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.16-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.61-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.19-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.64-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.19-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.67-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.22-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.70-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.22-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.75-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.25-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.78-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.25-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.83-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.28-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", - "arch": 
"arm64", - "uname_release": "5.3.18-24.86-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.28-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.9-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.4-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.93-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.4-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", - "arch": "arm64", - "uname_release": "5.3.18-24.96-default", - "cindex": 133 + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "x86_64", + "uname_release": "4.12.14-lp150.12.45-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-22-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.45-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.12-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.48-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.15-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.48-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.24-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.58-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.29-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.58-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.34-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.61-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.37-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.61-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.43-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.64-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.46-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.64-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.49-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.67-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.52-default", - 
"cindex": 134 + "uname_release": "4.12.14-lp150.12.67-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.53.4-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.7-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.61-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.7-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.64-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.70-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.67-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.70-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.70-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.73-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.75-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.73-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.78-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.76-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.83-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.76-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.86-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.79-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.9-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.79-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.93-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.82-default", + "cindex": 57 }, { - "distrib": "sles", - "version": "15.2", + "distrib": "opensuse-leap", + "version": "15.0", "arch": "x86_64", - "uname_release": "5.3.18-24.96-default", - "cindex": 134 + "uname_release": "4.12.14-lp150.12.82-kvmsmall", + "cindex": 58 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.43-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.26-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.43-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.26-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": 
"arm64", - "uname_release": "5.3.18-150300.59.46-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.10-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.46-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.10-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.49-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.13-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.49-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.13-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.54-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.16-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.54-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.16-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.60-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.20-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.60-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.20-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.63-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.25-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.63-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.25-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.68-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.32-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.68-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.32-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.71-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.36-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.71-default", - "cindex": 135 + 
"distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.36-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.76-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.4-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.76-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.4-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.81-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.40-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.81-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.40-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.87-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.44-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-150300.59.87-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.44-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-57-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.48-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-57-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.48-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.10-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.52-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.10-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.52-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.13-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.59-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.13-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.59-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.16-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.63-default", + "cindex": 59 }, { - 
"distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.16-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.63-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.19-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.67-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.19-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.67-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.24-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.7-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.24-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.7-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.27-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.71-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.27-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.71-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.30-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.75-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.30-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.75-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.34-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.79-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.34-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.79-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.37-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.83-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.37-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.83-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.40-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + 
"arch": "x86_64", + "uname_release": "4.12.14-lp151.28.87-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.40-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.87-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.5-64kb", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.91-default", + "cindex": 59 }, { - "distrib": "sles", - "version": "15.3", - "arch": "arm64", - "uname_release": "5.3.18-59.5-default", - "cindex": 135 + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "x86_64", + "uname_release": "4.12.14-lp151.28.91-kvmsmall", + "cindex": 60 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.43-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.102-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.46-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.102-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.49-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.106-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.54-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.106-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.60-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.19-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.63-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.19-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.68-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.26-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.71-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.26-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.76-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.33-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.81-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.33-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-150300.59.87-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.36-default", + "cindex": 61 }, { - 
"distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-57-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.36-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.10-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.41-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.13-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.41-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.16-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.44-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.19-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.44-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.24-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.47-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.27-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.47-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.30-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.50-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.34-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.50-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.37-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.54-default", + "cindex": 61 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.40-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.54-kvmsmall", + "cindex": 62 }, { - "distrib": "sles", - "version": "15.3", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "5.3.18-59.5-default", - "cindex": 136 + "uname_release": "5.3.18-lp152.57-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-1004-gcp", - "cindex": 137 + "uname_release": "5.3.18-lp152.57-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-1006-gcp", - "cindex": 137 + "uname_release": "5.3.18-lp152.60-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-1007-gcp", - "cindex": 137 + "uname_release": "5.3.18-lp152.60-kvmsmall", + "cindex": 62 }, { - "distrib": 
"ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-1008-gcp", - "cindex": 137 + "uname_release": "5.3.18-lp152.63-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-1009-gcp", - "cindex": 137 + "uname_release": "5.3.18-lp152.63-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-14-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.66-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-19-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.66-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-20-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.69-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-21-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.69-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-22-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.72-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-24-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.72-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-26-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.75-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-27-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.75-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-28-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.78-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-30-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.78-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-32-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.81-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-33-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.81-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-35-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.84-default", + "cindex": 61 }, { - "distrib": "ubuntu", - 
"version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-37-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.84-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-38-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.87-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-40-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.87-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.10.0-42-generic", - "cindex": 138 + "uname_release": "5.3.18-lp152.92-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.11.0-1009-azure", - "cindex": 139 + "uname_release": "5.3.18-lp152.92-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.11.0-1011-azure", - "cindex": 139 + "uname_release": "5.3.18-lp152.95-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.11.0-1013-azure", - "cindex": 140 + "uname_release": "5.3.18-lp152.95-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.11.0-1014-azure", - "cindex": 140 + "uname_release": "5.3.18-lp152.98-default", + "cindex": 61 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.11.0-1015-azure", - "cindex": 140 + "uname_release": "5.3.18-lp152.98-kvmsmall", + "cindex": 62 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.11.0-1016-azure", - "cindex": 140 + "uname_release": "5.3.18-150300.38.37-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.11.0-13-generic", - "cindex": 140 + "uname_release": "5.3.18-150300.38.40-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.11.0-14-generic", - "cindex": 140 + "uname_release": "5.3.18-150300.38.47-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1002-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.38.50-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1005-azure", - "cindex": 141 + "uname_release": "5.3.18-150300.38.53-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1006-azure", - "cindex": 141 + "uname_release": "5.3.18-150300.38.56-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - 
"version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1006-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.38.59-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1007-azure", - "cindex": 141 + "uname_release": "5.3.18-150300.38.62-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1007-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.38.69-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1008-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.59.43-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1009-azure", - "cindex": 141 + "uname_release": "5.3.18-150300.59.43-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1011-azure", - "cindex": 141 + "uname_release": "5.3.18-150300.59.46-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1011-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.59.46-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1012-azure", - "cindex": 141 + "uname_release": "5.3.18-150300.59.49-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1012-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.59.49-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1013-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.59.54-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1014-azure", - "cindex": 141 + "uname_release": "5.3.18-150300.59.54-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1015-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.59.60-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1016-azure", - "cindex": 141 + "uname_release": "5.3.18-150300.59.60-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1017-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.59.63-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1018-azure", - "cindex": 141 + "uname_release": "5.3.18-150300.59.63-kvmsmall", + "cindex": 64 
}, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-1019-gcp", - "cindex": 141 + "uname_release": "5.3.18-150300.59.68-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-16-generic", - "cindex": 141 + "uname_release": "5.3.18-150300.59.68-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-17-generic", - "cindex": 141 + "uname_release": "5.3.18-150300.59.71-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-19-generic", - "cindex": 141 + "uname_release": "5.3.18-150300.59.71-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-21-generic", - "cindex": 141 + "uname_release": "5.3.18-150300.59.76-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-25-generic", - "cindex": 141 + "uname_release": "5.3.18-150300.59.76-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-26-generic", - "cindex": 141 + "uname_release": "5.3.18-150300.59.81-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-31-generic", - "cindex": 141 + "uname_release": "5.3.18-150300.59.81-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-32-generic", - "cindex": 141 + "uname_release": "5.3.18-150300.59.87-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-36-generic", - "cindex": 141 + "uname_release": "5.3.18-150300.59.87-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-37-generic", - "cindex": 141 + "uname_release": "5.3.18-36-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-38-generic", - "cindex": 141 + "uname_release": "5.3.18-38.11-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-39-generic", - "cindex": 141 + "uname_release": "5.3.18-38.14-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-41-generic", - "cindex": 141 + "uname_release": "5.3.18-38.17-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-43-generic", - "cindex": 141 + "uname_release": "5.3.18-38.22-azure", + "cindex": 63 
}, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.13.0-45-generic", - "cindex": 141 + "uname_release": "5.3.18-38.25-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-101-generic", - "cindex": 142 + "uname_release": "5.3.18-38.28-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1012-azure", - "cindex": 143 + "uname_release": "5.3.18-38.3-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1013-azure", - "cindex": 143 + "uname_release": "5.3.18-38.31-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1014-azure", - "cindex": 144 + "uname_release": "5.3.18-38.34-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1014-gcp", - "cindex": 144 + "uname_release": "5.3.18-38.8-azure", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1015-gcp", - "cindex": 144 + "uname_release": "5.3.18-57-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1017-gcp", - "cindex": 144 + "uname_release": "5.3.18-57-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1018-azure", - "cindex": 144 + "uname_release": "5.3.18-59.10-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1018-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.10-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1019-azure", - "cindex": 144 + "uname_release": "5.3.18-59.13-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1019-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.13-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1021-azure", - "cindex": 144 + "uname_release": "5.3.18-59.16-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1021-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.16-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1022-azure", - "cindex": 144 + "uname_release": "5.3.18-59.19-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + 
"version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1023-azure", - "cindex": 144 + "uname_release": "5.3.18-59.19-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1023-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.24-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1024-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.24-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1025-azure", - "cindex": 144 + "uname_release": "5.3.18-59.27-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1025-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.27-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1026-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.30-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1027-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.30-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1028-azure", - "cindex": 144 + "uname_release": "5.3.18-59.34-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1028-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.34-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1029-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.37-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1030-aws", - "cindex": 144 + "uname_release": "5.3.18-59.37-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1030-azure", - "cindex": 144 + "uname_release": "5.3.18-59.40-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1030-gcp", - "cindex": 144 + "uname_release": "5.3.18-59.40-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1031-aws", - "cindex": 144 + "uname_release": "5.3.18-59.5-default", + "cindex": 63 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "opensuse-leap", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.15.0-1031-azure", - "cindex": 144 + "uname_release": "5.3.18-59.5-kvmsmall", + "cindex": 64 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1032-aws", - 
"cindex": 144 + "uname_release": "3.10.0-1062.1.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1032-azure", - "cindex": 144 + "uname_release": "3.10.0-1062.1.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1032-gcp", - "cindex": 144 + "uname_release": "3.10.0-1062.12.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1033-aws", - "cindex": 144 + "uname_release": "3.10.0-1062.18.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1033-gcp", - "cindex": 144 + "uname_release": "3.10.0-1062.21.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1034-gcp", - "cindex": 144 + "uname_release": "3.10.0-1062.26.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1035-aws", - "cindex": 144 + "uname_release": "3.10.0-1062.30.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1035-azure", - "cindex": 144 + "uname_release": "3.10.0-1062.31.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1036-aws", - "cindex": 144 + "uname_release": "3.10.0-1062.31.3.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1036-azure", - "cindex": 144 + "uname_release": "3.10.0-1062.33.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1036-gcp", - "cindex": 144 + "uname_release": "3.10.0-1062.36.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1037-azure", - "cindex": 144 + "uname_release": "3.10.0-1062.37.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1037-gcp", - "cindex": 144 + "uname_release": "3.10.0-1062.4.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1039-aws", - "cindex": 144 + "uname_release": "3.10.0-1062.4.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1039-azure", - "cindex": 144 + "uname_release": "3.10.0-1062.4.3.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1040-aws", - "cindex": 144 + "uname_release": "3.10.0-1062.40.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + 
"version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1040-azure", - "cindex": 144 + "uname_release": "3.10.0-1062.43.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1040-gcp", - "cindex": 144 + "uname_release": "3.10.0-1062.45.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1041-aws", - "cindex": 144 + "uname_release": "3.10.0-1062.46.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1041-azure", - "cindex": 145 + "uname_release": "3.10.0-1062.49.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1041-gcp", - "cindex": 144 + "uname_release": "3.10.0-1062.51.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1042-azure", - "cindex": 145 + "uname_release": "3.10.0-1062.52.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1042-gcp", - "cindex": 144 + "uname_release": "3.10.0-1062.7.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1043-aws", - "cindex": 144 + "uname_release": "3.10.0-1062.9.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1044-aws", - "cindex": 144 + "uname_release": "3.10.0-1062.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1044-gcp", - "cindex": 144 + "uname_release": "3.10.0-1127.10.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1045-aws", - "cindex": 144 + "uname_release": "3.10.0-1127.13.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1045-azure", - "cindex": 145 + "uname_release": "3.10.0-1127.18.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1046-azure", - "cindex": 145 + "uname_release": "3.10.0-1127.19.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1046-gcp", - "cindex": 146 + "uname_release": "3.10.0-1127.8.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1047-aws", - "cindex": 144 + "uname_release": "3.10.0-1127.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1047-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.102.1.el7.x86_64", + "cindex": 65 }, { - 
"distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1047-gcp", - "cindex": 146 + "uname_release": "3.10.0-1160.105.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1048-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.108.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1049-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.11.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1049-gcp", - "cindex": 146 + "uname_release": "3.10.0-1160.114.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1050-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.118.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1050-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.119.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1050-gcp", - "cindex": 146 + "uname_release": "3.10.0-1160.15.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1051-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.2.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1051-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.2.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1052-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.21.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1052-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.24.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1052-gcp", - "cindex": 146 + "uname_release": "3.10.0-1160.25.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1054-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.31.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1055-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.36.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1055-gcp", - "cindex": 146 + "uname_release": "3.10.0-1160.41.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1056-aws", - "cindex": 
144 + "uname_release": "3.10.0-1160.42.2.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1056-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.45.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1057-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.49.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1057-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.53.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1058-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.59.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1058-gcp", - "cindex": 146 + "uname_release": "3.10.0-1160.6.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1059-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.62.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-106-generic", - "cindex": 142 + "uname_release": "3.10.0-1160.66.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1060-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.71.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1060-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.76.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1060-gcp", - "cindex": 146 + "uname_release": "3.10.0-1160.80.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1061-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.81.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1061-gcp", - "cindex": 147 + "uname_release": "3.10.0-1160.83.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1063-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.88.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1063-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.90.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1064-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.92.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + 
"version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1065-aws", - "cindex": 144 + "uname_release": "3.10.0-1160.95.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1066-aws", - "cindex": 142 + "uname_release": "3.10.0-1160.99.1.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1066-azure", - "cindex": 145 + "uname_release": "3.10.0-1160.el7.x86_64", + "cindex": 65 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1067-aws", - "cindex": 142 + "uname_release": "3.10.0-957.1.3.el7.x86_64", + "cindex": 66 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1067-azure", - "cindex": 145 + "uname_release": "3.10.0-957.10.1.el7.x86_64", + "cindex": 66 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1069-azure", - "cindex": 145 + "uname_release": "3.10.0-957.12.1.el7.x86_64", + "cindex": 66 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-107-generic", - "cindex": 142 + "uname_release": "3.10.0-957.12.2.el7.x86_64", + "cindex": 66 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1071-azure", - "cindex": 145 + "uname_release": "3.10.0-957.21.2.el7.x86_64", + "cindex": 66 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1071-gcp", - "cindex": 147 + "uname_release": "3.10.0-957.21.3.el7.x86_64", + "cindex": 66 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1073-aws", - "cindex": 142 + "uname_release": "3.10.0-957.27.2.el7.x86_64", + "cindex": 66 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1074-aws", - "cindex": 142 + "uname_release": "3.10.0-957.5.1.el7.x86_64", + "cindex": 66 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "7", "arch": "x86_64", - "uname_release": "4.15.0-1075-azure", - "cindex": 145 + "uname_release": "3.10.0-957.el7.x86_64", + "cindex": 66 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1077-azure", - "cindex": 145 + "uname_release": "4.18.0-147.0.2.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1077-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.0.3.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1078-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.13.2.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1079-aws", - "cindex": 142 + "uname_release": "4.18.0-147.20.1.el8_1.x86_64", + "cindex": 67 }, { - 
"distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1080-aws", - "cindex": 142 + "uname_release": "4.18.0-147.24.2.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1080-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.27.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1081-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.3.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1082-aws", - "cindex": 142 + "uname_release": "4.18.0-147.32.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1082-azure", - "cindex": 148 + "uname_release": "4.18.0-147.34.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1083-aws", - "cindex": 142 + "uname_release": "4.18.0-147.38.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1083-azure", - "cindex": 148 + "uname_release": "4.18.0-147.43.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1083-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.44.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1084-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.48.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1085-aws", - "cindex": 142 + "uname_release": "4.18.0-147.5.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1086-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.51.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1087-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.51.2.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1088-aws", - "cindex": 142 + "uname_release": "4.18.0-147.52.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1088-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.54.2.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1089-azure", - "cindex": 148 + "uname_release": "4.18.0-147.56.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1090-aws", - 
"cindex": 142 + "uname_release": "4.18.0-147.57.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1090-gcp", - "cindex": 147 + "uname_release": "4.18.0-147.8.1.el8_1.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1091-aws", - "cindex": 142 + "uname_release": "4.18.0-147.el8.x86_64", + "cindex": 67 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1091-azure", - "cindex": 148 + "uname_release": "4.18.0-80.1.2.el8_0.x86_64", + "cindex": 68 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1091-gcp", - "cindex": 147 + "uname_release": "4.18.0-80.11.1.el8_0.x86_64", + "cindex": 68 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1092-azure", - "cindex": 148 + "uname_release": "4.18.0-80.11.2.el8_0.x86_64", + "cindex": 68 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1092-gcp", - "cindex": 147 + "uname_release": "4.18.0-80.4.2.el8_0.x86_64", + "cindex": 68 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1093-aws", - "cindex": 142 + "uname_release": "4.18.0-80.7.1.el8_0.x86_64", + "cindex": 68 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1093-azure", - "cindex": 148 + "uname_release": "4.18.0-80.7.2.el8_0.x86_64", + "cindex": 68 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "rhel", + "version": "8", "arch": "x86_64", - "uname_release": "4.15.0-1093-gcp", - "cindex": 147 + "uname_release": "4.18.0-80.el8.x86_64", + "cindex": 68 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1094-aws", - "cindex": 142 + "uname_release": "4.4.103-6.33-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1094-gcp", - "cindex": 147 + "uname_release": "4.4.103-6.38-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1095-aws", - "cindex": 142 + "uname_release": "4.4.114-94.11-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1095-azure", - "cindex": 148 + "uname_release": "4.4.114-94.14-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1095-gcp", - "cindex": 147 + "uname_release": "4.4.120-94.17-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1096-aws", - "cindex": 142 + "uname_release": "4.4.126-94.22-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - 
"uname_release": "4.15.0-1096-azure", - "cindex": 148 + "uname_release": "4.4.131-94.29-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1096-gcp", - "cindex": 147 + "uname_release": "4.4.132-94.33-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1097-aws", - "cindex": 142 + "uname_release": "4.4.138-4.7-azure", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1097-gcp", - "cindex": 147 + "uname_release": "4.4.138-94.39-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1098-aws", - "cindex": 142 + "uname_release": "4.4.140-94.42-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1098-azure", - "cindex": 148 + "uname_release": "4.4.143-4.13-azure", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1098-gcp", - "cindex": 147 + "uname_release": "4.4.143-94.47-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1099-aws", - "cindex": 142 + "uname_release": "4.4.155-4.16-azure", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1100-azure", - "cindex": 148 + "uname_release": "4.4.155-94.50-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1102-azure", - "cindex": 148 + "uname_release": "4.4.156-94.57-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1103-azure", - "cindex": 148 + "uname_release": "4.4.156-94.61-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1106-azure", - "cindex": 148 + "uname_release": "4.4.156-94.64-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1108-azure", - "cindex": 148 + "uname_release": "4.4.162-4.19-azure", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1109-azure", - "cindex": 148 + "uname_release": "4.4.162-94.69-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1110-azure", - "cindex": 148 + "uname_release": "4.4.162-94.72-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1111-azure", - "cindex": 148 + "uname_release": "4.4.170-4.22-azure", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", 
"arch": "x86_64", - "uname_release": "4.15.0-1112-azure", - "cindex": 148 + "uname_release": "4.4.175-94.79-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-1113-azure", - "cindex": 148 + "uname_release": "4.4.176-4.25-azure", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-112-generic", - "cindex": 142 + "uname_release": "4.4.176-94.88-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-115-generic", - "cindex": 142 + "uname_release": "4.4.178-4.28-azure", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-117-generic", - "cindex": 142 + "uname_release": "4.4.178-94.91-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-118-generic", - "cindex": 142 + "uname_release": "4.4.180-4.31-azure", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-120-generic", - "cindex": 142 + "uname_release": "4.4.180-94.100-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-122-generic", - "cindex": 142 + "uname_release": "4.4.180-94.97-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-123-generic", - "cindex": 142 + "uname_release": "4.4.73-5-default", + "cindex": 70 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-126-generic", - "cindex": 142 + "uname_release": "4.4.82-6.3-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-128-generic", - "cindex": 142 + "uname_release": "4.4.82-6.6-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-129-generic", - "cindex": 142 + "uname_release": "4.4.82-6.9-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-13-generic", - "cindex": 143 + "uname_release": "4.4.92-6.18-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.3", "arch": "x86_64", - "uname_release": "4.15.0-132-generic", - "cindex": 142 + "uname_release": "4.4.92-6.30-default", + "cindex": 69 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-133-generic", - "cindex": 142 + "uname_release": "4.12.14-6.12-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-136-generic", - "cindex": 142 + "uname_release": "4.12.14-6.15-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": 
"sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-137-generic", - "cindex": 142 + "uname_release": "4.12.14-6.18-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-139-generic", - "cindex": 142 + "uname_release": "4.12.14-6.23-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-140-generic", - "cindex": 142 + "uname_release": "4.12.14-6.26-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-142-generic", - "cindex": 142 + "uname_release": "4.12.14-6.29-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-15-generic", - "cindex": 143 + "uname_release": "4.12.14-6.3-azure", + "cindex": 72 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-20-generic", - "cindex": 143 + "uname_release": "4.12.14-6.34-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-22-generic", - "cindex": 143 + "uname_release": "4.12.14-6.37-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-23-generic", - "cindex": 143 + "uname_release": "4.12.14-6.40-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-24-generic", - "cindex": 144 + "uname_release": "4.12.14-6.43-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-29-generic", - "cindex": 144 + "uname_release": "4.12.14-6.6-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-30-generic", - "cindex": 144 + "uname_release": "4.12.14-6.9-azure", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-32-generic", - "cindex": 144 + "uname_release": "4.12.14-94.41-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-33-generic", - "cindex": 144 + "uname_release": "4.12.14-95.13-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-34-generic", - "cindex": 144 + "uname_release": "4.12.14-95.16-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-36-generic", - "cindex": 144 + "uname_release": "4.12.14-95.19-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-38-generic", - "cindex": 144 + "uname_release": "4.12.14-95.24-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", 
+ "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-39-generic", - "cindex": 144 + "uname_release": "4.12.14-95.29-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-42-generic", - "cindex": 144 + "uname_release": "4.12.14-95.3-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-43-generic", - "cindex": 144 + "uname_release": "4.12.14-95.32-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-45-generic", - "cindex": 144 + "uname_release": "4.12.14-95.37-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-46-generic", - "cindex": 144 + "uname_release": "4.12.14-95.40-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-47-generic", - "cindex": 144 + "uname_release": "4.12.14-95.45-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-48-generic", - "cindex": 144 + "uname_release": "4.12.14-95.48-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-50-generic", - "cindex": 144 + "uname_release": "4.12.14-95.51-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-51-generic", - "cindex": 144 + "uname_release": "4.12.14-95.54-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.4", "arch": "x86_64", - "uname_release": "4.15.0-52-generic", - "cindex": 144 + "uname_release": "4.12.14-95.6-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-54-generic", - "cindex": 144 + "uname_release": "4.12.14-120-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-55-generic", - "cindex": 144 + "uname_release": "4.12.14-122.103-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-58-generic", - "cindex": 144 + "uname_release": "4.12.14-122.106-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-60-generic", - "cindex": 144 + "uname_release": "4.12.14-122.110-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-62-generic", - "cindex": 144 + "uname_release": "4.12.14-122.113-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-64-generic", - "cindex": 144 + "uname_release": "4.12.14-122.116-default", + "cindex": 
73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-65-generic", - "cindex": 144 + "uname_release": "4.12.14-122.12-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-66-generic", - "cindex": 144 + "uname_release": "4.12.14-122.121-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-69-generic", - "cindex": 144 + "uname_release": "4.12.14-122.124-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-70-generic", - "cindex": 144 + "uname_release": "4.12.14-122.127-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-72-generic", - "cindex": 144 + "uname_release": "4.12.14-122.130-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-74-generic", - "cindex": 144 + "uname_release": "4.12.14-122.133-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-76-generic", - "cindex": 144 + "uname_release": "4.12.14-122.136-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-88-generic", - "cindex": 144 + "uname_release": "4.12.14-122.139-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-91-generic", - "cindex": 144 + "uname_release": "4.12.14-122.144-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-96-generic", - "cindex": 144 + "uname_release": "4.12.14-122.147-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.15.0-99-generic", - "cindex": 142 + "uname_release": "4.12.14-122.150-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.18.0-1006-azure", - "cindex": 149 + "uname_release": "4.12.14-122.153-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.2.0-16-generic", - "cindex": 150 + "uname_release": "4.12.14-122.156-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.2.0-17-generic", - "cindex": 150 + "uname_release": "4.12.14-122.159-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.2.0-19-generic", - "cindex": 150 + "uname_release": "4.12.14-122.162-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.3.0-1-generic", - 
"cindex": 151 + "uname_release": "4.12.14-122.165-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.3.0-2-generic", - "cindex": 151 + "uname_release": "4.12.14-122.17-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.3.0-5-generic", - "cindex": 151 + "uname_release": "4.12.14-122.173-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.3.0-6-generic", - "cindex": 151 + "uname_release": "4.12.14-122.176-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.3.0-7-generic", - "cindex": 151 + "uname_release": "4.12.14-122.179-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-10-generic", - "cindex": 152 + "uname_release": "4.12.14-122.183-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1001-aws", - "cindex": 152 + "uname_release": "4.12.14-122.186-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1003-aws", - "cindex": 152 + "uname_release": "4.12.14-122.189-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1003-gke", - "cindex": 152 + "uname_release": "4.12.14-122.194-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1004-aws", - "cindex": 152 + "uname_release": "4.12.14-122.20-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1005-gke", - "cindex": 152 + "uname_release": "4.12.14-122.201-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1006-gke", - "cindex": 152 + "uname_release": "4.12.14-122.23-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1007-aws", - "cindex": 152 + "uname_release": "4.12.14-122.26-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1008-gke", - "cindex": 152 + "uname_release": "4.12.14-122.29-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1009-aws", - "cindex": 152 + "uname_release": "4.12.14-122.32-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1009-gke", - "cindex": 152 + "uname_release": "4.12.14-122.37-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - 
"uname_release": "4.4.0-101-generic", - "cindex": 153 + "uname_release": "4.12.14-122.41-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1010-gke", - "cindex": 152 + "uname_release": "4.12.14-122.46-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1011-aws", - "cindex": 152 + "uname_release": "4.12.14-122.51-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1012-aws", - "cindex": 152 + "uname_release": "4.12.14-122.54-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1012-gke", - "cindex": 152 + "uname_release": "4.12.14-122.57-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1013-aws", - "cindex": 152 + "uname_release": "4.12.14-122.60-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1013-gke", - "cindex": 152 + "uname_release": "4.12.14-122.63-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1014-gke", - "cindex": 152 + "uname_release": "4.12.14-122.66-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1016-aws", - "cindex": 152 + "uname_release": "4.12.14-122.7-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1016-gke", - "cindex": 152 + "uname_release": "4.12.14-122.71-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1017-aws", - "cindex": 152 + "uname_release": "4.12.14-122.74-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1018-aws", - "cindex": 152 + "uname_release": "4.12.14-122.77-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1018-gke", - "cindex": 152 + "uname_release": "4.12.14-122.80-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1020-aws", - "cindex": 152 + "uname_release": "4.12.14-122.83-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1022-aws", - "cindex": 152 + "uname_release": "4.12.14-122.88-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1022-gke", - "cindex": 152 + "uname_release": "4.12.14-122.91-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", 
"arch": "x86_64", - "uname_release": "4.4.0-1024-gke", - "cindex": 152 + "uname_release": "4.12.14-122.98-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1026-aws", - "cindex": 152 + "uname_release": "4.12.14-16.10-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1026-gke", - "cindex": 152 + "uname_release": "4.12.14-16.100-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1027-gke", - "cindex": 152 + "uname_release": "4.12.14-16.103-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1028-aws", - "cindex": 152 + "uname_release": "4.12.14-16.106-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1028-gke", - "cindex": 154 + "uname_release": "4.12.14-16.109-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-103-generic", - "cindex": 153 + "uname_release": "4.12.14-16.112-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1030-aws", - "cindex": 152 + "uname_release": "4.12.14-16.115-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1031-aws", - "cindex": 152 + "uname_release": "4.12.14-16.120-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1031-gke", - "cindex": 154 + "uname_release": "4.12.14-16.124-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1032-aws", - "cindex": 154 + "uname_release": "4.12.14-16.127-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1032-gke", - "cindex": 154 + "uname_release": "4.12.14-16.13-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1033-gke", - "cindex": 154 + "uname_release": "4.12.14-16.130-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1034-gke", - "cindex": 154 + "uname_release": "4.12.14-16.133-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1035-aws", - "cindex": 154 + "uname_release": "4.12.14-16.136-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1037-aws", - "cindex": 154 + "uname_release": "4.12.14-16.139-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": 
"x86_64", - "uname_release": "4.4.0-1038-aws", - "cindex": 154 + "uname_release": "4.12.14-16.146-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1039-aws", - "cindex": 154 + "uname_release": "4.12.14-16.149-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-104-generic", - "cindex": 153 + "uname_release": "4.12.14-16.152-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1041-aws", - "cindex": 154 + "uname_release": "4.12.14-16.155-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1043-aws", - "cindex": 154 + "uname_release": "4.12.14-16.16-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1044-aws", - "cindex": 154 + "uname_release": "4.12.14-16.160-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1047-aws", - "cindex": 154 + "uname_release": "4.12.14-16.163-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1048-aws", - "cindex": 154 + "uname_release": "4.12.14-16.168-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1049-aws", - "cindex": 154 + "uname_release": "4.12.14-16.19-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1050-aws", - "cindex": 154 + "uname_release": "4.12.14-16.22-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1052-aws", - "cindex": 154 + "uname_release": "4.12.14-16.25-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1054-aws", - "cindex": 155 + "uname_release": "4.12.14-16.28-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1055-aws", - "cindex": 156 + "uname_release": "4.12.14-16.31-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1057-aws", - "cindex": 156 + "uname_release": "4.12.14-16.34-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1060-aws", - "cindex": 156 + "uname_release": "4.12.14-16.38-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1061-aws", - "cindex": 156 + "uname_release": "4.12.14-16.41-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - 
"uname_release": "4.4.0-1062-aws", - "cindex": 156 + "uname_release": "4.12.14-16.44-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1063-aws", - "cindex": 156 + "uname_release": "4.12.14-16.47-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1065-aws", - "cindex": 156 + "uname_release": "4.12.14-16.50-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1066-aws", - "cindex": 156 + "uname_release": "4.12.14-16.53-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1067-aws", - "cindex": 156 + "uname_release": "4.12.14-16.56-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1069-aws", - "cindex": 156 + "uname_release": "4.12.14-16.59-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1070-aws", - "cindex": 156 + "uname_release": "4.12.14-16.62-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1072-aws", - "cindex": 156 + "uname_release": "4.12.14-16.65-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1073-aws", - "cindex": 156 + "uname_release": "4.12.14-16.68-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1074-aws", - "cindex": 156 + "uname_release": "4.12.14-16.7-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1075-aws", - "cindex": 156 + "uname_release": "4.12.14-16.73-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1077-aws", - "cindex": 156 + "uname_release": "4.12.14-16.76-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1079-aws", - "cindex": 156 + "uname_release": "4.12.14-16.80-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-108-generic", - "cindex": 153 + "uname_release": "4.12.14-16.85-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1081-aws", - "cindex": 156 + "uname_release": "4.12.14-16.88-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1083-aws", - "cindex": 156 + "uname_release": "4.12.14-16.91-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": 
"4.4.0-1084-aws", - "cindex": 156 + "uname_release": "4.12.14-16.94-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "12.5", "arch": "x86_64", - "uname_release": "4.4.0-1085-aws", - "cindex": 156 + "uname_release": "4.12.14-16.97-azure", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1087-aws", - "cindex": 156 + "uname_release": "4.12.14-150.14-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1088-aws", - "cindex": 156 + "uname_release": "4.12.14-150.17-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-109-generic", - "cindex": 153 + "uname_release": "4.12.14-150.22-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1090-aws", - "cindex": 156 + "uname_release": "4.12.14-150.27-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1092-aws", - "cindex": 156 + "uname_release": "4.12.14-150.32-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1094-aws", - "cindex": 156 + "uname_release": "4.12.14-150.35-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1095-aws", - "cindex": 156 + "uname_release": "4.12.14-150.38-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1096-aws", - "cindex": 156 + "uname_release": "4.12.14-150.41-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1098-aws", - "cindex": 156 + "uname_release": "4.12.14-150.47-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1099-aws", - "cindex": 156 + "uname_release": "4.12.14-23-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-11-generic", - "cindex": 152 + "uname_release": "4.12.14-25.13-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1100-aws", - "cindex": 156 + "uname_release": "4.12.14-25.16-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1101-aws", - "cindex": 156 + "uname_release": "4.12.14-25.19-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1102-aws", - "cindex": 156 + "uname_release": "4.12.14-25.22-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - 
"uname_release": "4.4.0-1104-aws", - "cindex": 156 + "uname_release": "4.12.14-25.25-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1105-aws", - "cindex": 156 + "uname_release": "4.12.14-25.28-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1106-aws", - "cindex": 156 + "uname_release": "4.12.14-25.3-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.0", "arch": "x86_64", - "uname_release": "4.4.0-1107-aws", - "cindex": 157 + "uname_release": "4.12.14-25.6-default", + "cindex": 71 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1109-aws", - "cindex": 157 + "uname_release": "4.12.14-195-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1110-aws", - "cindex": 157 + "uname_release": "4.12.14-197.10-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1111-aws", - "cindex": 157 + "uname_release": "4.12.14-197.15-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1112-aws", - "cindex": 157 + "uname_release": "4.12.14-197.18-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1113-aws", - "cindex": 157 + "uname_release": "4.12.14-197.21-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1114-aws", - "cindex": 157 + "uname_release": "4.12.14-197.26-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1117-aws", - "cindex": 157 + "uname_release": "4.12.14-197.29-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1118-aws", - "cindex": 157 + "uname_release": "4.12.14-197.34-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1119-aws", - "cindex": 157 + "uname_release": "4.12.14-197.37-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-112-generic", - "cindex": 153 + "uname_release": "4.12.14-197.4-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1121-aws", - "cindex": 157 + "uname_release": "4.12.14-197.40-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1122-aws", - "cindex": 157 + "uname_release": "4.12.14-197.45-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": 
"x86_64", - "uname_release": "4.4.0-1123-aws", - "cindex": 157 + "uname_release": "4.12.14-197.48-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1124-aws", - "cindex": 157 + "uname_release": "4.12.14-197.51-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1126-aws", - "cindex": 158 + "uname_release": "4.12.14-197.56-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1127-aws", - "cindex": 158 + "uname_release": "4.12.14-197.61-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-1128-aws", - "cindex": 158 + "uname_release": "4.12.14-197.64-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-116-generic", - "cindex": 153 + "uname_release": "4.12.14-197.67-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-119-generic", - "cindex": 159 + "uname_release": "4.12.14-197.7-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-12-generic", - "cindex": 152 + "uname_release": "4.12.14-197.72-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-121-generic", - "cindex": 160 + "uname_release": "4.12.14-197.75-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.1", "arch": "x86_64", - "uname_release": "4.4.0-122-generic", - "cindex": 160 + "uname_release": "4.12.14-197.78-default", + "cindex": 73 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-124-generic", - "cindex": 160 + "uname_release": "5.3.18-22-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-127-generic", - "cindex": 160 + "uname_release": "5.3.18-24.12-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-128-generic", - "cindex": 160 + "uname_release": "5.3.18-24.15-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-13-generic", - "cindex": 152 + "uname_release": "5.3.18-24.24-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-130-generic", - "cindex": 160 + "uname_release": "5.3.18-24.29-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-131-generic", - "cindex": 160 + "uname_release": "5.3.18-24.34-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", 
+ "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-133-generic", - "cindex": 160 + "uname_release": "5.3.18-24.37-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-134-generic", - "cindex": 160 + "uname_release": "5.3.18-24.43-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-135-generic", - "cindex": 160 + "uname_release": "5.3.18-24.46-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-137-generic", - "cindex": 160 + "uname_release": "5.3.18-24.49-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-138-generic", - "cindex": 160 + "uname_release": "5.3.18-24.52-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-139-generic", - "cindex": 160 + "uname_release": "5.3.18-24.53.4-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-14-generic", - "cindex": 152 + "uname_release": "5.3.18-24.61-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-140-generic", - "cindex": 160 + "uname_release": "5.3.18-24.64-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-141-generic", - "cindex": 160 + "uname_release": "5.3.18-24.67-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-142-generic", - "cindex": 160 + "uname_release": "5.3.18-24.70-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-143-generic", - "cindex": 160 + "uname_release": "5.3.18-24.75-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-145-generic", - "cindex": 160 + "uname_release": "5.3.18-24.78-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-146-generic", - "cindex": 160 + "uname_release": "5.3.18-24.83-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-148-generic", - "cindex": 160 + "uname_release": "5.3.18-24.86-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-15-generic", - "cindex": 152 + "uname_release": "5.3.18-24.9-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-150-generic", - "cindex": 160 + "uname_release": "5.3.18-24.93-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": 
"16.04", + "distrib": "sles", + "version": "15.2", "arch": "x86_64", - "uname_release": "4.4.0-151-generic", - "cindex": 160 + "uname_release": "5.3.18-24.96-default", + "cindex": 74 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-154-generic", - "cindex": 160 + "uname_release": "5.3.18-150300.59.43-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-157-generic", - "cindex": 160 + "uname_release": "5.3.18-150300.59.46-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-159-generic", - "cindex": 160 + "uname_release": "5.3.18-150300.59.49-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-16-generic", - "cindex": 152 + "uname_release": "5.3.18-150300.59.54-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-161-generic", - "cindex": 160 + "uname_release": "5.3.18-150300.59.60-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-164-generic", - "cindex": 160 + "uname_release": "5.3.18-150300.59.63-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-165-generic", - "cindex": 160 + "uname_release": "5.3.18-150300.59.68-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-166-generic", - "cindex": 160 + "uname_release": "5.3.18-150300.59.71-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-168-generic", - "cindex": 160 + "uname_release": "5.3.18-150300.59.76-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-169-generic", - "cindex": 160 + "uname_release": "5.3.18-150300.59.81-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-17-generic", - "cindex": 152 + "uname_release": "5.3.18-150300.59.87-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-170-generic", - "cindex": 160 + "uname_release": "5.3.18-57-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-171-generic", - "cindex": 160 + "uname_release": "5.3.18-59.10-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-173-generic", - "cindex": 160 + "uname_release": "5.3.18-59.13-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-174-generic", - "cindex": 
160 + "uname_release": "5.3.18-59.16-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-176-generic", - "cindex": 160 + "uname_release": "5.3.18-59.19-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-177-generic", - "cindex": 160 + "uname_release": "5.3.18-59.24-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-178-generic", - "cindex": 160 + "uname_release": "5.3.18-59.27-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-179-generic", - "cindex": 161 + "uname_release": "5.3.18-59.30-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-18-generic", - "cindex": 152 + "uname_release": "5.3.18-59.34-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-184-generic", - "cindex": 161 + "uname_release": "5.3.18-59.37-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-185-generic", - "cindex": 161 + "uname_release": "5.3.18-59.40-default", + "cindex": 75 }, { - "distrib": "ubuntu", - "version": "16.04", + "distrib": "sles", + "version": "15.3", "arch": "x86_64", - "uname_release": "4.4.0-186-generic", - "cindex": 161 + "uname_release": "5.3.18-59.5-default", + "cindex": 75 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-187-generic", - "cindex": 161 + "uname_release": "4.10.0-1004-gcp", + "cindex": 76 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-189-generic", - "cindex": 161 + "uname_release": "4.10.0-1006-gcp", + "cindex": 76 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-190-generic", - "cindex": 161 + "uname_release": "4.10.0-1007-gcp", + "cindex": 76 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-193-generic", - "cindex": 161 + "uname_release": "4.10.0-1008-gcp", + "cindex": 76 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-194-generic", - "cindex": 161 + "uname_release": "4.10.0-1009-gcp", + "cindex": 76 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-197-generic", - "cindex": 161 + "uname_release": "4.10.0-14-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-198-generic", - "cindex": 161 + "uname_release": "4.10.0-19-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-2-generic", - "cindex": 162 + "uname_release": "4.10.0-20-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-200-generic", - "cindex": 161 + "uname_release": "4.10.0-21-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-201-generic", - "cindex": 161 + "uname_release": 
"4.10.0-22-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-203-generic", - "cindex": 161 + "uname_release": "4.10.0-24-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-204-generic", - "cindex": 161 + "uname_release": "4.10.0-26-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-206-generic", - "cindex": 161 + "uname_release": "4.10.0-27-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-208-generic", - "cindex": 163 + "uname_release": "4.10.0-28-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-209-generic", - "cindex": 163 + "uname_release": "4.10.0-30-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-21-generic", - "cindex": 152 + "uname_release": "4.10.0-32-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-210-generic", - "cindex": 163 + "uname_release": "4.10.0-33-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-22-generic", - "cindex": 152 + "uname_release": "4.10.0-35-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-24-generic", - "cindex": 152 + "uname_release": "4.10.0-37-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-28-generic", - "cindex": 152 + "uname_release": "4.10.0-38-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-31-generic", - "cindex": 152 + "uname_release": "4.10.0-40-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-34-generic", - "cindex": 152 + "uname_release": "4.10.0-42-generic", + "cindex": 77 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-36-generic", - "cindex": 152 + "uname_release": "4.11.0-1009-azure", + "cindex": 78 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-38-generic", - "cindex": 152 + "uname_release": "4.11.0-1011-azure", + "cindex": 78 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-4-generic", - "cindex": 162 + "uname_release": "4.11.0-1013-azure", + "cindex": 79 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-42-generic", - "cindex": 152 + "uname_release": "4.11.0-1014-azure", + "cindex": 79 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-43-generic", - "cindex": 152 + "uname_release": "4.11.0-1015-azure", + "cindex": 79 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-45-generic", - "cindex": 152 + "uname_release": "4.11.0-1016-azure", + "cindex": 79 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-47-generic", - "cindex": 152 + "uname_release": "4.11.0-13-generic", + "cindex": 79 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-51-generic", - "cindex": 152 + "uname_release": "4.11.0-14-generic", + "cindex": 79 }, { "distrib": "ubuntu", 
"version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-53-generic", - "cindex": 152 + "uname_release": "4.13.0-1002-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-57-generic", - "cindex": 164 + "uname_release": "4.13.0-1005-azure", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-59-generic", - "cindex": 164 + "uname_release": "4.13.0-1006-azure", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-6-generic", - "cindex": 162 + "uname_release": "4.13.0-1006-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-62-generic", - "cindex": 164 + "uname_release": "4.13.0-1007-azure", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-63-generic", - "cindex": 164 + "uname_release": "4.13.0-1007-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-64-generic", - "cindex": 164 + "uname_release": "4.13.0-1008-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-65-generic", - "cindex": 164 + "uname_release": "4.13.0-1009-azure", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-66-generic", - "cindex": 164 + "uname_release": "4.13.0-1011-azure", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-67-generic", - "cindex": 164 + "uname_release": "4.13.0-1011-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-7-generic", - "cindex": 152 + "uname_release": "4.13.0-1012-azure", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-70-generic", - "cindex": 164 + "uname_release": "4.13.0-1012-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-71-generic", - "cindex": 164 + "uname_release": "4.13.0-1013-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-72-generic", - "cindex": 164 + "uname_release": "4.13.0-1014-azure", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-75-generic", - "cindex": 164 + "uname_release": "4.13.0-1015-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-77-generic", - "cindex": 164 + "uname_release": "4.13.0-1016-azure", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-78-generic", - "cindex": 164 + "uname_release": "4.13.0-1017-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-79-generic", - "cindex": 164 + "uname_release": "4.13.0-1018-azure", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-8-generic", - "cindex": 152 + "uname_release": "4.13.0-1019-gcp", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-81-generic", - "cindex": 164 + "uname_release": "4.13.0-16-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-83-generic", - "cindex": 164 + 
"uname_release": "4.13.0-17-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-87-generic", - "cindex": 164 + "uname_release": "4.13.0-19-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-89-generic", - "cindex": 164 + "uname_release": "4.13.0-21-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-9-generic", - "cindex": 152 + "uname_release": "4.13.0-25-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-91-generic", - "cindex": 164 + "uname_release": "4.13.0-26-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-92-generic", - "cindex": 164 + "uname_release": "4.13.0-31-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-93-generic", - "cindex": 153 + "uname_release": "4.13.0-32-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-96-generic", - "cindex": 153 + "uname_release": "4.13.0-36-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-97-generic", - "cindex": 153 + "uname_release": "4.13.0-37-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.4.0-98-generic", - "cindex": 153 + "uname_release": "4.13.0-38-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-28-generic", - "cindex": 165 + "uname_release": "4.13.0-39-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-30-generic", - "cindex": 165 + "uname_release": "4.13.0-41-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-32-generic", - "cindex": 165 + "uname_release": "4.13.0-43-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-34-generic", - "cindex": 166 + "uname_release": "4.13.0-45-generic", + "cindex": 80 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-36-generic", - "cindex": 166 + "uname_release": "4.15.0-101-generic", + "cindex": 81 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-39-generic", - "cindex": 166 + "uname_release": "4.15.0-1012-azure", + "cindex": 82 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-41-generic", - "cindex": 166 + "uname_release": "4.15.0-1013-azure", + "cindex": 82 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-42-generic", - "cindex": 166 + "uname_release": "4.15.0-1014-azure", + "cindex": 83 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-44-generic", - "cindex": 166 + "uname_release": "4.15.0-1014-gcp", + "cindex": 83 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-45-generic", - "cindex": 166 + "uname_release": "4.15.0-1015-gcp", + "cindex": 83 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-46-generic", - "cindex": 166 + "uname_release": "4.15.0-1017-gcp", + "cindex": 83 }, { "distrib": "ubuntu", 
"version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-49-generic", - "cindex": 166 + "uname_release": "4.15.0-1018-azure", + "cindex": 83 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-51-generic", - "cindex": 166 + "uname_release": "4.15.0-1018-gcp", + "cindex": 83 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-52-generic", - "cindex": 166 + "uname_release": "4.15.0-1019-azure", + "cindex": 83 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-53-generic", - "cindex": 166 + "uname_release": "4.15.0-1019-gcp", + "cindex": 83 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-54-generic", - "cindex": 166 + "uname_release": "4.15.0-1021-azure", + "cindex": 83 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-56-generic", - "cindex": 166 + "uname_release": "4.15.0-1021-gcp", + "cindex": 83 }, { "distrib": "ubuntu", "version": "16.04", "arch": "x86_64", - "uname_release": "4.8.0-58-generic", - "cindex": 166 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.13.0-16-generic", - "cindex": 167 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.13.0-17-generic", - "cindex": 167 + "uname_release": "4.15.0-1022-azure", + "cindex": 83 }, { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.13.0-25-generic", - "cindex": 167 + "distrib": "ubuntu", + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1023-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.13.0-32-generic", - "cindex": 167 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1023-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-10-generic", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1024-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-101-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1025-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1029-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1025-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1031-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1026-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1032-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1027-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1033-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1028-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1034-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1028-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1035-aws", - "cindex": 168 + 
"version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1029-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1037-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1030-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1039-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1030-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1040-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1030-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1041-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1031-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1043-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1031-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1044-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1032-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1045-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1032-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1047-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1032-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1048-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1033-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1050-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1033-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1051-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1034-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1052-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1035-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1054-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1035-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1056-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1036-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1057-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1036-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1058-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1036-gcp", + 
"cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-106-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1037-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1060-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1037-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1063-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1039-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1065-aws", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1039-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1066-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1040-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1067-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1040-azure", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1073-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1040-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1076-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1041-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1077-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1041-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1079-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1041-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-108-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1042-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1080-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1042-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1082-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1043-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1083-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1044-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1086-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1044-gcp", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1087-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1045-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - 
"uname_release": "4.15.0-1088-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1045-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-109-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1046-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1090-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1046-gcp", + "cindex": 85 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1091-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1047-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1092-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1047-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1093-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1047-gcp", + "cindex": 85 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1094-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1048-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1095-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1049-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1096-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1049-gcp", + "cindex": 85 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1097-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1050-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1098-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1050-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1099-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1050-gcp", + "cindex": 85 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1101-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1051-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1102-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1051-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1103-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1052-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1106-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1052-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1109-aws", - "cindex": 169 + "version": "16.04", + 
"arch": "x86_64", + "uname_release": "4.15.0-1052-gcp", + "cindex": 85 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-111-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1054-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1110-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1055-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1111-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1055-gcp", + "cindex": 85 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1112-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1056-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1114-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1056-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1115-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1057-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1116-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1057-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1118-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1058-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1119-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1058-gcp", + "cindex": 85 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-112-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1059-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1121-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-106-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1123-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1060-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1124-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1060-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1126-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1060-gcp", + "cindex": 85 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1127-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1061-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1128-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1061-gcp", + "cindex": 86 }, { 
"distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1130-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1063-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1133-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1063-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1136-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1064-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1137-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1065-aws", + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1139-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1066-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1140-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1066-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1141-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1067-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1142-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1067-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1143-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1069-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1144-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-107-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1146-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1071-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1147-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1071-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1148-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1073-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-115-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1074-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1150-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1075-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1151-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1077-azure", + "cindex": 84 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - 
"uname_release": "4.15.0-1153-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1077-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1154-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1078-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1155-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1079-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1156-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1080-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1157-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1080-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-1158-aws", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1081-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-117-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1082-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-118-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1082-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-12-generic", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1083-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-121-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1083-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-122-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1083-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-123-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1084-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-124-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1085-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-126-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1086-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-128-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1087-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-129-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1088-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-13-generic", - "cindex": 168 + 
"version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1088-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-130-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1089-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-132-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1090-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-134-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1090-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-135-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1091-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-136-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1091-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-137-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1091-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-139-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1092-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-140-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1092-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-141-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1093-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-142-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1093-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-143-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1093-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-144-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1094-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-147-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1094-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-15-generic", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1095-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-151-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1095-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-153-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + 
"uname_release": "4.15.0-1095-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-154-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1096-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-156-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1096-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-158-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1096-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-159-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1097-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-161-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1097-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-162-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1098-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-163-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1098-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-166-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1098-gcp", + "cindex": 86 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-167-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1099-aws", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-169-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1100-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-171-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1102-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-173-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1103-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-175-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1106-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-176-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1108-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-177-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1109-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-180-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": 
"4.15.0-1110-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-184-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1111-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-187-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1112-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-188-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-1113-azure", + "cindex": 87 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-189-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-112-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-19-generic", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-115-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-191-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-117-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-192-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-118-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-193-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-120-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-194-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-122-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-196-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-123-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-197-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-126-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-20-generic", - "cindex": 168 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-128-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-200-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-129-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-201-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-13-generic", + "cindex": 82 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-202-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-132-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-204-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": 
"4.15.0-133-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-206-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-136-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-208-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-137-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-209-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-139-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-210-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-140-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-211-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-142-generic", + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-212-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-15-generic", + "cindex": 82 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-213-generic", - "cindex": 169 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.15.0-20-generic", + "cindex": 82 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-22-generic", - "cindex": 168 + "cindex": 82 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-23-generic", - "cindex": 168 + "cindex": 82 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-24-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-29-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-30-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-32-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-33-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-34-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-36-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-38-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-39-generic", - "cindex": 168 + "cindex": 83 }, { 
"distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-42-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-43-generic", - "cindex": 168 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.15.0-44-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-45-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-46-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-47-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-48-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-50-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-51-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-52-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-54-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-55-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-58-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-60-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-62-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-64-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-65-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-66-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-69-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-70-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", 
- "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-72-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-74-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-76-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-88-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-91-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-96-generic", - "cindex": 168 + "cindex": 83 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", + "version": "16.04", + "arch": "x86_64", "uname_release": "4.15.0-99-generic", - "cindex": 169 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1006-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1007-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1008-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1011-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1012-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1013-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1016-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1017-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1018-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-1020-aws", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-13-generic", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-14-generic", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-15-generic", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-16-generic", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-17-generic", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-18-generic", - "cindex": 170 - }, - { - "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-20-generic", - "cindex": 170 + "cindex": 81 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-21-generic", - "cindex": 170 + "version": "16.04", + "arch": "x86_64", + "uname_release": 
"4.18.0-1006-azure", + "cindex": 88 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-22-generic", - "cindex": 170 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.2.0-16-generic", + "cindex": 89 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-24-generic", - "cindex": 170 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.2.0-17-generic", + "cindex": 89 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "4.18.0-25-generic", - "cindex": 170 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.2.0-19-generic", + "cindex": 89 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1011-aws", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.3.0-1-generic", + "cindex": 90 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1012-aws", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.3.0-2-generic", + "cindex": 90 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1014-aws", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.3.0-5-generic", + "cindex": 90 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1016-aws", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.3.0-6-generic", + "cindex": 90 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1018-aws", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.3.0-7-generic", + "cindex": 90 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1019-aws", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-10-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1021-aws", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1001-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1022-aws", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1003-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1023-aws", - "cindex": 172 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1003-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1024-aws", - "cindex": 172 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1004-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1025-aws", - "cindex": 172 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1005-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-1027-aws", - "cindex": 172 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1006-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-15-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1007-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - 
"uname_release": "5.0.0-16-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1008-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-17-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1009-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-19-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1009-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-20-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-101-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-23-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1010-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-25-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1011-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-27-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1012-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-29-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1012-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-31-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1013-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-32-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1013-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-35-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1014-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-36-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1016-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-37-generic", - "cindex": 171 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1016-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-43-generic", - "cindex": 172 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1017-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-44-generic", - "cindex": 172 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1018-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-47-generic", - "cindex": 172 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1018-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-48-generic", - "cindex": 173 + "version": "16.04", + "arch": "x86_64", 
+ "uname_release": "4.4.0-1020-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-52-generic", - "cindex": 173 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1022-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-53-generic", - "cindex": 173 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1022-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-58-generic", - "cindex": 173 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1024-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-60-generic", - "cindex": 173 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1026-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-61-generic", - "cindex": 173 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1026-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-62-generic", - "cindex": 173 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1027-gke", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-63-generic", - "cindex": 173 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1028-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.0.0-65-generic", - "cindex": 173 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1028-gke", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-1016-aws", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-103-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-1017-aws", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1030-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-1019-aws", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1031-aws", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-1023-aws", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1031-gke", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-1028-aws", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1032-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-1030-aws", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1032-gke", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-1032-aws", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1033-gke", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-1033-aws", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1034-gke", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": 
"arm64", - "uname_release": "5.3.0-1034-aws", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1035-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-1035-aws", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1037-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-19-generic", - "cindex": 176 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1038-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-22-generic", - "cindex": 177 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1039-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-23-generic", - "cindex": 177 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-104-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-24-generic", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1041-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-26-generic", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1043-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-28-generic", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1044-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-40-generic", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1047-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-42-generic", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1048-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-45-generic", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1049-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-46-generic", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1050-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-51-generic", - "cindex": 174 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1052-aws", + "cindex": 93 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-53-generic", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1054-aws", + "cindex": 94 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-59-generic", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1055-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-61-generic", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1057-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-62-generic", - "cindex": 175 + "version": "16.04", + "arch": 
"x86_64", + "uname_release": "4.4.0-1060-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.3.0-64-generic", - "cindex": 175 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1061-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1018-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1062-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1020-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1063-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1022-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1065-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1024-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1066-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1025-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1067-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1028-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1069-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1029-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1070-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1030-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1072-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1032-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1073-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1034-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1074-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1035-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1075-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1037-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1077-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1038-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1079-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1039-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-108-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1041-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1081-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": 
"arm64", - "uname_release": "5.4.0-1043-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1083-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1045-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1084-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1047-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1085-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1048-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1087-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1049-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1088-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1051-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-109-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1054-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1090-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1055-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1092-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1056-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1094-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1057-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1095-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1058-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1096-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1059-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1098-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-1060-aws", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1099-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-37-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-11-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-39-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1100-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-40-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1101-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-42-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + 
"uname_release": "4.4.0-1102-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-45-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1104-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-47-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1105-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-48-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1106-aws", + "cindex": 95 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-51-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1107-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-52-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1109-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-53-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1110-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-54-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1111-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-56-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1112-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-58-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1113-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-59-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1114-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-60-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1117-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-62-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1118-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-64-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1119-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-65-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-112-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-66-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1121-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-67-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1122-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": 
"18.04", - "arch": "arm64", - "uname_release": "5.4.0-70-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1123-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-71-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1124-aws", + "cindex": 96 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-72-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1126-aws", + "cindex": 97 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-73-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1127-aws", + "cindex": 97 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-74-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-1128-aws", + "cindex": 97 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-77-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-116-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-80-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-119-generic", + "cindex": 98 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-81-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-12-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-84-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-121-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-86-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-122-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-87-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-124-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-89-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-127-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-90-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-128-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", - "arch": "arm64", - "uname_release": "5.4.0-91-generic", - "cindex": 178 + "version": "16.04", + "arch": "x86_64", + "uname_release": "4.4.0-13-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.13.0-16-generic", - "cindex": 179 + "uname_release": "4.4.0-130-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.13.0-17-generic", - "cindex": 179 + "uname_release": "4.4.0-131-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.13.0-25-generic", - "cindex": 179 + 
"uname_release": "4.4.0-133-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.13.0-32-generic", - "cindex": 179 + "uname_release": "4.4.0-134-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-10-generic", - "cindex": 180 + "uname_release": "4.4.0-135-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1001-aws", - "cindex": 180 + "uname_release": "4.4.0-137-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1001-gcp", - "cindex": 180 + "uname_release": "4.4.0-138-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1002-azure", - "cindex": 180 + "uname_release": "4.4.0-139-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1003-aws", - "cindex": 180 + "uname_release": "4.4.0-14-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1003-azure", - "cindex": 180 + "uname_release": "4.4.0-140-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1003-gcp", - "cindex": 180 + "uname_release": "4.4.0-141-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1004-azure", - "cindex": 180 + "uname_release": "4.4.0-142-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1005-aws", - "cindex": 180 + "uname_release": "4.4.0-143-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1005-gcp", - "cindex": 180 + "uname_release": "4.4.0-145-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1006-aws", - "cindex": 180 + "uname_release": "4.4.0-146-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1006-gcp", - "cindex": 180 + "uname_release": "4.4.0-148-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1007-aws", - "cindex": 180 + "uname_release": "4.4.0-15-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1008-azure", - "cindex": 180 + "uname_release": "4.4.0-150-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1008-gcp", - "cindex": 180 + "uname_release": "4.4.0-151-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1009-aws", - "cindex": 180 + "uname_release": "4.4.0-154-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1009-azure", - "cindex": 
180 + "uname_release": "4.4.0-157-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1009-gcp", - "cindex": 180 + "uname_release": "4.4.0-159-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-101-generic", - "cindex": 181 + "uname_release": "4.4.0-16-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1010-aws", - "cindex": 180 + "uname_release": "4.4.0-161-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1010-gcp", - "cindex": 182 + "uname_release": "4.4.0-164-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1011-aws", - "cindex": 182 + "uname_release": "4.4.0-165-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1012-azure", - "cindex": 180 + "uname_release": "4.4.0-166-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1013-azure", - "cindex": 180 + "uname_release": "4.4.0-168-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1014-azure", - "cindex": 182 + "uname_release": "4.4.0-169-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1014-gcp", - "cindex": 182 + "uname_release": "4.4.0-17-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1015-gcp", - "cindex": 182 + "uname_release": "4.4.0-170-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1016-aws", - "cindex": 182 + "uname_release": "4.4.0-171-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1017-aws", - "cindex": 182 + "uname_release": "4.4.0-173-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1017-gcp", - "cindex": 182 + "uname_release": "4.4.0-174-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1018-azure", - "cindex": 182 + "uname_release": "4.4.0-176-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1018-gcp", - "cindex": 182 + "uname_release": "4.4.0-177-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1019-aws", - "cindex": 182 + "uname_release": "4.4.0-178-generic", + "cindex": 99 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1019-azure", - "cindex": 182 + "uname_release": "4.4.0-179-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1019-gcp", - 
"cindex": 182 + "uname_release": "4.4.0-18-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1020-aws", - "cindex": 182 + "uname_release": "4.4.0-184-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1021-aws", - "cindex": 182 + "uname_release": "4.4.0-185-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1021-azure", - "cindex": 182 + "uname_release": "4.4.0-186-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1021-gcp", - "cindex": 182 + "uname_release": "4.4.0-187-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1022-azure", - "cindex": 182 + "uname_release": "4.4.0-189-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1023-aws", - "cindex": 182 + "uname_release": "4.4.0-190-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1023-azure", - "cindex": 182 + "uname_release": "4.4.0-193-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1023-gcp", - "cindex": 182 + "uname_release": "4.4.0-194-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1024-gcp", - "cindex": 182 + "uname_release": "4.4.0-197-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1025-aws", - "cindex": 182 + "uname_release": "4.4.0-198-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1025-azure", - "cindex": 182 + "uname_release": "4.4.0-2-generic", + "cindex": 101 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1025-gcp", - "cindex": 182 + "uname_release": "4.4.0-200-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1026-gcp", - "cindex": 182 + "uname_release": "4.4.0-201-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1027-aws", - "cindex": 182 + "uname_release": "4.4.0-203-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1027-gcp", - "cindex": 182 + "uname_release": "4.4.0-204-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1028-azure", - "cindex": 182 + "uname_release": "4.4.0-206-generic", + "cindex": 100 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1028-gcp", - "cindex": 182 + "uname_release": "4.4.0-208-generic", + "cindex": 102 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": 
"4.15.0-1029-aws", - "cindex": 182 + "uname_release": "4.4.0-209-generic", + "cindex": 102 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1029-gcp", - "cindex": 182 + "uname_release": "4.4.0-21-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1030-azure", - "cindex": 182 + "uname_release": "4.4.0-210-generic", + "cindex": 102 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1030-gcp", - "cindex": 182 + "uname_release": "4.4.0-22-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1030-gke", - "cindex": 182 + "uname_release": "4.4.0-24-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1031-aws", - "cindex": 182 + "uname_release": "4.4.0-28-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1031-azure", - "cindex": 182 + "uname_release": "4.4.0-31-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1032-aws", - "cindex": 182 + "uname_release": "4.4.0-34-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1032-azure", - "cindex": 182 + "uname_release": "4.4.0-36-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1032-gcp", - "cindex": 182 + "uname_release": "4.4.0-38-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1032-gke", - "cindex": 182 + "uname_release": "4.4.0-4-generic", + "cindex": 101 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1033-aws", - "cindex": 182 + "uname_release": "4.4.0-42-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1033-gcp", - "cindex": 182 + "uname_release": "4.4.0-43-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1033-gke", - "cindex": 182 + "uname_release": "4.4.0-45-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1034-aws", - "cindex": 182 + "uname_release": "4.4.0-47-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1034-gcp", - "cindex": 182 + "uname_release": "4.4.0-51-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1034-gke", - "cindex": 182 + "uname_release": "4.4.0-53-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1035-aws", - "cindex": 182 + "uname_release": "4.4.0-57-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": 
"4.15.0-1035-azure", - "cindex": 182 + "uname_release": "4.4.0-59-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1036-azure", - "cindex": 182 + "uname_release": "4.4.0-6-generic", + "cindex": 101 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1036-gcp", - "cindex": 182 + "uname_release": "4.4.0-62-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1036-gke", - "cindex": 182 + "uname_release": "4.4.0-63-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1037-aws", - "cindex": 182 + "uname_release": "4.4.0-64-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1037-azure", - "cindex": 182 + "uname_release": "4.4.0-65-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1037-gcp", - "cindex": 182 + "uname_release": "4.4.0-66-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1037-gke", - "cindex": 182 + "uname_release": "4.4.0-67-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1039-aws", - "cindex": 182 + "uname_release": "4.4.0-7-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1040-aws", - "cindex": 182 + "uname_release": "4.4.0-70-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1040-gcp", - "cindex": 182 + "uname_release": "4.4.0-71-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1040-gke", - "cindex": 182 + "uname_release": "4.4.0-72-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1041-aws", - "cindex": 182 + "uname_release": "4.4.0-75-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1041-gke", - "cindex": 182 + "uname_release": "4.4.0-77-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1042-gcp", - "cindex": 182 + "uname_release": "4.4.0-78-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1042-gke", - "cindex": 182 + "uname_release": "4.4.0-79-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1043-aws", - "cindex": 182 + "uname_release": "4.4.0-8-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1044-aws", - "cindex": 182 + "uname_release": "4.4.0-81-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": 
"4.15.0-1044-gcp", - "cindex": 182 + "uname_release": "4.4.0-83-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1044-gke", - "cindex": 182 + "uname_release": "4.4.0-87-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1045-aws", - "cindex": 182 + "uname_release": "4.4.0-89-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1045-gke", - "cindex": 183 + "uname_release": "4.4.0-9-generic", + "cindex": 91 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1046-gke", - "cindex": 183 + "uname_release": "4.4.0-91-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1047-aws", - "cindex": 182 + "uname_release": "4.4.0-92-generic", + "cindex": 103 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1048-aws", - "cindex": 182 + "uname_release": "4.4.0-93-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1048-gke", - "cindex": 183 + "uname_release": "4.4.0-96-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1049-gke", - "cindex": 183 + "uname_release": "4.4.0-97-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1050-aws", - "cindex": 182 + "uname_release": "4.4.0-98-generic", + "cindex": 92 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1050-gke", - "cindex": 183 + "uname_release": "4.8.0-28-generic", + "cindex": 104 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1051-aws", - "cindex": 182 + "uname_release": "4.8.0-30-generic", + "cindex": 104 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1052-aws", - "cindex": 182 + "uname_release": "4.8.0-32-generic", + "cindex": 104 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1052-gke", - "cindex": 183 + "uname_release": "4.8.0-34-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1054-aws", - "cindex": 182 + "uname_release": "4.8.0-36-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1055-gke", - "cindex": 183 + "uname_release": "4.8.0-39-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1056-aws", - "cindex": 182 + "uname_release": "4.8.0-41-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1057-aws", - "cindex": 182 + "uname_release": "4.8.0-42-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": 
"4.15.0-1057-gke", - "cindex": 183 + "uname_release": "4.8.0-44-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1058-aws", - "cindex": 182 + "uname_release": "4.8.0-45-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1058-gke", - "cindex": 184 + "uname_release": "4.8.0-46-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1059-gke", - "cindex": 184 + "uname_release": "4.8.0-49-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-106-generic", - "cindex": 181 + "uname_release": "4.8.0-51-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1060-aws", - "cindex": 182 + "uname_release": "4.8.0-52-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1063-aws", - "cindex": 182 + "uname_release": "4.8.0-53-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1063-gke", - "cindex": 184 + "uname_release": "4.8.0-54-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1064-gke", - "cindex": 184 + "uname_release": "4.8.0-56-generic", + "cindex": 105 }, { "distrib": "ubuntu", - "version": "18.04", + "version": "16.04", "arch": "x86_64", - "uname_release": "4.15.0-1065-aws", - "cindex": 182 + "uname_release": "4.8.0-58-generic", + "cindex": 105 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1066-aws", - "cindex": 181 + "uname_release": "4.13.0-16-generic", + "cindex": 106 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1066-gke", - "cindex": 184 + "uname_release": "4.13.0-17-generic", + "cindex": 106 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1067-aws", - "cindex": 181 + "uname_release": "4.13.0-25-generic", + "cindex": 106 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1067-gke", - "cindex": 184 + "uname_release": "4.13.0-32-generic", + "cindex": 106 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1069-gke", - "cindex": 184 + "uname_release": "4.15.0-10-generic", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1070-gke", - "cindex": 184 + "uname_release": "4.15.0-1001-aws", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1071-gcp", - "cindex": 184 + "uname_release": "4.15.0-1001-gcp", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1072-gke", - "cindex": 184 + "uname_release": "4.15.0-1002-azure", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1073-aws", - "cindex": 181 + "uname_release": "4.15.0-1003-aws", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1073-gke", - "cindex": 184 + 
"uname_release": "4.15.0-1003-azure", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1074-gke", - "cindex": 184 + "uname_release": "4.15.0-1003-gcp", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1076-aws", - "cindex": 181 + "uname_release": "4.15.0-1004-azure", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1076-gke", - "cindex": 184 + "uname_release": "4.15.0-1005-aws", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1077-aws", - "cindex": 181 + "uname_release": "4.15.0-1005-gcp", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1077-gcp", - "cindex": 184 + "uname_release": "4.15.0-1006-aws", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1077-gke", - "cindex": 184 + "uname_release": "4.15.0-1006-gcp", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1078-gcp", - "cindex": 184 + "uname_release": "4.15.0-1007-aws", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1078-gke", - "cindex": 184 + "uname_release": "4.15.0-1008-azure", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1079-aws", - "cindex": 181 + "uname_release": "4.15.0-1008-gcp", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1079-gke", - "cindex": 184 + "uname_release": "4.15.0-1009-aws", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-108-generic", - "cindex": 181 + "uname_release": "4.15.0-1009-azure", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1080-aws", - "cindex": 181 + "uname_release": "4.15.0-1009-gcp", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1080-gcp", - "cindex": 184 + "uname_release": "4.15.0-101-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1081-gcp", - "cindex": 184 + "uname_release": "4.15.0-1010-aws", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1082-aws", - "cindex": 181 + "uname_release": "4.15.0-1010-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1082-azure", - "cindex": 185 + "uname_release": "4.15.0-1011-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1083-aws", - "cindex": 181 + "uname_release": "4.15.0-1012-azure", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1083-azure", - "cindex": 185 + "uname_release": "4.15.0-1013-azure", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1083-gcp", - "cindex": 184 + "uname_release": "4.15.0-1014-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1084-gcp", - "cindex": 184 + "uname_release": "4.15.0-1014-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": 
"18.04", "arch": "x86_64", - "uname_release": "4.15.0-1086-aws", - "cindex": 181 + "uname_release": "4.15.0-1015-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1086-gcp", - "cindex": 184 + "uname_release": "4.15.0-1016-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1087-aws", - "cindex": 181 + "uname_release": "4.15.0-1017-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1087-gcp", - "cindex": 184 + "uname_release": "4.15.0-1017-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1088-aws", - "cindex": 181 + "uname_release": "4.15.0-1018-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1088-gcp", - "cindex": 184 + "uname_release": "4.15.0-1018-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1089-azure", - "cindex": 185 + "uname_release": "4.15.0-1019-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-109-generic", - "cindex": 181 + "uname_release": "4.15.0-1019-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1090-aws", - "cindex": 181 + "uname_release": "4.15.0-1019-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1090-gcp", - "cindex": 184 + "uname_release": "4.15.0-1020-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1091-aws", - "cindex": 181 + "uname_release": "4.15.0-1021-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1091-azure", - "cindex": 185 + "uname_release": "4.15.0-1021-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1091-gcp", - "cindex": 184 + "uname_release": "4.15.0-1021-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1092-aws", - "cindex": 181 + "uname_release": "4.15.0-1022-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1092-azure", - "cindex": 185 + "uname_release": "4.15.0-1023-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1092-gcp", - "cindex": 184 + "uname_release": "4.15.0-1023-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1093-aws", - "cindex": 181 + "uname_release": "4.15.0-1023-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1093-azure", - "cindex": 185 + "uname_release": "4.15.0-1024-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1093-gcp", - "cindex": 184 + "uname_release": "4.15.0-1025-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1094-aws", - "cindex": 181 + "uname_release": "4.15.0-1025-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1094-gcp", - "cindex": 184 + 
"uname_release": "4.15.0-1025-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1095-aws", - "cindex": 181 + "uname_release": "4.15.0-1026-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1095-azure", - "cindex": 185 + "uname_release": "4.15.0-1027-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1095-gcp", - "cindex": 184 + "uname_release": "4.15.0-1027-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1096-aws", - "cindex": 181 + "uname_release": "4.15.0-1028-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1096-azure", - "cindex": 185 + "uname_release": "4.15.0-1028-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1096-gcp", - "cindex": 184 + "uname_release": "4.15.0-1029-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1097-aws", - "cindex": 181 + "uname_release": "4.15.0-1029-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1097-gcp", - "cindex": 184 + "uname_release": "4.15.0-1030-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1098-aws", - "cindex": 181 + "uname_release": "4.15.0-1030-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1098-gcp", - "cindex": 184 + "uname_release": "4.15.0-1030-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1099-aws", - "cindex": 181 + "uname_release": "4.15.0-1031-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1099-azure", - "cindex": 185 + "uname_release": "4.15.0-1031-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1099-gcp", - "cindex": 184 + "uname_release": "4.15.0-1032-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1100-azure", - "cindex": 185 + "uname_release": "4.15.0-1032-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1100-gcp", - "cindex": 184 + "uname_release": "4.15.0-1032-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1101-aws", - "cindex": 181 + "uname_release": "4.15.0-1032-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1102-aws", - "cindex": 181 + "uname_release": "4.15.0-1033-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1102-azure", - "cindex": 185 + "uname_release": "4.15.0-1033-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1103-aws", - "cindex": 181 + "uname_release": "4.15.0-1033-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1103-azure", - "cindex": 185 + "uname_release": "4.15.0-1034-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": 
"18.04", "arch": "x86_64", - "uname_release": "4.15.0-1103-gcp", - "cindex": 184 + "uname_release": "4.15.0-1034-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1104-azure", - "cindex": 185 + "uname_release": "4.15.0-1034-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1106-aws", - "cindex": 181 + "uname_release": "4.15.0-1035-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1106-azure", - "cindex": 185 + "uname_release": "4.15.0-1035-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1106-gcp", - "cindex": 184 + "uname_release": "4.15.0-1036-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1107-gcp", - "cindex": 184 + "uname_release": "4.15.0-1036-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1108-azure", - "cindex": 185 + "uname_release": "4.15.0-1036-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1108-gcp", - "cindex": 184 + "uname_release": "4.15.0-1037-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1109-aws", - "cindex": 181 + "uname_release": "4.15.0-1037-azure", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1109-azure", - "cindex": 185 + "uname_release": "4.15.0-1037-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1109-gcp", - "cindex": 184 + "uname_release": "4.15.0-1037-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-111-generic", - "cindex": 181 + "uname_release": "4.15.0-1039-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1110-aws", - "cindex": 181 + "uname_release": "4.15.0-1040-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1110-azure", - "cindex": 185 + "uname_release": "4.15.0-1040-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1110-gcp", - "cindex": 184 + "uname_release": "4.15.0-1040-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1111-aws", - "cindex": 181 + "uname_release": "4.15.0-1041-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1111-azure", - "cindex": 185 + "uname_release": "4.15.0-1041-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1111-gcp", - "cindex": 184 + "uname_release": "4.15.0-1042-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1112-aws", - "cindex": 181 + "uname_release": "4.15.0-1042-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1112-azure", - "cindex": 185 + "uname_release": "4.15.0-1043-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1112-gcp", - "cindex": 184 + 
"uname_release": "4.15.0-1044-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1113-azure", - "cindex": 185 + "uname_release": "4.15.0-1044-gcp", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1114-aws", - "cindex": 181 + "uname_release": "4.15.0-1044-gke", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1114-azure", - "cindex": 185 + "uname_release": "4.15.0-1045-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1114-gcp", - "cindex": 184 + "uname_release": "4.15.0-1045-gke", + "cindex": 110 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1115-aws", - "cindex": 181 + "uname_release": "4.15.0-1046-gke", + "cindex": 110 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1115-azure", - "cindex": 185 + "uname_release": "4.15.0-1047-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1115-gcp", - "cindex": 184 + "uname_release": "4.15.0-1048-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1116-aws", - "cindex": 181 + "uname_release": "4.15.0-1048-gke", + "cindex": 110 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1116-gcp", - "cindex": 184 + "uname_release": "4.15.0-1049-gke", + "cindex": 110 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1118-aws", - "cindex": 181 + "uname_release": "4.15.0-1050-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1118-azure", - "cindex": 185 + "uname_release": "4.15.0-1050-gke", + "cindex": 110 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1118-gcp", - "cindex": 184 + "uname_release": "4.15.0-1051-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1119-aws", - "cindex": 181 + "uname_release": "4.15.0-1052-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1119-gcp", - "cindex": 184 + "uname_release": "4.15.0-1052-gke", + "cindex": 110 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-112-generic", - "cindex": 181 + "uname_release": "4.15.0-1054-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1120-gcp", - "cindex": 184 + "uname_release": "4.15.0-1055-gke", + "cindex": 110 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1121-aws", - "cindex": 181 + "uname_release": "4.15.0-1056-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1121-azure", - "cindex": 185 + "uname_release": "4.15.0-1057-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1121-gcp", - "cindex": 184 + "uname_release": "4.15.0-1057-gke", + "cindex": 110 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1122-azure", - "cindex": 185 + "uname_release": "4.15.0-1058-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", 
"arch": "x86_64", - "uname_release": "4.15.0-1122-gcp", - "cindex": 184 + "uname_release": "4.15.0-1058-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1123-aws", - "cindex": 181 + "uname_release": "4.15.0-1059-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1123-azure", - "cindex": 185 + "uname_release": "4.15.0-106-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1124-aws", - "cindex": 181 + "uname_release": "4.15.0-1060-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1124-azure", - "cindex": 185 + "uname_release": "4.15.0-1063-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1124-gcp", - "cindex": 184 + "uname_release": "4.15.0-1063-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1125-azure", - "cindex": 185 + "uname_release": "4.15.0-1064-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1126-aws", - "cindex": 181 + "uname_release": "4.15.0-1065-aws", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1126-azure", - "cindex": 185 + "uname_release": "4.15.0-1066-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1127-aws", - "cindex": 181 + "uname_release": "4.15.0-1066-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1127-azure", - "cindex": 185 + "uname_release": "4.15.0-1067-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1127-gcp", - "cindex": 184 + "uname_release": "4.15.0-1067-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1128-aws", - "cindex": 181 + "uname_release": "4.15.0-1069-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1129-azure", - "cindex": 185 + "uname_release": "4.15.0-1070-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1130-aws", - "cindex": 181 + "uname_release": "4.15.0-1071-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1130-azure", - "cindex": 185 + "uname_release": "4.15.0-1072-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1130-gcp", - "cindex": 184 + "uname_release": "4.15.0-1073-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1131-azure", - "cindex": 185 + "uname_release": "4.15.0-1073-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1131-gcp", - "cindex": 184 + "uname_release": "4.15.0-1074-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1133-aws", - "cindex": 181 + "uname_release": "4.15.0-1076-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1133-azure", - "cindex": 185 + "uname_release": 
"4.15.0-1076-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1134-azure", - "cindex": 185 + "uname_release": "4.15.0-1077-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1134-gcp", - "cindex": 184 + "uname_release": "4.15.0-1077-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1135-gcp", - "cindex": 184 + "uname_release": "4.15.0-1077-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1136-aws", - "cindex": 181 + "uname_release": "4.15.0-1078-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1136-azure", - "cindex": 185 + "uname_release": "4.15.0-1078-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1136-gcp", - "cindex": 184 + "uname_release": "4.15.0-1079-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1137-aws", - "cindex": 181 + "uname_release": "4.15.0-1079-gke", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1137-azure", - "cindex": 185 + "uname_release": "4.15.0-108-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1137-gcp", - "cindex": 184 + "uname_release": "4.15.0-1080-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1138-azure", - "cindex": 185 + "uname_release": "4.15.0-1080-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1138-gcp", - "cindex": 184 + "uname_release": "4.15.0-1081-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1139-aws", - "cindex": 181 + "uname_release": "4.15.0-1082-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1139-azure", - "cindex": 185 + "uname_release": "4.15.0-1082-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1140-aws", - "cindex": 181 + "uname_release": "4.15.0-1083-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1141-aws", - "cindex": 181 + "uname_release": "4.15.0-1083-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1141-gcp", - "cindex": 184 + "uname_release": "4.15.0-1083-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1142-aws", - "cindex": 181 + "uname_release": "4.15.0-1084-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1142-azure", - "cindex": 185 + "uname_release": "4.15.0-1086-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1142-gcp", - "cindex": 184 + "uname_release": "4.15.0-1086-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1143-aws", - "cindex": 181 + "uname_release": "4.15.0-1087-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": 
"x86_64", - "uname_release": "4.15.0-1143-gcp", - "cindex": 184 + "uname_release": "4.15.0-1087-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1144-aws", - "cindex": 181 + "uname_release": "4.15.0-1088-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1145-azure", - "cindex": 185 + "uname_release": "4.15.0-1088-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1145-gcp", - "cindex": 184 + "uname_release": "4.15.0-1089-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1146-aws", - "cindex": 181 + "uname_release": "4.15.0-109-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1146-azure", - "cindex": 185 + "uname_release": "4.15.0-1090-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1146-gcp", - "cindex": 184 + "uname_release": "4.15.0-1090-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1147-aws", - "cindex": 181 + "uname_release": "4.15.0-1091-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1147-gcp", - "cindex": 184 + "uname_release": "4.15.0-1091-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1148-aws", - "cindex": 181 + "uname_release": "4.15.0-1091-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1148-gcp", - "cindex": 184 + "uname_release": "4.15.0-1092-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1149-azure", - "cindex": 185 + "uname_release": "4.15.0-1092-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1149-gcp", - "cindex": 184 + "uname_release": "4.15.0-1092-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-115-generic", - "cindex": 181 + "uname_release": "4.15.0-1093-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1150-aws", - "cindex": 181 + "uname_release": "4.15.0-1093-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1150-azure", - "cindex": 185 + "uname_release": "4.15.0-1093-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1150-gcp", - "cindex": 184 + "uname_release": "4.15.0-1094-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1151-aws", - "cindex": 181 + "uname_release": "4.15.0-1094-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1151-azure", - "cindex": 185 + "uname_release": "4.15.0-1095-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1151-gcp", - "cindex": 184 + "uname_release": "4.15.0-1095-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1152-gcp", - "cindex": 184 + "uname_release": 
"4.15.0-1095-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1153-aws", - "cindex": 181 + "uname_release": "4.15.0-1096-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1153-azure", - "cindex": 185 + "uname_release": "4.15.0-1096-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1154-aws", - "cindex": 181 + "uname_release": "4.15.0-1096-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1155-aws", - "cindex": 181 + "uname_release": "4.15.0-1097-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1156-aws", - "cindex": 181 + "uname_release": "4.15.0-1097-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1157-aws", - "cindex": 181 + "uname_release": "4.15.0-1098-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1157-azure", - "cindex": 185 + "uname_release": "4.15.0-1098-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1158-aws", - "cindex": 181 + "uname_release": "4.15.0-1099-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1158-azure", - "cindex": 185 + "uname_release": "4.15.0-1099-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1159-azure", - "cindex": 185 + "uname_release": "4.15.0-1099-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1161-azure", - "cindex": 185 + "uname_release": "4.15.0-1100-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1162-azure", - "cindex": 185 + "uname_release": "4.15.0-1100-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1163-azure", - "cindex": 185 + "uname_release": "4.15.0-1101-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1164-azure", - "cindex": 185 + "uname_release": "4.15.0-1102-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1165-azure", - "cindex": 185 + "uname_release": "4.15.0-1102-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1166-azure", - "cindex": 185 + "uname_release": "4.15.0-1103-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-1167-azure", - "cindex": 185 + "uname_release": "4.15.0-1103-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-117-generic", - "cindex": 181 + "uname_release": "4.15.0-1103-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-118-generic", - "cindex": 181 + "uname_release": "4.15.0-1104-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-12-generic", - "cindex": 180 + "uname_release": "4.15.0-1106-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": 
"18.04", "arch": "x86_64", - "uname_release": "4.15.0-121-generic", - "cindex": 181 + "uname_release": "4.15.0-1106-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-122-generic", - "cindex": 181 + "uname_release": "4.15.0-1106-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-123-generic", - "cindex": 181 + "uname_release": "4.15.0-1107-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-124-generic", - "cindex": 181 + "uname_release": "4.15.0-1108-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-126-generic", - "cindex": 181 + "uname_release": "4.15.0-1108-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-128-generic", - "cindex": 181 + "uname_release": "4.15.0-1109-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-129-generic", - "cindex": 181 + "uname_release": "4.15.0-1109-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-13-generic", - "cindex": 180 + "uname_release": "4.15.0-1109-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-130-generic", - "cindex": 181 + "uname_release": "4.15.0-111-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-132-generic", - "cindex": 181 + "uname_release": "4.15.0-1110-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-134-generic", - "cindex": 181 + "uname_release": "4.15.0-1110-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-135-generic", - "cindex": 181 + "uname_release": "4.15.0-1110-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-136-generic", - "cindex": 181 + "uname_release": "4.15.0-1111-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-137-generic", - "cindex": 181 + "uname_release": "4.15.0-1111-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-139-generic", - "cindex": 181 + "uname_release": "4.15.0-1111-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-140-generic", - "cindex": 181 + "uname_release": "4.15.0-1112-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-141-generic", - "cindex": 181 + "uname_release": "4.15.0-1112-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-142-generic", - "cindex": 181 + "uname_release": "4.15.0-1112-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-143-generic", - "cindex": 181 + "uname_release": "4.15.0-1113-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-144-generic", - "cindex": 181 + "uname_release": "4.15.0-1114-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - 
"uname_release": "4.15.0-147-generic", - "cindex": 181 + "uname_release": "4.15.0-1114-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-15-generic", - "cindex": 180 + "uname_release": "4.15.0-1114-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-151-generic", - "cindex": 181 + "uname_release": "4.15.0-1115-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-153-generic", - "cindex": 181 + "uname_release": "4.15.0-1115-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-154-generic", - "cindex": 181 + "uname_release": "4.15.0-1115-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-156-generic", - "cindex": 181 + "uname_release": "4.15.0-1116-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-158-generic", - "cindex": 181 + "uname_release": "4.15.0-1116-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-159-generic", - "cindex": 181 + "uname_release": "4.15.0-1118-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-161-generic", - "cindex": 181 + "uname_release": "4.15.0-1118-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-162-generic", - "cindex": 181 + "uname_release": "4.15.0-1118-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-163-generic", - "cindex": 181 + "uname_release": "4.15.0-1119-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-166-generic", - "cindex": 181 + "uname_release": "4.15.0-1119-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-167-generic", - "cindex": 181 + "uname_release": "4.15.0-112-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-169-generic", - "cindex": 181 + "uname_release": "4.15.0-1120-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-171-generic", - "cindex": 181 + "uname_release": "4.15.0-1121-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-173-generic", - "cindex": 181 + "uname_release": "4.15.0-1121-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-175-generic", - "cindex": 181 + "uname_release": "4.15.0-1121-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-176-generic", - "cindex": 181 + "uname_release": "4.15.0-1122-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-177-generic", - "cindex": 181 + "uname_release": "4.15.0-1122-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-180-generic", - "cindex": 181 + "uname_release": "4.15.0-1123-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-184-generic", - 
"cindex": 181 + "uname_release": "4.15.0-1123-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-187-generic", - "cindex": 181 + "uname_release": "4.15.0-1124-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-188-generic", - "cindex": 181 + "uname_release": "4.15.0-1124-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-189-generic", - "cindex": 181 + "uname_release": "4.15.0-1124-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-19-generic", - "cindex": 180 + "uname_release": "4.15.0-1125-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-191-generic", - "cindex": 181 + "uname_release": "4.15.0-1126-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-192-generic", - "cindex": 181 + "uname_release": "4.15.0-1126-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-193-generic", - "cindex": 181 + "uname_release": "4.15.0-1127-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-194-generic", - "cindex": 181 + "uname_release": "4.15.0-1127-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-196-generic", - "cindex": 181 + "uname_release": "4.15.0-1127-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-197-generic", - "cindex": 181 + "uname_release": "4.15.0-1128-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-20-generic", - "cindex": 180 + "uname_release": "4.15.0-1129-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-200-generic", - "cindex": 181 + "uname_release": "4.15.0-1130-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-201-generic", - "cindex": 181 + "uname_release": "4.15.0-1130-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-202-generic", - "cindex": 181 + "uname_release": "4.15.0-1130-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-204-generic", - "cindex": 181 + "uname_release": "4.15.0-1131-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-206-generic", - "cindex": 181 + "uname_release": "4.15.0-1131-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-208-generic", - "cindex": 181 + "uname_release": "4.15.0-1133-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-209-generic", - "cindex": 181 + "uname_release": "4.15.0-1133-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-210-generic", - "cindex": 181 + "uname_release": "4.15.0-1134-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-211-generic", - "cindex": 181 + "uname_release": 
"4.15.0-1134-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-212-generic", - "cindex": 181 + "uname_release": "4.15.0-1135-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-213-generic", - "cindex": 181 + "uname_release": "4.15.0-1136-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-22-generic", - "cindex": 180 + "uname_release": "4.15.0-1136-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-23-generic", - "cindex": 180 + "uname_release": "4.15.0-1136-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-24-generic", - "cindex": 182 + "uname_release": "4.15.0-1137-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-29-generic", - "cindex": 182 + "uname_release": "4.15.0-1137-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-30-generic", - "cindex": 182 + "uname_release": "4.15.0-1137-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-32-generic", - "cindex": 182 + "uname_release": "4.15.0-1138-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-33-generic", - "cindex": 182 + "uname_release": "4.15.0-1138-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-34-generic", - "cindex": 182 + "uname_release": "4.15.0-1139-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-36-generic", - "cindex": 182 + "uname_release": "4.15.0-1139-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-38-generic", - "cindex": 182 + "uname_release": "4.15.0-1140-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-39-generic", - "cindex": 182 + "uname_release": "4.15.0-1141-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-42-generic", - "cindex": 182 + "uname_release": "4.15.0-1141-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-43-generic", - "cindex": 182 + "uname_release": "4.15.0-1142-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-44-generic", - "cindex": 182 + "uname_release": "4.15.0-1142-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-45-generic", - "cindex": 182 + "uname_release": "4.15.0-1142-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-46-generic", - "cindex": 182 + "uname_release": "4.15.0-1143-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-47-generic", - "cindex": 182 + "uname_release": "4.15.0-1143-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-48-generic", - "cindex": 182 + "uname_release": "4.15.0-1144-aws", + "cindex": 108 }, { "distrib": "ubuntu", 
"version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-50-generic", - "cindex": 182 + "uname_release": "4.15.0-1145-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-51-generic", - "cindex": 182 + "uname_release": "4.15.0-1145-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-52-generic", - "cindex": 182 + "uname_release": "4.15.0-1146-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-54-generic", - "cindex": 182 + "uname_release": "4.15.0-1146-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-55-generic", - "cindex": 182 + "uname_release": "4.15.0-1146-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-58-generic", - "cindex": 182 + "uname_release": "4.15.0-1147-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-60-generic", - "cindex": 182 + "uname_release": "4.15.0-1147-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-62-generic", - "cindex": 182 + "uname_release": "4.15.0-1148-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-64-generic", - "cindex": 182 + "uname_release": "4.15.0-1148-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-65-generic", - "cindex": 182 + "uname_release": "4.15.0-1149-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-66-generic", - "cindex": 182 + "uname_release": "4.15.0-1149-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-69-generic", - "cindex": 182 + "uname_release": "4.15.0-115-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-70-generic", - "cindex": 182 + "uname_release": "4.15.0-1150-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-72-generic", - "cindex": 182 + "uname_release": "4.15.0-1150-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-74-generic", - "cindex": 182 + "uname_release": "4.15.0-1150-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-76-generic", - "cindex": 182 + "uname_release": "4.15.0-1151-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-88-generic", - "cindex": 182 + "uname_release": "4.15.0-1151-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-91-generic", - "cindex": 182 + "uname_release": "4.15.0-1151-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-96-generic", - "cindex": 182 + "uname_release": "4.15.0-1152-gcp", + "cindex": 111 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.15.0-99-generic", - "cindex": 181 + "uname_release": "4.15.0-1153-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": 
"4.18.0-1004-gcp", - "cindex": 186 + "uname_release": "4.15.0-1153-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1005-gcp", - "cindex": 186 + "uname_release": "4.15.0-1154-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1006-aws", - "cindex": 186 + "uname_release": "4.15.0-1155-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1006-azure", - "cindex": 187 + "uname_release": "4.15.0-1156-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1006-gcp", - "cindex": 186 + "uname_release": "4.15.0-1157-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1007-aws", - "cindex": 186 + "uname_release": "4.15.0-1157-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1007-azure", - "cindex": 187 + "uname_release": "4.15.0-1158-aws", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1007-gcp", - "cindex": 186 + "uname_release": "4.15.0-1158-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1008-aws", - "cindex": 186 + "uname_release": "4.15.0-1159-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1008-azure", - "cindex": 187 + "uname_release": "4.15.0-1161-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1008-gcp", - "cindex": 186 + "uname_release": "4.15.0-1162-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1009-gcp", - "cindex": 186 + "uname_release": "4.15.0-1163-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1011-aws", - "cindex": 186 + "uname_release": "4.15.0-1164-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1011-azure", - "cindex": 187 + "uname_release": "4.15.0-1165-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1011-gcp", - "cindex": 186 + "uname_release": "4.15.0-1166-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1012-aws", - "cindex": 186 + "uname_release": "4.15.0-1167-azure", + "cindex": 112 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1012-gcp", - "cindex": 186 + "uname_release": "4.15.0-117-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1013-aws", - "cindex": 186 + "uname_release": "4.15.0-118-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1013-azure", - "cindex": 187 + "uname_release": "4.15.0-12-generic", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1013-gcp", - "cindex": 186 + "uname_release": "4.15.0-121-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1014-azure", - "cindex": 188 + "uname_release": 
"4.15.0-122-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1015-gcp", - "cindex": 186 + "uname_release": "4.15.0-123-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1016-aws", - "cindex": 186 + "uname_release": "4.15.0-124-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1017-aws", - "cindex": 186 + "uname_release": "4.15.0-126-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1018-aws", - "cindex": 186 + "uname_release": "4.15.0-128-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1018-azure", - "cindex": 188 + "uname_release": "4.15.0-129-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1019-azure", - "cindex": 188 + "uname_release": "4.15.0-13-generic", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1020-aws", - "cindex": 186 + "uname_release": "4.15.0-130-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1020-azure", - "cindex": 188 + "uname_release": "4.15.0-132-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1023-azure", - "cindex": 188 + "uname_release": "4.15.0-134-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1024-azure", - "cindex": 188 + "uname_release": "4.15.0-135-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-1025-azure", - "cindex": 188 + "uname_release": "4.15.0-136-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-13-generic", - "cindex": 186 + "uname_release": "4.15.0-137-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-14-generic", - "cindex": 186 + "uname_release": "4.15.0-139-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-15-generic", - "cindex": 186 + "uname_release": "4.15.0-140-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-16-generic", - "cindex": 186 + "uname_release": "4.15.0-141-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-17-generic", - "cindex": 186 + "uname_release": "4.15.0-142-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-18-generic", - "cindex": 186 + "uname_release": "4.15.0-143-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-20-generic", - "cindex": 186 + "uname_release": "4.15.0-144-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-21-generic", - "cindex": 186 + "uname_release": "4.15.0-147-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-22-generic", - "cindex": 186 + "uname_release": "4.15.0-15-generic", + 
"cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-24-generic", - "cindex": 186 + "uname_release": "4.15.0-151-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "4.18.0-25-generic", - "cindex": 186 + "uname_release": "4.15.0-153-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1011-aws", - "cindex": 189 + "uname_release": "4.15.0-154-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1011-gcp", - "cindex": 189 + "uname_release": "4.15.0-156-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1012-aws", - "cindex": 189 + "uname_release": "4.15.0-158-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1012-azure", - "cindex": 190 + "uname_release": "4.15.0-159-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1013-gcp", - "cindex": 189 + "uname_release": "4.15.0-161-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1013-gke", - "cindex": 189 + "uname_release": "4.15.0-162-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1014-aws", - "cindex": 189 + "uname_release": "4.15.0-163-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1014-azure", - "cindex": 190 + "uname_release": "4.15.0-166-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1015-gke", - "cindex": 189 + "uname_release": "4.15.0-167-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1016-aws", - "cindex": 189 + "uname_release": "4.15.0-169-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1016-azure", - "cindex": 190 + "uname_release": "4.15.0-171-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1017-gke", - "cindex": 189 + "uname_release": "4.15.0-173-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1018-aws", - "cindex": 189 + "uname_release": "4.15.0-175-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1018-azure", - "cindex": 190 + "uname_release": "4.15.0-176-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1019-aws", - "cindex": 189 + "uname_release": "4.15.0-177-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1020-azure", - "cindex": 190 + "uname_release": "4.15.0-180-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1020-gcp", - "cindex": 189 + "uname_release": "4.15.0-184-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1020-gke", - "cindex": 189 + "uname_release": "4.15.0-187-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": 
"18.04", "arch": "x86_64", - "uname_release": "5.0.0-1021-aws", - "cindex": 189 + "uname_release": "4.15.0-188-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1021-gcp", - "cindex": 189 + "uname_release": "4.15.0-189-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1022-aws", - "cindex": 189 + "uname_release": "4.15.0-19-generic", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1022-azure", - "cindex": 190 + "uname_release": "4.15.0-191-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1022-gke", - "cindex": 189 + "uname_release": "4.15.0-192-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1023-aws", - "cindex": 191 + "uname_release": "4.15.0-193-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1023-azure", - "cindex": 190 + "uname_release": "4.15.0-194-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1023-gke", - "cindex": 189 + "uname_release": "4.15.0-196-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1024-aws", - "cindex": 191 + "uname_release": "4.15.0-197-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1025-aws", - "cindex": 191 + "uname_release": "4.15.0-20-generic", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1025-azure", - "cindex": 190 + "uname_release": "4.15.0-200-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1025-gcp", - "cindex": 189 + "uname_release": "4.15.0-201-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1025-gke", - "cindex": 189 + "uname_release": "4.15.0-202-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1026-gcp", - "cindex": 189 + "uname_release": "4.15.0-204-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1026-gke", - "cindex": 189 + "uname_release": "4.15.0-206-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1027-aws", - "cindex": 191 + "uname_release": "4.15.0-208-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1027-azure", - "cindex": 190 + "uname_release": "4.15.0-209-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1027-gke", - "cindex": 191 + "uname_release": "4.15.0-210-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1028-azure", - "cindex": 192 + "uname_release": "4.15.0-211-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1028-gcp", - "cindex": 191 + "uname_release": "4.15.0-212-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1029-azure", 
- "cindex": 192 + "uname_release": "4.15.0-213-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1029-gcp", - "cindex": 191 + "uname_release": "4.15.0-22-generic", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1029-gke", - "cindex": 191 + "uname_release": "4.15.0-23-generic", + "cindex": 107 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1030-gke", - "cindex": 191 + "uname_release": "4.15.0-24-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1031-azure", - "cindex": 192 + "uname_release": "4.15.0-29-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1031-gcp", - "cindex": 191 + "uname_release": "4.15.0-30-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1032-azure", - "cindex": 192 + "uname_release": "4.15.0-32-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1032-gke", - "cindex": 191 + "uname_release": "4.15.0-33-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1033-gcp", - "cindex": 191 + "uname_release": "4.15.0-34-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1033-gke", - "cindex": 191 + "uname_release": "4.15.0-36-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1034-gcp", - "cindex": 191 + "uname_release": "4.15.0-38-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1035-azure", - "cindex": 192 + "uname_release": "4.15.0-39-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1035-gke", - "cindex": 191 + "uname_release": "4.15.0-42-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1036-azure", - "cindex": 192 + "uname_release": "4.15.0-43-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1037-gke", - "cindex": 193 + "uname_release": "4.15.0-44-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1042-gke", - "cindex": 193 + "uname_release": "4.15.0-45-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1043-gke", - "cindex": 193 + "uname_release": "4.15.0-46-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1045-gke", - "cindex": 193 + "uname_release": "4.15.0-47-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1046-gke", - "cindex": 193 + "uname_release": "4.15.0-48-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1047-gke", - "cindex": 193 + "uname_release": "4.15.0-50-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1049-gke", - "cindex": 193 + "uname_release": "4.15.0-51-generic", + "cindex": 109 }, { 
"distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1050-gke", - "cindex": 193 + "uname_release": "4.15.0-52-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-1051-gke", - "cindex": 193 + "uname_release": "4.15.0-54-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-15-generic", - "cindex": 189 + "uname_release": "4.15.0-55-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-16-generic", - "cindex": 189 + "uname_release": "4.15.0-58-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-17-generic", - "cindex": 189 + "uname_release": "4.15.0-60-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-19-generic", - "cindex": 189 + "uname_release": "4.15.0-62-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-20-generic", - "cindex": 189 + "uname_release": "4.15.0-64-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-23-generic", - "cindex": 189 + "uname_release": "4.15.0-65-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-25-generic", - "cindex": 189 + "uname_release": "4.15.0-66-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-27-generic", - "cindex": 189 + "uname_release": "4.15.0-69-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-29-generic", - "cindex": 189 + "uname_release": "4.15.0-70-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-31-generic", - "cindex": 189 + "uname_release": "4.15.0-72-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-32-generic", - "cindex": 189 + "uname_release": "4.15.0-74-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-35-generic", - "cindex": 189 + "uname_release": "4.15.0-76-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-36-generic", - "cindex": 189 + "uname_release": "4.15.0-88-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-37-generic", - "cindex": 189 + "uname_release": "4.15.0-91-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-43-generic", - "cindex": 191 + "uname_release": "4.15.0-96-generic", + "cindex": 109 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-44-generic", - "cindex": 191 + "uname_release": "4.15.0-99-generic", + "cindex": 108 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-47-generic", - "cindex": 191 + "uname_release": "4.18.0-1004-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-48-generic", - "cindex": 193 + "uname_release": "4.18.0-1005-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - 
"uname_release": "5.0.0-52-generic", - "cindex": 193 + "uname_release": "4.18.0-1006-aws", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-53-generic", - "cindex": 193 + "uname_release": "4.18.0-1006-azure", + "cindex": 114 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-58-generic", - "cindex": 193 + "uname_release": "4.18.0-1006-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-60-generic", - "cindex": 193 + "uname_release": "4.18.0-1007-aws", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-61-generic", - "cindex": 193 + "uname_release": "4.18.0-1007-azure", + "cindex": 114 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-62-generic", - "cindex": 193 + "uname_release": "4.18.0-1007-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-63-generic", - "cindex": 193 + "uname_release": "4.18.0-1008-aws", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.0.0-65-generic", - "cindex": 193 + "uname_release": "4.18.0-1008-azure", + "cindex": 114 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1007-azure", - "cindex": 194 + "uname_release": "4.18.0-1008-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1008-azure", - "cindex": 195 + "uname_release": "4.18.0-1009-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1008-gcp", - "cindex": 196 + "uname_release": "4.18.0-1011-aws", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1009-azure", - "cindex": 195 + "uname_release": "4.18.0-1011-azure", + "cindex": 114 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1009-gcp", - "cindex": 197 + "uname_release": "4.18.0-1011-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1010-azure", - "cindex": 195 + "uname_release": "4.18.0-1012-aws", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1010-gcp", - "cindex": 197 + "uname_release": "4.18.0-1012-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1011-gke", - "cindex": 197 + "uname_release": "4.18.0-1013-aws", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1012-azure", - "cindex": 195 + "uname_release": "4.18.0-1013-azure", + "cindex": 114 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1012-gcp", - "cindex": 197 + "uname_release": "4.18.0-1013-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1012-gke", - "cindex": 197 + "uname_release": "4.18.0-1014-azure", + "cindex": 115 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1013-azure", - "cindex": 195 + "uname_release": "4.18.0-1015-gcp", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1014-gcp", - "cindex": 197 + "uname_release": "4.18.0-1016-aws", + 
"cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1014-gke", - "cindex": 197 + "uname_release": "4.18.0-1017-aws", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1016-aws", - "cindex": 198 + "uname_release": "4.18.0-1018-aws", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1016-azure", - "cindex": 195 + "uname_release": "4.18.0-1018-azure", + "cindex": 115 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1016-gcp", - "cindex": 197 + "uname_release": "4.18.0-1019-azure", + "cindex": 115 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1016-gke", - "cindex": 197 + "uname_release": "4.18.0-1020-aws", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1017-aws", - "cindex": 198 + "uname_release": "4.18.0-1020-azure", + "cindex": 115 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1017-gcp", - "cindex": 197 + "uname_release": "4.18.0-1023-azure", + "cindex": 115 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1017-gke", - "cindex": 197 + "uname_release": "4.18.0-1024-azure", + "cindex": 115 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1018-azure", - "cindex": 195 + "uname_release": "4.18.0-1025-azure", + "cindex": 115 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1018-gcp", - "cindex": 197 + "uname_release": "4.18.0-13-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1018-gke", - "cindex": 197 + "uname_release": "4.18.0-14-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1019-aws", - "cindex": 199 + "uname_release": "4.18.0-15-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1019-azure", - "cindex": 195 + "uname_release": "4.18.0-16-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1020-azure", - "cindex": 195 + "uname_release": "4.18.0-17-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1020-gcp", - "cindex": 200 + "uname_release": "4.18.0-18-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1020-gke", - "cindex": 200 + "uname_release": "4.18.0-20-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1022-azure", - "cindex": 201 + "uname_release": "4.18.0-21-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1023-aws", - "cindex": 199 + "uname_release": "4.18.0-22-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1026-gcp", - "cindex": 200 + "uname_release": "4.18.0-24-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1026-gke", - "cindex": 200 + "uname_release": "4.18.0-25-generic", + "cindex": 113 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - 
"uname_release": "5.3.0-1028-aws", - "cindex": 199 + "uname_release": "5.0.0-1011-aws", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1028-azure", - "cindex": 201 + "uname_release": "5.0.0-1011-gcp", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1029-gcp", - "cindex": 200 + "uname_release": "5.0.0-1012-aws", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1030-aws", - "cindex": 199 + "uname_release": "5.0.0-1012-azure", + "cindex": 117 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1030-gcp", - "cindex": 200 + "uname_release": "5.0.0-1013-gcp", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1030-gke", - "cindex": 200 + "uname_release": "5.0.0-1013-gke", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1031-azure", - "cindex": 201 + "uname_release": "5.0.0-1014-aws", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1032-aws", - "cindex": 199 + "uname_release": "5.0.0-1014-azure", + "cindex": 117 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1032-azure", - "cindex": 201 + "uname_release": "5.0.0-1015-gke", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1032-gcp", - "cindex": 200 + "uname_release": "5.0.0-1016-aws", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1032-gke", - "cindex": 200 + "uname_release": "5.0.0-1016-azure", + "cindex": 117 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1033-aws", - "cindex": 199 + "uname_release": "5.0.0-1017-gke", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1033-gke", - "cindex": 200 + "uname_release": "5.0.0-1018-aws", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1034-aws", - "cindex": 199 + "uname_release": "5.0.0-1018-azure", + "cindex": 117 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1034-azure", - "cindex": 201 + "uname_release": "5.0.0-1019-aws", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1034-gke", - "cindex": 200 + "uname_release": "5.0.0-1020-azure", + "cindex": 117 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1035-aws", - "cindex": 199 + "uname_release": "5.0.0-1020-gcp", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1035-azure", - "cindex": 201 + "uname_release": "5.0.0-1020-gke", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1036-gke", - "cindex": 200 + "uname_release": "5.0.0-1021-aws", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1038-gke", - "cindex": 200 + "uname_release": "5.0.0-1021-gcp", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1039-gke", - "cindex": 200 + "uname_release": "5.0.0-1022-aws", + "cindex": 116 }, { "distrib": "ubuntu", 
"version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1040-gke", - "cindex": 200 + "uname_release": "5.0.0-1022-azure", + "cindex": 117 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1041-gke", - "cindex": 200 + "uname_release": "5.0.0-1022-gke", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1042-gke", - "cindex": 200 + "uname_release": "5.0.0-1023-aws", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1043-gke", - "cindex": 200 + "uname_release": "5.0.0-1023-azure", + "cindex": 117 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1044-gke", - "cindex": 200 + "uname_release": "5.0.0-1023-gke", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-1045-gke", - "cindex": 200 + "uname_release": "5.0.0-1024-aws", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-19-generic", - "cindex": 202 + "uname_release": "5.0.0-1025-aws", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-22-generic", - "cindex": 203 + "uname_release": "5.0.0-1025-azure", + "cindex": 117 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-23-generic", - "cindex": 203 + "uname_release": "5.0.0-1025-gcp", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-24-generic", - "cindex": 198 + "uname_release": "5.0.0-1025-gke", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-26-generic", - "cindex": 198 + "uname_release": "5.0.0-1026-gcp", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-28-generic", - "cindex": 198 + "uname_release": "5.0.0-1026-gke", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-40-generic", - "cindex": 198 + "uname_release": "5.0.0-1027-aws", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-42-generic", - "cindex": 198 + "uname_release": "5.0.0-1027-azure", + "cindex": 117 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-45-generic", - "cindex": 198 + "uname_release": "5.0.0-1027-gke", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-46-generic", - "cindex": 198 + "uname_release": "5.0.0-1028-azure", + "cindex": 119 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-51-generic", - "cindex": 198 + "uname_release": "5.0.0-1028-gcp", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-53-generic", - "cindex": 199 + "uname_release": "5.0.0-1029-azure", + "cindex": 119 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-59-generic", - "cindex": 199 + "uname_release": "5.0.0-1029-gcp", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-61-generic", - "cindex": 199 + "uname_release": "5.0.0-1029-gke", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-62-generic", - "cindex": 199 + "uname_release": 
"5.0.0-1030-gke", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-64-generic", - "cindex": 199 + "uname_release": "5.0.0-1031-azure", + "cindex": 119 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-66-generic", - "cindex": 199 + "uname_release": "5.0.0-1031-gcp", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-67-generic", - "cindex": 199 + "uname_release": "5.0.0-1032-azure", + "cindex": 119 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-68-generic", - "cindex": 199 + "uname_release": "5.0.0-1032-gke", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-69-generic", - "cindex": 199 + "uname_release": "5.0.0-1033-gcp", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-70-generic", - "cindex": 199 + "uname_release": "5.0.0-1033-gke", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-72-generic", - "cindex": 199 + "uname_release": "5.0.0-1034-gcp", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-73-generic", - "cindex": 199 + "uname_release": "5.0.0-1035-azure", + "cindex": 119 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-74-generic", - "cindex": 199 + "uname_release": "5.0.0-1035-gke", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-75-generic", - "cindex": 199 + "uname_release": "5.0.0-1036-azure", + "cindex": 119 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.3.0-76-generic", - "cindex": 199 + "uname_release": "5.0.0-1037-gke", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1003-gkeop", - "cindex": 204 + "uname_release": "5.0.0-1042-gke", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1004-gkeop", - "cindex": 204 + "uname_release": "5.0.0-1043-gke", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1005-gkeop", - "cindex": 204 + "uname_release": "5.0.0-1045-gke", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1007-gkeop", - "cindex": 204 + "uname_release": "5.0.0-1046-gke", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1008-gkeop", - "cindex": 204 + "uname_release": "5.0.0-1047-gke", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1009-gkeop", - "cindex": 204 + "uname_release": "5.0.0-1049-gke", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1010-gkeop", - "cindex": 204 + "uname_release": "5.0.0-1050-gke", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1011-gkeop", - "cindex": 204 + "uname_release": "5.0.0-1051-gke", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1012-gkeop", - "cindex": 204 + "uname_release": "5.0.0-15-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - 
"uname_release": "5.4.0-1013-gkeop", - "cindex": 204 + "uname_release": "5.0.0-16-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1014-gkeop", - "cindex": 204 + "uname_release": "5.0.0-17-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1015-gkeop", - "cindex": 204 + "uname_release": "5.0.0-19-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1016-gkeop", - "cindex": 204 + "uname_release": "5.0.0-20-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1018-aws", - "cindex": 204 + "uname_release": "5.0.0-23-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1018-gkeop", - "cindex": 204 + "uname_release": "5.0.0-25-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1019-gcp", - "cindex": 205 + "uname_release": "5.0.0-27-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1020-aws", - "cindex": 204 + "uname_release": "5.0.0-29-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1020-azure", - "cindex": 206 + "uname_release": "5.0.0-31-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1021-gcp", - "cindex": 205 + "uname_release": "5.0.0-32-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1021-gkeop", - "cindex": 204 + "uname_release": "5.0.0-35-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1022-aws", - "cindex": 204 + "uname_release": "5.0.0-36-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1022-azure", - "cindex": 206 + "uname_release": "5.0.0-37-generic", + "cindex": 116 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1022-gcp", - "cindex": 205 + "uname_release": "5.0.0-43-generic", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1022-gkeop", - "cindex": 204 + "uname_release": "5.0.0-44-generic", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1023-azure", - "cindex": 206 + "uname_release": "5.0.0-47-generic", + "cindex": 118 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1023-gkeop", - "cindex": 204 + "uname_release": "5.0.0-48-generic", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1024-aws", - "cindex": 204 + "uname_release": "5.0.0-52-generic", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1024-gcp", - "cindex": 205 + "uname_release": "5.0.0-53-generic", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1024-gkeop", - "cindex": 204 + "uname_release": "5.0.0-58-generic", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1025-aws", - "cindex": 204 + "uname_release": "5.0.0-60-generic", 
+ "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1025-azure", - "cindex": 206 + "uname_release": "5.0.0-61-generic", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1025-gcp", - "cindex": 205 + "uname_release": "5.0.0-62-generic", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1025-gke", - "cindex": 205 + "uname_release": "5.0.0-63-generic", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1025-gkeop", - "cindex": 204 + "uname_release": "5.0.0-65-generic", + "cindex": 120 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1026-azure", - "cindex": 206 + "uname_release": "5.3.0-1007-azure", + "cindex": 121 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1026-gkeop", - "cindex": 204 + "uname_release": "5.3.0-1008-azure", + "cindex": 122 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1027-gke", - "cindex": 205 + "uname_release": "5.3.0-1008-gcp", + "cindex": 123 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1027-gkeop", - "cindex": 204 + "uname_release": "5.3.0-1009-azure", + "cindex": 122 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1028-aws", - "cindex": 204 + "uname_release": "5.3.0-1009-gcp", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1028-gcp", - "cindex": 205 + "uname_release": "5.3.0-1010-azure", + "cindex": 122 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1029-aws", - "cindex": 204 + "uname_release": "5.3.0-1010-gcp", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1029-gcp", - "cindex": 205 + "uname_release": "5.3.0-1011-gke", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1029-gke", - "cindex": 205 + "uname_release": "5.3.0-1012-azure", + "cindex": 122 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1030-aws", - "cindex": 204 + "uname_release": "5.3.0-1012-gcp", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1030-gcp", - "cindex": 205 + "uname_release": "5.3.0-1012-gke", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1030-gke", - "cindex": 205 + "uname_release": "5.3.0-1013-azure", + "cindex": 122 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1031-azure", - "cindex": 206 + "uname_release": "5.3.0-1014-gcp", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1032-aws", - "cindex": 204 + "uname_release": "5.3.0-1014-gke", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1032-azure", - "cindex": 206 + "uname_release": "5.3.0-1016-aws", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1032-gcp", - "cindex": 205 + "uname_release": "5.3.0-1016-azure", + "cindex": 122 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1032-gke", - 
"cindex": 205 + "uname_release": "5.3.0-1016-gcp", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1033-gcp", - "cindex": 205 + "uname_release": "5.3.0-1016-gke", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1033-gke", - "cindex": 205 + "uname_release": "5.3.0-1017-aws", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1034-aws", - "cindex": 204 + "uname_release": "5.3.0-1017-gcp", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1034-azure", - "cindex": 206 + "uname_release": "5.3.0-1017-gke", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1034-gcp", - "cindex": 205 + "uname_release": "5.3.0-1018-azure", + "cindex": 122 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1035-aws", - "cindex": 204 + "uname_release": "5.3.0-1018-gcp", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1035-azure", - "cindex": 206 + "uname_release": "5.3.0-1018-gke", + "cindex": 124 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1035-gke", - "cindex": 205 + "uname_release": "5.3.0-1019-aws", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1036-azure", - "cindex": 206 + "uname_release": "5.3.0-1019-azure", + "cindex": 122 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1036-gcp", - "cindex": 205 + "uname_release": "5.3.0-1020-azure", + "cindex": 122 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1036-gke", - "cindex": 205 + "uname_release": "5.3.0-1020-gcp", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1037-aws", - "cindex": 204 + "uname_release": "5.3.0-1020-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1037-gcp", - "cindex": 205 + "uname_release": "5.3.0-1022-azure", + "cindex": 128 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1037-gke", - "cindex": 205 + "uname_release": "5.3.0-1023-aws", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1038-aws", - "cindex": 204 + "uname_release": "5.3.0-1026-gcp", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1038-gcp", - "cindex": 205 + "uname_release": "5.3.0-1026-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1039-aws", - "cindex": 204 + "uname_release": "5.3.0-1028-aws", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1039-azure", - "cindex": 206 + "uname_release": "5.3.0-1028-azure", + "cindex": 128 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1039-gke", - "cindex": 205 + "uname_release": "5.3.0-1029-gcp", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1040-azure", - "cindex": 206 + "uname_release": "5.3.0-1030-aws", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - 
"uname_release": "5.4.0-1040-gcp", - "cindex": 205 + "uname_release": "5.3.0-1030-gcp", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1040-gke", - "cindex": 205 + "uname_release": "5.3.0-1030-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1041-aws", - "cindex": 204 + "uname_release": "5.3.0-1031-azure", + "cindex": 128 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1041-azure", - "cindex": 206 + "uname_release": "5.3.0-1032-aws", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1041-gcp", - "cindex": 205 + "uname_release": "5.3.0-1032-azure", + "cindex": 128 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1042-gcp", - "cindex": 205 + "uname_release": "5.3.0-1032-gcp", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1042-gke", - "cindex": 205 + "uname_release": "5.3.0-1032-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1043-aws", - "cindex": 204 + "uname_release": "5.3.0-1033-aws", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1043-azure", - "cindex": 206 + "uname_release": "5.3.0-1033-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1043-gcp", - "cindex": 205 + "uname_release": "5.3.0-1034-aws", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1043-gke", - "cindex": 205 + "uname_release": "5.3.0-1034-azure", + "cindex": 128 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1044-azure", - "cindex": 206 + "uname_release": "5.3.0-1034-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1044-gcp", - "cindex": 205 + "uname_release": "5.3.0-1035-aws", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1044-gke", - "cindex": 205 + "uname_release": "5.3.0-1035-azure", + "cindex": 128 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1045-aws", - "cindex": 204 + "uname_release": "5.3.0-1036-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1046-azure", - "cindex": 206 + "uname_release": "5.3.0-1038-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1046-gcp", - "cindex": 205 + "uname_release": "5.3.0-1039-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1046-gke", - "cindex": 205 + "uname_release": "5.3.0-1040-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1047-aws", - "cindex": 204 + "uname_release": "5.3.0-1041-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1047-azure", - "cindex": 206 + "uname_release": "5.3.0-1042-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1048-aws", - "cindex": 204 + "uname_release": "5.3.0-1043-gke", + "cindex": 127 }, { "distrib": "ubuntu", 
"version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1048-azure", - "cindex": 206 + "uname_release": "5.3.0-1044-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1049-aws", - "cindex": 204 + "uname_release": "5.3.0-1045-gke", + "cindex": 127 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1049-azure", - "cindex": 206 + "uname_release": "5.3.0-19-generic", + "cindex": 129 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1049-gcp", - "cindex": 205 + "uname_release": "5.3.0-22-generic", + "cindex": 130 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1049-gke", - "cindex": 205 + "uname_release": "5.3.0-23-generic", + "cindex": 130 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1051-aws", - "cindex": 204 + "uname_release": "5.3.0-24-generic", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1051-azure", - "cindex": 206 + "uname_release": "5.3.0-26-generic", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1051-gcp", - "cindex": 205 + "uname_release": "5.3.0-28-generic", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1051-gke", - "cindex": 205 + "uname_release": "5.3.0-40-generic", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1052-gcp", - "cindex": 205 + "uname_release": "5.3.0-42-generic", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1052-gke", - "cindex": 205 + "uname_release": "5.3.0-45-generic", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1053-gcp", - "cindex": 205 + "uname_release": "5.3.0-46-generic", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1053-gke", - "cindex": 205 + "uname_release": "5.3.0-51-generic", + "cindex": 125 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1054-aws", - "cindex": 204 + "uname_release": "5.3.0-53-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1054-gke", - "cindex": 205 + "uname_release": "5.3.0-59-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1055-aws", - "cindex": 204 + "uname_release": "5.3.0-61-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1055-azure", - "cindex": 206 + "uname_release": "5.3.0-62-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1055-gcp", - "cindex": 205 + "uname_release": "5.3.0-64-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1055-gke", - "cindex": 205 + "uname_release": "5.3.0-66-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1056-aws", - "cindex": 204 + "uname_release": "5.3.0-67-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1056-azure", - "cindex": 206 + 
"uname_release": "5.3.0-68-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1056-gcp", - "cindex": 205 + "uname_release": "5.3.0-69-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1056-gke", - "cindex": 205 + "uname_release": "5.3.0-70-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1057-aws", - "cindex": 204 + "uname_release": "5.3.0-72-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1057-gcp", - "cindex": 205 + "uname_release": "5.3.0-73-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1057-gke", - "cindex": 205 + "uname_release": "5.3.0-74-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1058-aws", - "cindex": 204 + "uname_release": "5.3.0-75-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1058-azure", - "cindex": 206 + "uname_release": "5.3.0-76-generic", + "cindex": 126 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1058-gcp", - "cindex": 205 + "uname_release": "5.4.0-1003-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1059-aws", - "cindex": 204 + "uname_release": "5.4.0-1004-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1059-azure", - "cindex": 206 + "uname_release": "5.4.0-1005-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1059-gke", - "cindex": 205 + "uname_release": "5.4.0-1007-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1060-aws", - "cindex": 204 + "uname_release": "5.4.0-1008-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1061-azure", - "cindex": 206 + "uname_release": "5.4.0-1009-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1061-gke", - "cindex": 205 + "uname_release": "5.4.0-1010-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1062-azure", - "cindex": 206 + "uname_release": "5.4.0-1011-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1062-gke", - "cindex": 205 + "uname_release": "5.4.0-1012-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1063-azure", - "cindex": 206 + "uname_release": "5.4.0-1013-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1063-gke", - "cindex": 205 + "uname_release": "5.4.0-1014-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1064-azure", - "cindex": 206 + "uname_release": "5.4.0-1015-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1065-gke", - "cindex": 205 + "uname_release": "5.4.0-1016-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", 
"arch": "x86_64", - "uname_release": "5.4.0-1066-gke", - "cindex": 205 + "uname_release": "5.4.0-1018-aws", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1067-gke", - "cindex": 205 + "uname_release": "5.4.0-1018-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-1068-gke", - "cindex": 205 + "uname_release": "5.4.0-1019-gcp", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-37-generic", - "cindex": 204 + "uname_release": "5.4.0-1020-aws", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-39-generic", - "cindex": 204 + "uname_release": "5.4.0-1020-azure", + "cindex": 133 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-40-generic", - "cindex": 204 + "uname_release": "5.4.0-1021-gcp", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-42-generic", - "cindex": 204 + "uname_release": "5.4.0-1021-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-45-generic", - "cindex": 204 + "uname_release": "5.4.0-1022-aws", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-47-generic", - "cindex": 204 + "uname_release": "5.4.0-1022-azure", + "cindex": 133 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-48-generic", - "cindex": 204 + "uname_release": "5.4.0-1022-gcp", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-51-generic", - "cindex": 204 + "uname_release": "5.4.0-1022-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-52-generic", - "cindex": 204 + "uname_release": "5.4.0-1023-azure", + "cindex": 133 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-53-generic", - "cindex": 204 + "uname_release": "5.4.0-1023-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-54-generic", - "cindex": 204 + "uname_release": "5.4.0-1024-aws", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-56-generic", - "cindex": 204 + "uname_release": "5.4.0-1024-gcp", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-58-generic", - "cindex": 204 + "uname_release": "5.4.0-1024-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-59-generic", - "cindex": 204 + "uname_release": "5.4.0-1025-aws", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-60-generic", - "cindex": 204 + "uname_release": "5.4.0-1025-azure", + "cindex": 133 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-62-generic", - "cindex": 204 + "uname_release": "5.4.0-1025-gcp", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-64-generic", - "cindex": 204 + "uname_release": "5.4.0-1025-gke", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-65-generic", - "cindex": 204 + "uname_release": 
"5.4.0-1025-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-66-generic", - "cindex": 204 + "uname_release": "5.4.0-1026-azure", + "cindex": 133 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-67-generic", - "cindex": 204 + "uname_release": "5.4.0-1026-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-70-generic", - "cindex": 204 + "uname_release": "5.4.0-1027-gke", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-71-generic", - "cindex": 204 + "uname_release": "5.4.0-1027-gkeop", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-72-generic", - "cindex": 204 + "uname_release": "5.4.0-1028-aws", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-73-generic", - "cindex": 204 + "uname_release": "5.4.0-1028-gcp", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-74-generic", - "cindex": 204 + "uname_release": "5.4.0-1029-aws", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-77-generic", - "cindex": 204 + "uname_release": "5.4.0-1029-gcp", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-80-generic", - "cindex": 204 + "uname_release": "5.4.0-1029-gke", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-81-generic", - "cindex": 204 + "uname_release": "5.4.0-1030-aws", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-84-generic", - "cindex": 204 + "uname_release": "5.4.0-1030-gcp", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-86-generic", - "cindex": 204 + "uname_release": "5.4.0-1030-gke", + "cindex": 132 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-87-generic", - "cindex": 204 + "uname_release": "5.4.0-1031-azure", + "cindex": 133 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-89-generic", - "cindex": 204 + "uname_release": "5.4.0-1032-aws", + "cindex": 131 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-90-generic", - "cindex": 204 + "uname_release": "5.4.0-1032-azure", + "cindex": 133 }, { "distrib": "ubuntu", "version": "18.04", "arch": "x86_64", - "uname_release": "5.4.0-91-generic", - "cindex": 204 + "uname_release": "5.4.0-1032-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.11.0-1009-aws", - "cindex": 207 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1032-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.11.0-1014-aws", - "cindex": 207 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1033-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.11.0-1016-aws", - "cindex": 207 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1033-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.11.0-1017-aws", - 
"cindex": 207 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1034-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.11.0-1019-aws", - "cindex": 207 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1034-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.11.0-1020-aws", - "cindex": 207 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1034-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.11.0-1025-azure", - "cindex": 208 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1035-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.11.0-1027-azure", - "cindex": 208 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1035-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.3.0-1003-aws", - "cindex": 209 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1035-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.3.0-1008-aws", - "cindex": 210 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1036-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.3.0-1009-aws", - "cindex": 210 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1036-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.3.0-1010-aws", - "cindex": 210 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1036-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.3.0-18-generic", - "cindex": 209 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1037-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.3.0-24-generic", - "cindex": 210 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1037-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1005-aws", - "cindex": 211 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1037-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1007-aws", - "cindex": 211 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1038-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1008-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1038-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1009-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1039-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1011-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1039-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1015-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1039-gke", + 
"cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1017-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1040-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1018-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1040-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1020-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1040-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1021-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1041-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1022-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1041-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1024-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1041-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1025-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1042-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1028-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1042-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1029-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1043-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1030-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1043-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1032-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1043-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1034-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1043-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1035-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1044-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1037-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1044-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1038-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1044-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1039-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1045-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": 
"5.4.0-1041-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1046-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1043-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1046-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1045-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1046-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", + "version": "18.04", + "arch": "x86_64", "uname_release": "5.4.0-1047-aws", - "cindex": 212 - }, - { - "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1048-aws", - "cindex": 212 + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1049-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1047-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1051-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1048-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1054-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1048-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1055-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1049-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1056-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1049-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1057-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1049-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1058-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1049-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1059-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1051-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-1060-aws", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1051-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-12-generic", - "cindex": 211 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1051-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-14-generic", - "cindex": 211 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1051-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-18-generic", - "cindex": 211 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1052-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": 
"5.4.0-21-generic", - "cindex": 211 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1052-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-24-generic", - "cindex": 213 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1053-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-25-generic", - "cindex": 213 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1053-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-26-generic", - "cindex": 213 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1054-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-28-generic", - "cindex": 213 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1054-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-29-generic", - "cindex": 213 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1055-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-31-generic", - "cindex": 213 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1055-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-33-generic", - "cindex": 213 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1055-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-37-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1055-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-39-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1056-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-40-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1056-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-42-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1056-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-45-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1056-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-47-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1057-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-48-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1057-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-51-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1057-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-52-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", 
+ "uname_release": "5.4.0-1058-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-53-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1058-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-54-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1058-gcp", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-56-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1059-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-58-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1059-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-59-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1059-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-60-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1060-aws", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-62-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1061-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-64-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1061-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-65-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1062-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-66-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1062-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-67-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1063-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-70-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1063-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-71-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1064-azure", + "cindex": 133 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-72-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1065-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-73-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1066-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-74-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1067-gke", + "cindex": 132 }, { "distrib": 
"ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-77-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-1068-gke", + "cindex": 132 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-80-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-37-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-81-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-39-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-84-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-40-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-86-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-42-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-88-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-45-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-89-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-47-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-9-generic", - "cindex": 211 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-48-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-90-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-51-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.4.0-91-generic", - "cindex": 212 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-52-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-1035-aws", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-53-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-1038-aws", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-54-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-1041-aws", - "cindex": 215 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-56-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-1042-aws", - "cindex": 215 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-58-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-23-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-59-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-25-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-60-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - 
"uname_release": "5.8.0-28-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-62-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-29-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-64-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-31-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-65-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-33-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-66-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-34-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-67-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-36-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-70-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-38-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-71-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-40-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-72-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-41-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-73-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-43-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-74-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-44-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-77-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-45-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-80-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-48-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-81-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-49-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-84-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-50-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-86-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-53-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-87-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-55-generic", - "cindex": 
214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-89-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-59-generic", - "cindex": 214 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-90-generic", + "cindex": 131 }, { "distrib": "ubuntu", - "version": "20.04", - "arch": "arm64", - "uname_release": "5.8.0-63-generic", - "cindex": 215 + "version": "18.04", + "arch": "x86_64", + "uname_release": "5.4.0-91-generic", + "cindex": 131 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1007-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1009-aws", - "cindex": 217 + "cindex": 135 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1009-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1012-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1013-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1014-aws", - "cindex": 217 + "cindex": 135 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1014-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1015-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1016-aws", - "cindex": 217 + "cindex": 135 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1017-aws", - "cindex": 217 + "cindex": 135 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1017-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1017-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1018-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1019-aws", - "cindex": 217 + "cindex": 135 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1019-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1020-aws", - "cindex": 217 + "cindex": 135 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1020-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1020-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1021-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1021-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1022-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1022-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": 
"5.11.0-1023-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1024-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1025-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1026-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1027-azure", - "cindex": 216 + "cindex": 134 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.11.0-1028-gcp", - "cindex": 218 + "cindex": 136 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1003-aws", - "cindex": 219 + "cindex": 137 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1003-azure", - "cindex": 220 + "cindex": 138 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1004-gcp", - "cindex": 221 + "cindex": 139 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1008-aws", - "cindex": 222 + "cindex": 140 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1008-azure", - "cindex": 223 + "cindex": 141 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1009-aws", - "cindex": 222 + "cindex": 140 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1009-azure", - "cindex": 223 + "cindex": 141 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1009-gcp", - "cindex": 224 + "cindex": 142 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1010-aws", - "cindex": 222 + "cindex": 140 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-1011-gcp", - "cindex": 224 + "cindex": 142 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-18-generic", - "cindex": 219 + "cindex": 137 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.3.0-24-generic", - "cindex": 222 + "cindex": 140 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1005-aws", - "cindex": 225 + "cindex": 143 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1005-gcp", - "cindex": 226 + "cindex": 144 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1006-azure", - "cindex": 227 + "cindex": 145 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1007-aws", - "cindex": 225 + "cindex": 143 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1007-gcp", - "cindex": 226 + "cindex": 144 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1008-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1008-azure", - "cindex": 227 + "cindex": 145 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1008-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1008-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", 
"uname_release": "5.4.0-1009-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1009-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1009-gcp", - "cindex": 231 + "cindex": 149 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1009-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1010-azure", - "cindex": 232 + "cindex": 150 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1010-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1011-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1011-gcp", - "cindex": 231 + "cindex": 149 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1011-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1012-azure", - "cindex": 232 + "cindex": 150 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1012-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1013-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1014-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1015-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1015-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1015-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1016-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1016-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1017-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1018-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1018-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1018-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1019-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1019-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1020-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1020-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1021-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", 
"arch": "x86_64", "uname_release": "5.4.0-1021-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1021-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1022-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1022-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1022-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1022-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1023-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1023-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1024-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1024-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1024-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1025-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1025-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1025-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1025-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1026-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1026-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1027-gkeop", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1028-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1028-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1029-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1029-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1030-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1030-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1031-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1032-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1032-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": 
"20.04", "arch": "x86_64", "uname_release": "5.4.0-1032-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1033-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1033-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1034-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1034-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1034-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1035-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1035-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1035-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1036-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1036-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1036-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1037-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1037-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1037-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1038-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1038-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1039-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1039-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1039-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1040-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1040-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1041-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1041-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1041-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1041-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1042-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": 
"20.04", "arch": "x86_64", "uname_release": "5.4.0-1042-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1043-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1043-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1043-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1043-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1044-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1044-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1044-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1045-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1046-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1046-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1046-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1047-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1047-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1048-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1048-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1049-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1049-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1049-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1049-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1051-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1051-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1051-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1051-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1052-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1052-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1053-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": 
"20.04", "arch": "x86_64", "uname_release": "5.4.0-1053-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1054-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1054-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1055-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1055-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1055-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1055-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1056-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1056-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1056-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1056-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1057-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1057-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1057-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1058-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1058-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1058-gcp", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1059-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1059-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1059-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1060-aws", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1061-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1061-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1062-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1062-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1063-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1063-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": 
"20.04", "arch": "x86_64", "uname_release": "5.4.0-1064-azure", - "cindex": 230 + "cindex": 148 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1065-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1066-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1067-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-1068-gke", - "cindex": 229 + "cindex": 147 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-12-generic", - "cindex": 225 + "cindex": 143 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-14-generic", - "cindex": 225 + "cindex": 143 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-18-generic", - "cindex": 225 + "cindex": 143 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-21-generic", - "cindex": 225 + "cindex": 143 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-24-generic", - "cindex": 233 + "cindex": 151 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-25-generic", - "cindex": 233 + "cindex": 151 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-26-generic", - "cindex": 233 + "cindex": 151 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-28-generic", - "cindex": 233 + "cindex": 151 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-29-generic", - "cindex": 233 + "cindex": 151 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-31-generic", - "cindex": 233 + "cindex": 151 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-33-generic", - "cindex": 233 + "cindex": 151 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-37-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-39-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-40-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-42-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-45-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-47-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-48-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-51-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-52-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-53-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-54-generic", - "cindex": 228 + "cindex": 146 }, { 
"distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-56-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-58-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-59-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-60-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-62-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-64-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-65-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-66-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-67-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-70-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-71-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-72-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-73-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-74-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-77-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-80-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-81-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-84-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-86-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-88-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-89-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-9-generic", - "cindex": 225 + "cindex": 143 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-90-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.4.0-91-generic", - "cindex": 228 + "cindex": 146 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1032-gcp", - "cindex": 234 + "cindex": 152 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1033-azure", - "cindex": 235 + "cindex": 153 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1035-aws", - 
"cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1035-gcp", - "cindex": 234 + "cindex": 152 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1036-azure", - "cindex": 235 + "cindex": 153 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1038-aws", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1038-gcp", - "cindex": 237 + "cindex": 155 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1039-azure", - "cindex": 238 + "cindex": 156 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1039-gcp", - "cindex": 237 + "cindex": 155 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1040-azure", - "cindex": 238 + "cindex": 156 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1041-aws", - "cindex": 239 + "cindex": 157 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1041-azure", - "cindex": 238 + "cindex": 156 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1042-aws", - "cindex": 239 + "cindex": 157 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1042-azure", - "cindex": 238 + "cindex": 156 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-1043-azure", - "cindex": 238 + "cindex": 156 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-23-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-25-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-28-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-29-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-31-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-33-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-34-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-36-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-38-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-40-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-41-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-43-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-44-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-45-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", 
"uname_release": "5.8.0-48-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-49-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-50-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-53-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-55-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-59-generic", - "cindex": 236 + "cindex": 154 }, { "distrib": "ubuntu", "version": "20.04", "arch": "x86_64", "uname_release": "5.8.0-63-generic", - "cindex": 239 + "cindex": 157 } ] } \ No newline at end of file diff --git a/pkg/security/probe/constantfetch/btfhub/constants_arm64.json b/pkg/security/probe/constantfetch/btfhub/constants_arm64.json new file mode 100644 index 0000000000000..8773532df75d4 --- /dev/null +++ b/pkg/security/probe/constantfetch/btfhub/constants_arm64.json @@ -0,0 +1,15220 @@ +{ + "constants": [ + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1880, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + 
"file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1880, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2008, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 
44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2008, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1880, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 
168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1128, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_proc_inum_offset": 72, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 88, + "sizeof_inode": 576, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 896, + "tty_name_offset": 400, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1128, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 88, + "sizeof_inode": 560, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 896, + "tty_name_offset": 400, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "creds_cap_inheritable_offset": 40, + 
"creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1128, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 88, + "sizeof_inode": 544, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 944, + "tty_name_offset": 400, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 12, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1192, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 576, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1808, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + 
"creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2008, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1528, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, + 
"bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 200, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 
0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 464, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 80, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 32, + "bpf_prog_aux_name_offset": 520, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + 
"socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1960, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 
56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1384, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1384, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1160, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 20, + "flowi4_uli_offset": 28, + "flowi6_saddr_offset": 36, + "flowi6_uli_offset": 56, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + 
"pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 560, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1464, + "tty_name_offset": 400, + "tty_offset": 416, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1240, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 288, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 560, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1696, + "tty_name_offset": 400, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 496, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + 
"path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1912, + "tty_name_offset": 360, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1872, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 
320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1960, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + 
"file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1360, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 240, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 752, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1456, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + 
"bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1360, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 248, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 760, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 
1448, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1368, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 248, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + 
"pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 760, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1456, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1368, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, 
+ "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 248, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 760, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1456, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 160, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 152, + "dentry_sb_offset": 152, + "device_nd_net_net_offset": 1368, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 248, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 112, + "pipe_inode_info_bufs_offset": 168, + "pipe_inode_info_curbuf_offset": 108, + "pipe_inode_info_nrbufs_offset": 104, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 752, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1456, + "tty_name_offset": 496, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 496, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + 
"inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1408, + "tty_name_offset": 360, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 512, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 64, + "dentry_d_name_offset": 48, + "dentry_d_sb_offset": 168, + "dentry_sb_offset": 168, + "device_nd_net_net_offset": 1376, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 264, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 176, + "pipe_inode_info_bufs_offset": 240, + "pipe_inode_info_head_offset": 168, + "pipe_inode_info_ring_size_offset": 180, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 760, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1480, + "tty_name_offset": 488, + "tty_offset": 440, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 496, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + 
"bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1408, + "tty_name_offset": 360, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 512, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 44, + "creds_uid_offset": 8, + "dentry_d_inode_offset": 64, + "dentry_d_name_offset": 48, + "dentry_d_sb_offset": 168, + "dentry_sb_offset": 168, + "device_nd_net_net_offset": 1376, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 264, + "nf_conn_ct_net_offset": 192, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 176, + "pipe_inode_info_bufs_offset": 240, + "pipe_inode_info_head_offset": 168, + "pipe_inode_info_ring_size_offset": 180, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 760, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1480, + "tty_name_offset": 488, + "tty_offset": 440, + 
"vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_type_offset": 8, + "bpf_prog_aux_offset": 16, + "bpf_prog_type_offset": 8, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 296, + "net_device_name_offset": 0, + "net_ns_offset": 136, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 72, + "pipe_inode_info_bufs_offset": 128, + "pipe_inode_info_curbuf_offset": 68, + "pipe_inode_info_nrbufs_offset": 64, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1304, + "tty_name_offset": 400, + "tty_offset": 384, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1312, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, 
+ "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1512, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 176, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1384, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1248, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 
16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1384, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1392, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 256, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, 
+ "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_bufs_offset": 144, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1872, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1392, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 
104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2008, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1872, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + 
"bpf_map_id_offset": 48, + "bpf_map_name_offset": 200, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 632, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1936, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 200, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + 
"sb_magic_offset": 96, + "sizeof_inode": 632, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1936, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 
144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1960, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 200, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 632, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1936, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + 
"inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1456, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1344, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 256, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 
104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1472, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 424, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1472, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + 
"vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_type_offset": 4, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 12, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1192, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 268, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 576, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1808, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 2008, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + 
"vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 296, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 208, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1288, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 56, + "flowi4_uli_offset": 64, + "flowi6_saddr_offset": 72, + "flowi6_uli_offset": 92, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 320, + "linux_binprm_envc_offset": 324, + "linux_binprm_p_offset": 280, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 
96, + "sizeof_inode": 648, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1960, + "tty_name_offset": 368, + "tty_offset": 392, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1472, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + 
"pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1376, + "tty_name_offset": 368, + "tty_offset": 376, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 256, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1472, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 424, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + 
"inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 292, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1472, + "tty_name_offset": 368, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 28, + "bpf_map_type_offset": 4, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 32, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1544, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 
1312, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1520, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 128, + "bpf_prog_aux_offset": 24, + "bpf_prog_tag_offset": 16, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1312, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 608, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1520, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 168, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + 
"bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 16, + "bpf_prog_aux_name_offset": 152, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1312, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 192, + "linux_binprm_envc_offset": 196, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_link_pid_offset": 16, + "pid_numbers_offset": 48, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_link_offset": 1392, + "tty_name_offset": 368, + "tty_offset": 368, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 176, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 
32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1392, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 176, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1392, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 176, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 176, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 20, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1320, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 32, + "flowi4_uli_offset": 40, + "flowi6_saddr_offset": 48, + "flowi6_uli_offset": 68, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "linux_binprm_argc_offset": 200, + "linux_binprm_envc_offset": 204, + "linux_binprm_p_offset": 152, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 56, + "pipe_inode_info_buffers_offset": 64, + 
"pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 32, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1392, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1416, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + 
"kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1416, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1416, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 
48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1416, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1416, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + 
"vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 80, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 504, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1328, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1440, + "tty_name_offset": 360, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 80, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 504, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + 
"sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1440, + "tty_name_offset": 360, + "tty_offset": 408, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 112, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1416, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 88, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 
24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 584, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1416, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 592, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1416, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + 
"file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1416, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 48, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 168, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 24, + "bpf_prog_aux_name_offset": 176, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1256, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 96, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 72, + "linux_binprm_envc_offset": 76, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 264, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 80, + "pipe_inode_info_buffers_offset": 64, + "pipe_inode_info_bufs_offset": 120, + "pipe_inode_info_curbuf_offset": 60, + "pipe_inode_info_nrbufs_offset": 56, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1440, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + 
"bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 416, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + "sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1448, + "tty_name_offset": 368, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + }, + { + "binprm_file_offset": 64, + "bpf_map_id_offset": 48, + "bpf_map_name_offset": 88, + "bpf_map_type_offset": 24, + "bpf_prog_attach_type_offset": 8, + "bpf_prog_aux_id_offset": 28, + "bpf_prog_aux_name_offset": 416, + "bpf_prog_aux_offset": 32, + "bpf_prog_tag_offset": 20, + "bpf_prog_type_offset": 4, + "creds_cap_inheritable_offset": 40, + "creds_uid_offset": 4, + "dentry_d_inode_offset": 48, + "dentry_d_name_offset": 32, + "dentry_d_sb_offset": 104, + "dentry_sb_offset": 104, + "device_nd_net_net_offset": 1264, + "file_f_inode_offset": 32, + "file_f_path_offset": 16, + "flowi4_saddr_offset": 40, + "flowi4_uli_offset": 48, + "flowi6_saddr_offset": 56, + "flowi6_uli_offset": 76, + "inode_ctime_offset": 120, + "inode_gid_offset": 8, + "inode_ino_offset": 64, + "inode_mtime_offset": 104, + "inode_nlink_offset": 72, + "inode_sb_offset": 40, + "iokiocb_ctx_offset": 80, + "kernel_clone_args_exit_signal_offset": 32, + "linux_binprm_argc_offset": 88, + "linux_binprm_envc_offset": 92, + "linux_binprm_p_offset": 24, + "mount_id_offset": 284, + "mount_mnt_mountpoint_offset": 24, + "mountpoint_dentry_offset": 16, + "net_device_ifindex_offset": 256, + "net_device_name_offset": 0, + "net_ns_offset": 120, + "nf_conn_ct_net_offset": 144, + "path_dentry_offset": 8, + "path_mnt_offset": 0, + "pid_level_offset": 4, + "pid_numbers_offset": 96, + "pipe_inode_info_bufs_offset": 152, + "pipe_inode_info_head_offset": 80, + "pipe_inode_info_ring_size_offset": 92, + "sb_dev_offset": 16, + "sb_flags_offset": 80, + "sb_magic_offset": 96, + "sizeof_inode": 600, + "sizeof_upid": 16, + 
"sock_common_skc_family_offset": 16, + "sock_common_skc_net_offset": 48, + "sock_common_skc_num_offset": 14, + "socket_sock_offset": 24, + "super_block_s_type_offset": 40, + "task_struct_pid_offset": 1448, + "tty_name_offset": 360, + "tty_offset": 400, + "vfsmount_mnt_flags_offset": 16, + "vfsmount_mnt_root_offset": 0, + "vfsmount_mnt_sb_offset": 8, + "vm_area_struct_flags_offset": 80 + } + ], + "kernels": [ + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.101-91.76.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.104-95.84.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.106-97.85.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.109-99.92.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.114-103.97.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.114-105.126.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.121-109.96.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.123-111.109.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.128-112.105.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.133-113.105.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.133-113.112.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.138-114.102.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.143-118.123.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.146-119.123.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.146-120.181.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.152-124.171.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.152-127.182.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.154-128.181.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.158-129.185.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.165-131.185.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.165-133.209.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.171-136.231.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.173-137.228.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + 
"uname_release": "4.14.173-137.229.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.177-139.253.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.177-139.254.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.181-140.257.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.181-142.260.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.186-146.268.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.192-147.314.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.193-149.317.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.198-152.320.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.200-155.322.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.203-156.332.amzn2.aarch64", + "cindex": 1 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.209-160.335.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.209-160.339.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.214-160.339.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.219-161.340.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.219-164.354.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.225-168.357.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.225-169.362.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.231-173.360.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.231-173.361.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.232-176.381.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.232-177.418.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.238-182.421.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.238-182.422.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.241-184.433.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.243-185.433.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.246-187.474.amzn2.aarch64", + "cindex": 2 + }, 
+ { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.248-189.473.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.252-195.481.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.252-195.483.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.256-197.484.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.262-200.489.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.268-205.500.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.273-207.502.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.275-207.503.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.276-211.499.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.281-212.502.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.285-215.501.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.287-215.504.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.290-217.505.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.291-218.527.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.294-220.533.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.296-222.539.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.299-223.520.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.301-224.520.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.301-225.528.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.304-226.531.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.305-227.531.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.309-231.529.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.311-233.529.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.313-235.533.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.314-237.533.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.314-238.539.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + 
"uname_release": "4.14.318-240.529.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.318-241.531.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.320-242.534.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.320-243.544.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.322-244.536.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.322-244.539.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.322-246.539.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.326-245.539.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.327-246.539.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.328-248.540.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.330-250.540.amzn2.aarch64", + "cindex": 2 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.334-252.552.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.336-253.554.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.336-255.557.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.336-256.557.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.336-256.559.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.336-257.562.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.336-257.566.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.336-257.568.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.343-259.562.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.343-260.564.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.343-261.564.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.344-262.563.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.345-262.561.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.348-265.562.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.348-265.565.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.349-266.564.amzn2.aarch64", + "cindex": 3 + }, 
+ { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.350-266.564.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.352-267.564.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.352-268.568.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.352-268.569.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.353-270.569.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.355-271.569.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.355-275.570.amzn2.aarch64", + "cindex": 3 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.77-80.57.amzn2.aarch64", + "cindex": 4 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.77-81.59.amzn2.aarch64", + "cindex": 4 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.77-86.82.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.88-88.73.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.88-88.76.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.94-89.73.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "amzn", + "version": "2", + "arch": "arm64", + "uname_release": "4.14.97-90.72.amzn2.aarch64", + "cindex": 0 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "3.18.9-200.el7.aarch64", + "cindex": 5 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "3.19.0-0.80.aa7a.aarch64", + "cindex": 6 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.0.0-0.rc7.git1.1.el7.aarch64", + "cindex": 7 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.0.0-1.el7.aarch64", + "cindex": 7 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-22.el7.2.aarch64", + "cindex": 8 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-22.el7a.aarch64", + "cindex": 8 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-45.4.1.el7a.aarch64", + "cindex": 8 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-45.6.1.el7a.aarch64", + "cindex": 8 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-45.el7.aarch64", + "cindex": 8 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.10.1.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.2.2.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.5.1.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.6.1.el7a.aarch64", + "cindex": 9 + }, 
+ { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.7.1.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.8.1.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.8.2.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.el7a.0.1.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.10.1.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.13.1.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.2.2.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.8.1.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.el7a.aarch64", + "cindex": 9 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.101-200.el7.aarch64", + "cindex": 10 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.71-201.el7.aarch64", + "cindex": 10 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.78-201.el7.aarch64", + "cindex": 10 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.82-201.el7.aarch64", + "cindex": 10 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.94-200.el7.aarch64", + "cindex": 10 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.18.0-147.0.3.el7.aarch64", + "cindex": 11 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.18.0-147.8.1.el7.aarch64", + "cindex": 11 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.18.0-193.1.2.el7.aarch64", + "cindex": 12 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.18.0-193.28.1.el7.aarch64", + "cindex": 12 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.18.0-305.10.2.el7.aarch64", + "cindex": 13 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.18.0-348.20.1.el7.aarch64", + "cindex": 14 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.18.0-80.7.1.el7.aarch64", + "cindex": 15 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.18.0-80.7.2.el7.aarch64", + "cindex": 15 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.19.104-300.el7.aarch64", + "cindex": 16 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.19.110-300.el7.aarch64", + "cindex": 16 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.19.113-300.el7.aarch64", + "cindex": 17 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.19.23-300.el7.aarch64", + "cindex": 16 + 
}, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.19.84-300.el7.aarch64", + "cindex": 16 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.19.94-300.el7.aarch64", + "cindex": 16 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.2.0-0.22.el7.1.aarch64", + "cindex": 18 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.2.0-0.24.el7.1.aarch64", + "cindex": 18 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.2.0-0.25.el7.1.aarch64", + "cindex": 18 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.2.0-0.26.el7.1.aarch64", + "cindex": 18 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.2.0-0.27.el7.1.aarch64", + "cindex": 18 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.2.0-0.28.el7.1.aarch64", + "cindex": 18 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.2.0-0.29.el7.1.aarch64", + "cindex": 18 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.2.0-0.30.el7.1.aarch64", + "cindex": 18 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.2.0-0.31.el7.1.aarch64", + "cindex": 18 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.5.0-19.el7.aarch64", + "cindex": 19 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.5.0-20.el7.aarch64", + "cindex": 19 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.5.0-21.el7.aarch64", + "cindex": 19 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.5.0-22.el7.aarch64", + "cindex": 19 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.5.0-23.el7.aarch64", + "cindex": 19 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.5.0-25.el7.aarch64", + "cindex": 19 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.5.0-27.el7.aarch64", + "cindex": 19 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "4.5.0-29.el7.aarch64", + "cindex": 19 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "5.10.109-200.el7.aarch64", + "cindex": 20 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.28-200.el7.aarch64", + "cindex": 21 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.42-200.el7.aarch64", + "cindex": 21 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.49-200.el7.aarch64", + "cindex": 21 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.88-200.el7.aarch64", + "cindex": 21 + }, + { + "distrib": "centos", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.96-200.el7.aarch64", + "cindex": 21 + }, + { + "distrib": "centos", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.0.3.el8_1.aarch64", + "cindex": 22 + }, + { + "distrib": "centos", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.3.1.el8_1.aarch64", + "cindex": 22 + }, + { + "distrib": "centos", + "version": "8", + 
"arch": "arm64", + "uname_release": "4.18.0-147.5.1.el8_1.aarch64", + "cindex": 22 + }, + { + "distrib": "centos", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.8.1.el8_1.aarch64", + "cindex": 22 + }, + { + "distrib": "centos", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.el8.aarch64", + "cindex": 22 + }, + { + "distrib": "centos", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.7.1.el8_0.aarch64", + "cindex": 23 + }, + { + "distrib": "centos", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.el8.aarch64", + "cindex": 23 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-1-arm64", + "cindex": 24 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-1-rt-arm64", + "cindex": 25 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-10-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-10-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-11-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-11-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-12-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-12-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-13-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-13-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-14-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-14-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-15-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-15-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-16-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-16-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-17-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-17-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-18-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-18-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-19-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-19-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-2-arm64", + "cindex": 24 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + 
"uname_release": "4.19.0-2-rt-arm64", + "cindex": 25 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-20-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-20-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-21-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-21-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-22-arm64", + "cindex": 28 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-22-rt-arm64", + "cindex": 29 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-23-arm64", + "cindex": 28 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-23-rt-arm64", + "cindex": 29 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-24-arm64", + "cindex": 28 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-24-rt-arm64", + "cindex": 29 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-25-arm64", + "cindex": 28 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-25-rt-arm64", + "cindex": 29 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-26-arm64", + "cindex": 30 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-26-rt-arm64", + "cindex": 31 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-27-arm64", + "cindex": 30 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-27-rt-arm64", + "cindex": 31 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-3-arm64", + "cindex": 24 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-3-rt-arm64", + "cindex": 25 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-4-arm64", + "cindex": 24 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-4-rt-arm64", + "cindex": 25 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-5-arm64", + "cindex": 24 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-5-rt-arm64", + "cindex": 32 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-6-arm64", + "cindex": 24 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-6-rt-arm64", + "cindex": 32 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-7-arm64", + "cindex": 24 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-7-rt-arm64", + "cindex": 32 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-8-arm64", + "cindex": 24 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-8-rt-arm64", + "cindex": 32 + }, + { + "distrib": 
"debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-9-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "4.19.0-9-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.17-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.17-cloud-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.17-rt-arm64", + "cindex": 34 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.19-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.19-cloud-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.19-rt-arm64", + "cindex": 34 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.20-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.20-cloud-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.20-rt-arm64", + "cindex": 34 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.21-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.21-cloud-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.21-rt-arm64", + "cindex": 34 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.22-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.22-cloud-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.22-rt-arm64", + "cindex": 34 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.23-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.23-cloud-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.23-rt-arm64", + "cindex": 34 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.24-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.24-cloud-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.24-rt-arm64", + "cindex": 34 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.26-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.26-cloud-arm64", + "cindex": 33 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.26-rt-arm64", + "cindex": 34 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": 
"5.10.0-0.deb10.27-arm64", + "cindex": 35 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.27-cloud-arm64", + "cindex": 35 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.27-rt-arm64", + "cindex": 36 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.28-arm64", + "cindex": 35 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.28-cloud-arm64", + "cindex": 35 + }, + { + "distrib": "debian", + "version": "10", + "arch": "arm64", + "uname_release": "5.10.0-0.deb10.28-rt-arm64", + "cindex": 36 + }, + { + "distrib": "debian", + "version": "9", + "arch": "arm64", + "uname_release": "4.19.0-0.bpo.19-arm64", + "cindex": 26 + }, + { + "distrib": "debian", + "version": "9", + "arch": "arm64", + "uname_release": "4.19.0-0.bpo.19-rt-arm64", + "cindex": 27 + }, + { + "distrib": "debian", + "version": "9", + "arch": "arm64", + "uname_release": "4.9.0-13-arm64", + "cindex": 37 + }, + { + "distrib": "debian", + "version": "9", + "arch": "arm64", + "uname_release": "4.9.0-18-arm64", + "cindex": 37 + }, + { + "distrib": "debian", + "version": "9", + "arch": "arm64", + "uname_release": "4.9.0-19-arm64", + "cindex": 37 + }, + { + "distrib": "fedora", + "version": "28", + "arch": "arm64", + "uname_release": "4.16.3-301.fc28.aarch64", + "cindex": 38 + }, + { + "distrib": "fedora", + "version": "28", + "arch": "arm64", + "uname_release": "5.0.16-100.fc28.aarch64", + "cindex": 39 + }, + { + "distrib": "fedora", + "version": "29", + "arch": "arm64", + "uname_release": "4.18.16-300.fc29.aarch64", + "cindex": 40 + }, + { + "distrib": "fedora", + "version": "29", + "arch": "arm64", + "uname_release": "5.3.11-100.fc29.aarch64", + "cindex": 41 + }, + { + "distrib": "fedora", + "version": "30", + "arch": "arm64", + "uname_release": "5.0.9-301.fc30.aarch64", + "cindex": 39 + }, + { + "distrib": "fedora", + "version": "30", + "arch": "arm64", + "uname_release": "5.6.13-100.fc30.aarch64", + "cindex": 42 + }, + { + "distrib": "fedora", + "version": "31", + "arch": "arm64", + "uname_release": "5.3.7-301.fc31.aarch64", + "cindex": 43 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1818.0.10.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1818.0.15.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1818.0.9.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1818.1.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1818.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1818.3.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1818.4.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1818.4.7.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1818.5.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": 
"4.14.35-1844.0.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1844.0.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1844.0.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1844.0.7.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1844.1.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1844.2.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1844.3.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1844.4.5.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1844.4.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1844.5.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1846.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1847.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1848.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1849.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1850a.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1851.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1901.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.10.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.11.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.12.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.13.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.14.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.15.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.18.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + 
"version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.7.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.0.9.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.1.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.10.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.10.4.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.10.4.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.10.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.10.7.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.10.8.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.11.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.11.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.12.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.3.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.300.11.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.301.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.302.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.302.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.302.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.303.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.303.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.303.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.303.4.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.303.5.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.304.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": 
"arm64", + "uname_release": "4.14.35-1902.304.6.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.304.6.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.304.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.305.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.305.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.305.4.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.305.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.10.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.12.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.13.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.14.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.7.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.8.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.306.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.4.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.4.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.4.8.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.5.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.5.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.5.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.5.2.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.5.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": 
"arm64", + "uname_release": "4.14.35-1902.6.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.6.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.6.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.6.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.6.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.7.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.7.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.7.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.8.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.9.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1902.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1903.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1904.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1905.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1906.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1907.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1908.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1909.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1910a.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1911.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1912.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1915.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1916.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1917.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1923.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1929.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-1933.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": 
"arm64", + "uname_release": "4.14.35-1941.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2013.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2015.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2016.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2017.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2018.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2019.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2020.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.400.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.400.8.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.400.9.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.400.9.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.401.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.402.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.402.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.403.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.403.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.403.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.403.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.403.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.404.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.404.1.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.404.1.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.405.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.405.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2025.405.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2039.el7uek.aarch64", 
+ "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2040.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2041.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.500.10.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.500.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.500.9.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.500.9.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.501.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.501.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.501.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.502.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.502.4.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.502.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.502.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.503.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.503.1.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.503.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.504.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.504.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.504.2.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.504.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.505.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.505.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.505.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.505.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.505.4.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.505.4.3.el7uek.aarch64", + "cindex": 44 
+ }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.505.4.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.505.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.506.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.506.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.506.10.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.506.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.506.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.506.8.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.506.8.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.507.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.507.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.507.7.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.507.7.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.507.7.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.508.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.508.3.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.508.3.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.508.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.509.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.509.2.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.509.2.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.4.1.el7uek.aarch64", + 
"cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.5.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.5.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.5.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.5.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.510.5.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.5.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.5.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.7.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.8.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.511.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.512.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.512.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.512.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.512.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.512.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.512.6.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.513.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": 
"4.14.35-2047.513.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.513.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.513.2.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.513.2.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.513.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.514.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.514.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.514.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.514.5.1.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.514.5.1.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.514.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.515.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.516.1.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.516.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.516.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.516.2.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.516.2.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.516.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.517.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.517.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.517.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.517.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.518.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.518.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.518.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.518.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + 
"uname_release": "4.14.35-2047.518.4.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.518.4.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.518.4.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.518.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.519.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.519.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.519.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.519.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.520.0.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.520.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.520.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.520.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.521.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.521.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.521.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.522.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.522.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.522.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.523.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.523.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.523.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.523.4.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.523.4.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.523.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.524.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.524.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + 
"uname_release": "4.14.35-2047.524.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.524.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.524.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.525.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.526.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.526.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.526.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.527.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.527.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.528.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.528.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.528.2.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.528.2.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.528.2.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.528.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.529.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.529.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.529.3.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.529.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.530.5.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.531.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.532.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.532.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.533.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.534.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.534.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": 
"arm64", + "uname_release": "4.14.35-2047.535.2.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.536.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.537.4.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.537.4.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.538.5.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.538.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.539.5.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.540.4.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.540.4.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.541.4.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.542.2.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.543.3.1.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2047.543.3.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2048.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2049.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2050.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2051.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2052.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2102.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2103.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2104.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2105.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2106.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2108.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2109.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2110.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2111.el7uek.aarch64", + "cindex": 44 + }, + { 
+ "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2112.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2113.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2114.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2115.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2116.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2118.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2120.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2121.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2122.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.35-2124.el7uek.aarch64", + "cindex": 44 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.0-1948.3.el7uek.aarch64", + "cindex": 45 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2006.5.el7uek.aarch64", + "cindex": 45 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2011.4.6.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2011.6.2.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2028.2.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.100.1.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.100.3.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.100.6.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.101.0.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.101.1.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.101.2.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.102.0.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.103.2.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.104.0.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.104.2.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.105.1.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2036.105.3.el7uek.aarch64", 
+ "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2040.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2041.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2051.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.200.7.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.200.9.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.202.4.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.202.5.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.203.3.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.203.4.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.204.0.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.204.1.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.204.2.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.204.3.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.204.4.3.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.205.2.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.205.7.2.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.205.7.3.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2102.206.1.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2106.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2108.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2109.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2111.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2114.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2118.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2120.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2122.303.5.el7uek.aarch64", + "cindex": 47 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": 
"5.4.17-2122.el7uek.aarch64", + "cindex": 46 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2136.300.7.el7uek.aarch64", + "cindex": 47 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.17-2136.301.0.el7uek.aarch64", + "cindex": 47 + }, + { + "distrib": "ol", + "version": "7", + "arch": "arm64", + "uname_release": "5.4.2-1950.2.el7uek.aarch64", + "cindex": 45 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.0.2.el8_1.aarch64", + "cindex": 48 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.0.3.el8_1.aarch64", + "cindex": 48 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.3.1.el8_1.aarch64", + "cindex": 48 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.5.1.el8_1.aarch64", + "cindex": 48 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.8.1.el8_1.aarch64", + "cindex": 48 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.el8.aarch64", + "cindex": 48 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.1.2.el8_0.aarch64", + "cindex": 49 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.11.1.el8_0.aarch64", + "cindex": 49 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.11.2.el8_0.aarch64", + "cindex": 49 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.4.2.el8_0.aarch64", + "cindex": 49 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.7.1.el8_0.aarch64", + "cindex": 49 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.7.2.el8_0.aarch64", + "cindex": 49 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.el8.aarch64", + "cindex": 49 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2011.0.7.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2011.1.2.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2011.2.2.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2011.3.2.1.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2011.4.4.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2011.4.6.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2011.5.3.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2011.6.2.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2011.7.4.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2036.100.6.1.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": 
"5.4.17-2036.101.2.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2036.102.0.2.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2036.103.3.1.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2036.103.3.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2036.104.4.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2036.104.5.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2102.200.13.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2102.201.3.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2102.202.5.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2102.203.5.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2102.203.6.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "ol", + "version": "8", + "arch": "arm64", + "uname_release": "5.4.17-2102.204.4.2.el8uek.aarch64", + "cindex": 50 + }, + { + "distrib": "opensuse-leap", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-lp150.11-default", + "cindex": 51 + }, + { + "distrib": "opensuse-leap", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-lp151.27-default", + "cindex": 52 + }, + { + "distrib": "opensuse-leap", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-lp152.19-default", + "cindex": 53 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.43-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.43-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.46-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.46-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.49-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.49-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.54-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.54-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.60-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.60-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.63-64kb", + "cindex": 54 + }, + { + "distrib": 
"opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.63-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.68-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.68-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.71-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.71-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.76-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.76-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.81-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.81-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.87-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.87-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-57-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-57-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.10-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.10-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.13-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.13-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.16-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.16-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.19-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.19-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.24-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.24-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.27-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.27-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": 
"5.3.18-59.30-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.30-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.34-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.34-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.37-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.37-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.40-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.40-default", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.5-64kb", + "cindex": 54 + }, + { + "distrib": "opensuse-leap", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.5-default", + "cindex": 54 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-44.2.1.el7a.aarch64", + "cindex": 55 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-44.4.1.el7a.aarch64", + "cindex": 55 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-44.6.1.el7a.aarch64", + "cindex": 55 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-44.7.1.el7a.aarch64", + "cindex": 55 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.11.0-44.el7a.aarch64", + "cindex": 55 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.10.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.12.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.13.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.14.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.16.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.17.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.18.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.19.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.2.2.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.21.2.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.26.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.29.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + 
"arch": "arm64", + "uname_release": "4.14.0-115.32.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.33.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.5.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.6.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.7.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.8.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.8.2.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-115.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.10.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.13.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.2.2.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.8.1.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "7", + "arch": "arm64", + "uname_release": "4.14.0-49.el7a.aarch64", + "cindex": 56 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.0.2.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.0.3.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.13.2.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.20.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.24.2.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.27.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.3.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.32.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.34.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.38.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.43.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.44.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.48.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.5.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": 
"rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.51.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.51.2.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.52.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.54.2.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.56.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.57.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.8.1.el8_1.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-147.el8.aarch64", + "cindex": 57 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.1.2.el8_0.aarch64", + "cindex": 58 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.11.1.el8_0.aarch64", + "cindex": 58 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.11.2.el8_0.aarch64", + "cindex": 58 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.4.2.el8_0.aarch64", + "cindex": 58 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.7.1.el8_0.aarch64", + "cindex": 58 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.7.2.el8_0.aarch64", + "cindex": 58 + }, + { + "distrib": "rhel", + "version": "8", + "arch": "arm64", + "uname_release": "4.18.0-80.el8.aarch64", + "cindex": 58 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-150.14-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-150.17-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-150.22-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-150.27-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-150.32-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-150.35-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-150.38-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-150.41-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-150.47-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-23-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-25.13-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-25.16-default", + "cindex": 59 + }, + { + "distrib": "sles", + 
"version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-25.19-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-25.22-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-25.25-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-25.28-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-25.3-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.0", + "arch": "arm64", + "uname_release": "4.12.14-25.6-default", + "cindex": 59 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-195-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.10-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.15-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.18-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.21-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.26-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.29-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.34-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.37-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.4-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.40-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.45-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.48-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.51-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.56-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.61-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.64-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.67-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.7-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.72-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": "4.12.14-197.75-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.1", + "arch": "arm64", + "uname_release": 
"4.12.14-197.78-default", + "cindex": 60 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-22-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.12-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.15-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.24-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.29-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.34-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.37-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.43-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.46-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.49-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.52-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.53.4-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.61-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.64-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.67-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.70-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.75-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.78-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.83-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.86-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.9-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.93-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.2", + "arch": "arm64", + "uname_release": "5.3.18-24.96-default", + "cindex": 61 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.43-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.43-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.46-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.46-default", + "cindex": 62 + }, + { + "distrib": "sles", + 
"version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.49-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.49-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.54-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.54-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.60-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.60-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.63-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.63-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.68-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.68-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.71-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.71-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.76-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.76-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.81-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.81-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.87-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-150300.59.87-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-57-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-57-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.10-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.10-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.13-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.13-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.16-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.16-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.19-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + 
"arch": "arm64", + "uname_release": "5.3.18-59.19-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.24-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.24-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.27-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.27-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.30-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.30-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.34-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.34-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.37-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.37-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.40-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.40-default", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.5-64kb", + "cindex": 62 + }, + { + "distrib": "sles", + "version": "15.3", + "arch": "arm64", + "uname_release": "5.3.18-59.5-default", + "cindex": 62 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.13.0-16-generic", + "cindex": 63 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.13.0-17-generic", + "cindex": 63 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.13.0-25-generic", + "cindex": 63 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.13.0-32-generic", + "cindex": 63 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-10-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-101-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1029-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1031-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1032-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1033-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1034-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1035-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1037-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + 
"arch": "arm64", + "uname_release": "4.15.0-1039-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1040-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1041-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1043-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1044-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1045-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1047-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1048-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1050-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1051-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1052-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1054-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1056-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1057-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1058-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-106-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1060-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1063-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1065-aws", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1066-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1067-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1073-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1076-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1077-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1079-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-108-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1080-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1082-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + 
"uname_release": "4.15.0-1083-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1086-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1087-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1088-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-109-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1090-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1091-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1092-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1093-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1094-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1095-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1096-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1097-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1098-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1099-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1101-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1102-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1103-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1106-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1109-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-111-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1110-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1111-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1112-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1114-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1115-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1116-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1118-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": 
"4.15.0-1119-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-112-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1121-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1123-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1124-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1126-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1127-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1128-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1130-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1133-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1136-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1137-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1139-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1140-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1141-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1142-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1143-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1144-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1146-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1147-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1148-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-115-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1150-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1151-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1153-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1154-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1155-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1156-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1157-aws", + 
"cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-1158-aws", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-117-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-118-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-12-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-121-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-122-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-123-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-124-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-126-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-128-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-129-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-13-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-130-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-132-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-134-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-135-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-136-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-137-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-139-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-140-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-141-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-142-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-143-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-144-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-147-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-15-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-151-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": 
"arm64", + "uname_release": "4.15.0-153-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-154-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-156-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-158-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-159-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-161-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-162-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-163-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-166-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-167-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-169-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-171-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-173-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-175-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-176-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-177-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-180-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-184-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-187-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-188-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-189-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-19-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-191-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-192-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-193-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-194-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-196-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-197-generic", + "cindex": 65 + }, + 
{ + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-20-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-200-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-201-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-202-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-204-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-206-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-208-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-209-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-210-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-211-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-212-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-213-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-22-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-23-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-24-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-29-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-30-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-32-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-33-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-34-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-36-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-38-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-39-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-42-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-43-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-44-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-45-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": 
"4.15.0-46-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-47-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-48-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-50-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-51-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-52-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-54-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-55-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-58-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-60-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-62-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-64-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-65-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-66-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-69-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-70-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-72-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-74-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-76-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-88-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-91-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-96-generic", + "cindex": 64 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.15.0-99-generic", + "cindex": 65 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-1006-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-1007-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-1008-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-1011-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-1012-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", 
+ "uname_release": "4.18.0-1013-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-1016-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-1017-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-1018-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-1020-aws", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-13-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-14-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-15-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-16-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-17-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-18-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-20-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-21-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-22-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-24-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "4.18.0-25-generic", + "cindex": 66 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1011-aws", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1012-aws", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1014-aws", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1016-aws", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1018-aws", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1019-aws", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1021-aws", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1022-aws", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1023-aws", + "cindex": 68 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1024-aws", + "cindex": 68 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1025-aws", + "cindex": 68 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-1027-aws", + "cindex": 68 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + 
"uname_release": "5.0.0-15-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-16-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-17-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-19-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-20-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-23-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-25-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-27-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-29-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-31-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-32-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-35-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-36-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-37-generic", + "cindex": 67 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-43-generic", + "cindex": 68 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-44-generic", + "cindex": 68 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-47-generic", + "cindex": 68 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-48-generic", + "cindex": 69 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-52-generic", + "cindex": 69 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-53-generic", + "cindex": 69 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-58-generic", + "cindex": 69 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-60-generic", + "cindex": 69 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-61-generic", + "cindex": 69 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-62-generic", + "cindex": 69 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-63-generic", + "cindex": 69 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.0.0-65-generic", + "cindex": 69 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-1016-aws", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-1017-aws", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + 
"uname_release": "5.3.0-1019-aws", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-1023-aws", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-1028-aws", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-1030-aws", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-1032-aws", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-1033-aws", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-1034-aws", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-1035-aws", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-19-generic", + "cindex": 72 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-22-generic", + "cindex": 73 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-23-generic", + "cindex": 73 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-24-generic", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-26-generic", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-28-generic", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-40-generic", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-42-generic", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-45-generic", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-46-generic", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-51-generic", + "cindex": 70 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-53-generic", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-59-generic", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-61-generic", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-62-generic", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.3.0-64-generic", + "cindex": 71 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1018-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1020-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1022-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1024-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": 
"5.4.0-1025-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1028-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1029-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1030-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1032-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1034-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1035-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1037-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1038-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1039-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1041-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1043-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1045-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1047-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1048-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1049-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1051-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1054-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1055-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1056-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1057-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1058-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1059-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-1060-aws", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-37-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-39-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-40-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-42-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-45-generic", + "cindex": 74 + }, + { + 
"distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-47-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-48-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-51-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-52-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-53-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-54-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-56-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-58-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-59-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-60-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-62-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-64-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-65-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-66-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-67-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-70-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-71-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-72-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-73-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-74-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-77-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-80-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-81-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-84-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-86-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-87-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-89-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-90-generic", + "cindex": 74 + }, + { 
+ "distrib": "ubuntu", + "version": "18.04", + "arch": "arm64", + "uname_release": "5.4.0-91-generic", + "cindex": 74 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.11.0-1009-aws", + "cindex": 75 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.11.0-1014-aws", + "cindex": 75 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.11.0-1016-aws", + "cindex": 75 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.11.0-1017-aws", + "cindex": 75 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.11.0-1019-aws", + "cindex": 75 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.11.0-1020-aws", + "cindex": 75 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.11.0-1025-azure", + "cindex": 76 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.11.0-1027-azure", + "cindex": 76 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.3.0-1003-aws", + "cindex": 77 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.3.0-1008-aws", + "cindex": 78 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.3.0-1009-aws", + "cindex": 78 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.3.0-1010-aws", + "cindex": 78 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.3.0-18-generic", + "cindex": 77 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.3.0-24-generic", + "cindex": 78 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1005-aws", + "cindex": 79 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1007-aws", + "cindex": 79 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1008-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1009-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1011-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1015-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1017-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1018-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1020-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1021-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1022-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1024-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1025-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": 
"20.04", + "arch": "arm64", + "uname_release": "5.4.0-1028-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1029-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1030-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1032-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1034-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1035-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1037-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1038-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1039-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1041-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1043-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1045-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1047-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1048-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1049-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1051-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1054-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1055-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1056-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1057-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1058-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1059-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-1060-aws", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-12-generic", + "cindex": 79 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-14-generic", + "cindex": 79 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-18-generic", + "cindex": 79 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-21-generic", + "cindex": 79 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-24-generic", + "cindex": 81 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + 
"uname_release": "5.4.0-25-generic", + "cindex": 81 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-26-generic", + "cindex": 81 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-28-generic", + "cindex": 81 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-29-generic", + "cindex": 81 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-31-generic", + "cindex": 81 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-33-generic", + "cindex": 81 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-37-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-39-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-40-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-42-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-45-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-47-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-48-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-51-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-52-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-53-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-54-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-56-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-58-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-59-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-60-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-62-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-64-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-65-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-66-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-67-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-70-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-71-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", 
+ "uname_release": "5.4.0-72-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-73-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-74-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-77-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-80-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-81-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-84-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-86-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-88-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-89-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-9-generic", + "cindex": 79 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-90-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.4.0-91-generic", + "cindex": 80 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-1035-aws", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-1038-aws", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-1041-aws", + "cindex": 83 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-1042-aws", + "cindex": 83 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-23-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-25-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-28-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-29-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-31-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-33-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-34-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-36-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-38-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-40-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-41-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + 
"uname_release": "5.8.0-43-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-44-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-45-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-48-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-49-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-50-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-53-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-55-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-59-generic", + "cindex": 82 + }, + { + "distrib": "ubuntu", + "version": "20.04", + "arch": "arm64", + "uname_release": "5.8.0-63-generic", + "cindex": 83 + } + ] +} \ No newline at end of file diff --git a/pkg/security/probe/constantfetch/btfhub/main.go b/pkg/security/probe/constantfetch/btfhub/main.go index f71dcaaf486e4..5d7849b3dedcf 100644 --- a/pkg/security/probe/constantfetch/btfhub/main.go +++ b/pkg/security/probe/constantfetch/btfhub/main.go @@ -40,13 +40,11 @@ import ( func main() { var archiveRootPath string var constantOutputPath string - var forceRefresh bool var combineConstants bool var cpuPprofPath string flag.StringVar(&archiveRootPath, "archive-root", "", "Root path of BTFHub archive") flag.StringVar(&constantOutputPath, "output", "", "Output path for JSON constants") - flag.BoolVar(&forceRefresh, "force-refresh", false, "Force refresh of the constants") flag.BoolVar(&combineConstants, "combine", false, "Don't read btf files, but read constants") flag.StringVar(&cpuPprofPath, "cpu-prof", "", "Path to the CPU profile to generate") flag.Parse() @@ -81,21 +79,7 @@ func main() { } fmt.Printf("btfhub-archive: commit %s\n", archiveCommit) - preAllocHint := 0 - - if !forceRefresh { - // skip if commit is already the most recent - currentConstants, err := getCurrentConstants(constantOutputPath) - if err == nil && currentConstants.Commit != "" { - if currentConstants.Commit == archiveCommit { - fmt.Printf("already at most recent archive commit") - return - } - preAllocHint = len(currentConstants.Kernels) - } - } - - twCollector := newTreeWalkCollector(preAllocHint) + twCollector := newTreeWalkCollector() var wg sync.WaitGroup // github actions runner have only 2 cores @@ -118,8 +102,6 @@ func main() { export := twCollector.finish() - export.Commit = archiveCommit - if err := outputConstants(&export, constantOutputPath); err != nil { panic(err) } @@ -173,16 +155,7 @@ func combineConstantFiles(archiveRootPath string) (constantfetch.BTFHubConstants return constantfetch.BTFHubConstants{}, errors.New("no json file found") } - lastCommit := "" - for _, file := range files { - if lastCommit != "" && file.Commit != lastCommit { - return constantfetch.BTFHubConstants{}, errors.New("multiple different commits in constant files") - } - } - - res := constantfetch.BTFHubConstants{ - Commit: lastCommit, - } + res := constantfetch.BTFHubConstants{} for _, file := range files { offset := len(res.Constants) @@ -197,20 +170,6 @@ func combineConstantFiles(archiveRootPath string) 
(constantfetch.BTFHubConstants return res, nil } -func getCurrentConstants(path string) (*constantfetch.BTFHubConstants, error) { - cjson, err := os.ReadFile(path) - if err != nil { - return nil, err - } - - var currentConstants constantfetch.BTFHubConstants - if err := json.Unmarshal(cjson, ¤tConstants); err != nil { - return nil, err - } - - return ¤tConstants, nil -} - func getCommitSha(cwd string) (string, error) { cmd := exec.Command("git", "rev-parse", "HEAD") cmd.Dir = cwd @@ -231,10 +190,10 @@ type treeWalkCollector struct { queryChan chan extractionQuery } -func newTreeWalkCollector(preAllocHint int) *treeWalkCollector { +func newTreeWalkCollector() *treeWalkCollector { return &treeWalkCollector{ counter: 0, - results: make([]extractionResult, 0, preAllocHint), + results: make([]extractionResult, 0), cache: make(map[string]map[string]uint64), queryChan: make(chan extractionQuery), } diff --git a/pkg/security/probe/constantfetch/btfhub_amd64.go b/pkg/security/probe/constantfetch/btfhub_amd64.go new file mode 100644 index 0000000000000..3539108686450 --- /dev/null +++ b/pkg/security/probe/constantfetch/btfhub_amd64.go @@ -0,0 +1,14 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux && amd64 + +// Package constantfetch holds constantfetch related files +package constantfetch + +import _ "embed" // for go:embed + +//go:embed btfhub/constants_amd64.json +var btfhubConstants []byte diff --git a/pkg/security/probe/constantfetch/btfhub_arm64.go b/pkg/security/probe/constantfetch/btfhub_arm64.go new file mode 100644 index 0000000000000..924b801c06c1e --- /dev/null +++ b/pkg/security/probe/constantfetch/btfhub_arm64.go @@ -0,0 +1,14 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
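Note on the constants data and the new embed files: the constants_*.json payload above is deduplicated, in that kernels sharing identical offsets point at a single entry in a shared constants list through their "cindex" field, and the new btfhub_amd64.go / btfhub_arm64.go files embed only the table for the architecture being built (selected by build tags). The sketch below illustrates that layout and lookup with hypothetical local types and assumed top-level JSON keys; the real definitions live in pkg/security/probe/constantfetch and may differ.

package main

import (
	"encoding/json"
	"fmt"
)

// kernelEntry mirrors the per-kernel records visible in constants_arm64.json above.
type kernelEntry struct {
	Distrib      string `json:"distrib"`
	Version      string `json:"version"`
	Arch         string `json:"arch"`
	UnameRelease string `json:"uname_release"`
	ConstantsIdx int    `json:"cindex"` // index into the shared Constants slice
}

// constantsTable is an assumed shape for the embedded JSON document.
type constantsTable struct {
	Constants []map[string]uint64 `json:"constants"`
	Kernels   []kernelEntry       `json:"kernels"`
}

// offsetsForKernel returns the offset set recorded for a given uname release.
func offsetsForKernel(tbl constantsTable, release string) (map[string]uint64, bool) {
	for _, k := range tbl.Kernels {
		if k.UnameRelease == release {
			return tbl.Constants[k.ConstantsIdx], true
		}
	}
	return nil, false
}

func main() {
	// Two kernels sharing one constant set, as in the data above where many
	// 5.4.0-*-generic releases all carry the same cindex.
	raw := []byte(`{
		"constants": [{"inode_ino_offset": 64, "mount_id_offset": 284}],
		"kernels": [
			{"distrib": "ubuntu", "version": "20.04", "arch": "arm64",
			 "uname_release": "5.4.0-90-generic", "cindex": 0},
			{"distrib": "ubuntu", "version": "20.04", "arch": "arm64",
			 "uname_release": "5.4.0-91-generic", "cindex": 0}
		]
	}`)

	var tbl constantsTable
	if err := json.Unmarshal(raw, &tbl); err != nil {
		panic(err)
	}
	if offsets, ok := offsetsForKernel(tbl, "5.4.0-91-generic"); ok {
		fmt.Println(offsets["inode_ino_offset"]) // 64
	}
}

The dedup keeps the archive small even though the kernel list runs into the hundreds, since most point releases of a given kernel series resolve to the same set of structure offsets.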
+ +//go:build linux && arm64 + +// Package constantfetch holds constantfetch related files +package constantfetch + +import _ "embed" // for go:embed + +//go:embed btfhub/constants_arm64.json +var btfhubConstants []byte diff --git a/pkg/security/probe/constantfetch/constant_names.go b/pkg/security/probe/constantfetch/constant_names.go index 80d8b57c04a25..528d1b756cbd7 100644 --- a/pkg/security/probe/constantfetch/constant_names.go +++ b/pkg/security/probe/constantfetch/constant_names.go @@ -30,6 +30,18 @@ const ( OffsetNameFileFpath = "file_f_path_offset" OffsetNameDentryDSb = "dentry_d_sb_offset" OffsetNameMountMntID = "mount_id_offset" + OffsetNameSbDev = "sb_dev_offset" + OffsetNameDentryDInode = "dentry_d_inode_offset" + OffsetNamePathDentry = "path_dentry_offset" + OffsetNameInodeSuperblock = "inode_sb_offset" + OffsetNamePathMnt = "path_mnt_offset" + OffsetNameMountMntMountpoint = "mount_mnt_mountpoint_offset" + OffsetNameMountpointDentry = "mountpoint_dentry_offset" + OffsetNameVfsmountMntFlags = "vfsmount_mnt_flags_offset" + OffsetNameVfsmountMntRoot = "vfsmount_mnt_root_offset" + OffsetNameVfsmountMntSb = "vfsmount_mnt_sb_offset" + OffsetNameSuperblockSType = "super_block_s_type_offset" + OffsetNameDentryDName = "dentry_d_name_offset" // inode OffsetInodeIno = "inode_ino_offset" @@ -89,11 +101,16 @@ const ( OffsetNameSocketStructSK = "socket_sock_offset" OffsetNameNFConnStructCTNet = "nf_conn_ct_net_offset" OffsetNameSockCommonStructSKCFamily = "sock_common_skc_family_offset" + OffsetNameSockCommonStructSKCNum = "sock_common_skc_num_offset" OffsetNameFlowI4StructSADDR = "flowi4_saddr_offset" OffsetNameFlowI6StructSADDR = "flowi6_saddr_offset" OffsetNameFlowI4StructULI = "flowi4_uli_offset" OffsetNameFlowI6StructULI = "flowi6_uli_offset" + // TODO: needed for l4_protocol resolution, see network/flow.h + OffsetNameFlowI4StructProto = "flowi4_proto_offset" + OffsetNameFlowI6StructProto = "flowi6_proto_offset" + // Interpreter constants OffsetNameLinuxBinprmStructFile = "binprm_file_offset" diff --git a/pkg/security/probe/constantfetch/fallback.go b/pkg/security/probe/constantfetch/fallback.go index cc47a7b378133..6ab1664c85722 100644 --- a/pkg/security/probe/constantfetch/fallback.go +++ b/pkg/security/probe/constantfetch/fallback.go @@ -22,6 +22,9 @@ import ( type FallbackConstantFetcher struct { kernelVersion *kernel.Version res map[string]uint64 + + raws map[string]uint64 + callbacks map[string]func(*kernel.Version) uint64 } // NewFallbackConstantFetcher returns a new FallbackConstantFetcher @@ -29,6 +32,9 @@ func NewFallbackConstantFetcher(kv *kernel.Version) *FallbackConstantFetcher { return &FallbackConstantFetcher{ kernelVersion: kv, res: make(map[string]uint64), + + raws: computeRawsTable(), + callbacks: computeCallbacksTable(), } } @@ -36,127 +42,96 @@ func (f *FallbackConstantFetcher) String() string { return "fallback" } +func computeRawsTable() map[string]uint64 { + return map[string]uint64{ + OffsetInodeIno: 64, + OffsetInodeGid: 8, + OffsetInodeNlink: 72, + OffsetInodeMtime: 104, + OffsetInodeCtime: 120, + OffsetNameSuperBlockStructSFlags: 80, + OffsetNameBPFMapStructMapType: 24, + OffsetNameBPFProgStructType: 4, + OffsetNameBPFProgStructExpectedAttachType: 8, + OffsetNamePIDStructLevel: 4, + OffsetNameNetStructProcInum: 72, + OffsetNameSockCommonStructSKCNet: 48, + OffsetNameSockCommonStructSKCFamily: 16, + OffsetNameDentryDSb: 104, + OffsetNameNetDeviceStructName: 0, + OffsetNameRenameStructOldDentry: 16, + OffsetNameRenameStructNewDentry: 40, + OffsetNameSbDev: 16, 
+ OffsetNameDentryDInode: 48, + OffsetNamePathDentry: 8, + OffsetNameInodeSuperblock: 40, + OffsetNamePathMnt: 0, + OffsetNameMountMntMountpoint: 24, + OffsetNameMountpointDentry: 16, + OffsetNameVfsmountMntFlags: 16, + OffsetNameSuperblockSType: 40, + OffsetNameVfsmountMntRoot: 0, + OffsetNameDentryDName: 32, + OffsetNameVfsmountMntSb: 8, + OffsetNameSockCommonStructSKCNum: 14, + OffsetNameFlowI4StructProto: 18, + OffsetNameFlowI6StructProto: 18, + } +} + +func computeCallbacksTable() map[string]func(*kernel.Version) uint64 { + return map[string]func(*kernel.Version) uint64{ + SizeOfInode: getSizeOfStructInode, + OffsetNameSuperBlockStructSMagic: getSuperBlockMagicOffset, + OffsetNameSignalStructStructTTY: getSignalTTYOffset, + OffsetNameTTYStructStructName: getTTYNameOffset, + OffsetNameCredStructUID: getCredsUIDOffset, + OffsetNameCredStructCapInheritable: getCredCapInheritableOffset, + OffsetNameBPFMapStructID: getBpfMapIDOffset, + OffsetNameBPFMapStructName: getBpfMapNameOffset, + OffsetNameBPFProgStructAux: getBpfProgAuxOffset, + OffsetNameBPFProgStructTag: getBpfProgTagOffset, + OffsetNameBPFProgAuxStructID: getBpfProgAuxIDOffset, + OffsetNameBPFProgAuxStructName: getBpfProgAuxNameOffset, + OffsetNamePIDStructNumbers: getPIDNumbersOffset, + SizeOfUPID: getSizeOfUpid, + OffsetNamePIDLinkStructPID: getPIDLinkPIDOffset, + OffsetNameDentryStructDSB: getDentrySuperBlockOffset, + OffsetNamePipeInodeInfoStructBufs: getPipeInodeInfoBufsOffset, + OffsetNamePipeInodeInfoStructNrbufs: getPipeInodeInfoStructNrbufs, + OffsetNamePipeInodeInfoStructCurbuf: getPipeInodeInfoStructCurbuf, + OffsetNamePipeInodeInfoStructBuffers: getPipeInodeInfoStructBuffers, + OffsetNamePipeInodeInfoStructHead: getPipeInodeInfoStructHead, + OffsetNamePipeInodeInfoStructRingsize: getPipeInodeInfoStructRingsize, + OffsetNameNetDeviceStructIfIndex: getNetDeviceIfindexOffset, + OffsetNameNetStructNS: getNetNSOffset, + OffsetNameSocketStructSK: getSocketSockOffset, + OffsetNameNFConnStructCTNet: getNFConnCTNetOffset, + OffsetNameFlowI4StructSADDR: getFlowi4SAddrOffset, + OffsetNameFlowI6StructSADDR: getFlowi6SAddrOffset, + OffsetNameFlowI4StructULI: getFlowi4ULIOffset, + OffsetNameFlowI6StructULI: getFlowi6ULIOffset, + OffsetNameLinuxBinprmStructFile: getBinPrmFileFieldOffset, + OffsetNameIoKiocbStructCtx: getIoKcbCtxOffset, + OffsetNameLinuxBinprmP: getLinuxBinPrmPOffset, + OffsetNameLinuxBinprmArgc: getLinuxBinPrmArgcOffset, + OffsetNameLinuxBinprmEnvc: getLinuxBinPrmEnvcOffset, + OffsetNameVMAreaStructFlags: getVMAreaStructFlagsOffset, + OffsetNameKernelCloneArgsExitSignal: getKernelCloneArgsExitSignalOffset, + OffsetNameFileFinode: getFileFinodeOffset, + OffsetNameFileFpath: getFileFpathOffset, + OffsetNameMountMntID: getMountIDOffset, + } +} + func (f *FallbackConstantFetcher) appendRequest(id string) { - var value = ErrorSentinel - switch id { - case SizeOfInode: - value = getSizeOfStructInode(f.kernelVersion) - case OffsetNameSuperBlockStructSFlags: - value = getSuperBlockFlagsOffset(f.kernelVersion) - case OffsetNameSuperBlockStructSMagic: - value = getSuperBlockMagicOffset(f.kernelVersion) - case OffsetNameSignalStructStructTTY: - value = getSignalTTYOffset(f.kernelVersion) - case OffsetNameTTYStructStructName: - value = getTTYNameOffset(f.kernelVersion) - case OffsetNameCredStructUID: - value = getCredsUIDOffset(f.kernelVersion) - case OffsetNameCredStructCapInheritable: - value = getCredCapInheritableOffset(f.kernelVersion) - case OffsetNameBPFMapStructID: - value = getBpfMapIDOffset(f.kernelVersion) - case 
OffsetNameBPFMapStructName: - value = getBpfMapNameOffset(f.kernelVersion) - case OffsetNameBPFMapStructMapType: - value = getBpfMapTypeOffset(f.kernelVersion) - case OffsetNameBPFProgStructAux: - value = getBpfProgAuxOffset(f.kernelVersion) - case OffsetNameBPFProgStructTag: - value = getBpfProgTagOffset(f.kernelVersion) - case OffsetNameBPFProgStructType: - value = getBpfProgTypeOffset(f.kernelVersion) - case OffsetNameBPFProgStructExpectedAttachType: - value = getBpfProgAttachTypeOffset(f.kernelVersion) - case OffsetNameBPFProgAuxStructID: - value = getBpfProgAuxIDOffset(f.kernelVersion) - case OffsetNameBPFProgAuxStructName: - value = getBpfProgAuxNameOffset(f.kernelVersion) - case OffsetNamePIDStructLevel: - value = getPIDLevelOffset(f.kernelVersion) - case OffsetNamePIDStructNumbers: - value = getPIDNumbersOffset(f.kernelVersion) - case SizeOfUPID: - value = getSizeOfUpid(f.kernelVersion) - case OffsetNameTaskStructPID: - value = getTaskStructPIDOffset(f.kernelVersion) - case OffsetNameTaskStructPIDLink: - value = getTaskStructPIDLinkOffset(f.kernelVersion) - case OffsetNamePIDLinkStructPID: - value = getPIDLinkPIDOffset(f.kernelVersion) - case OffsetNameDentryStructDSB: - value = getDentrySuperBlockOffset(f.kernelVersion) - case OffsetNamePipeInodeInfoStructBufs: - value = getPipeInodeInfoBufsOffset(f.kernelVersion) - case OffsetNamePipeInodeInfoStructNrbufs: - value = getPipeInodeInfoStructNrbufs(f.kernelVersion) - case OffsetNamePipeInodeInfoStructCurbuf: - value = getPipeInodeInfoStructCurbuf(f.kernelVersion) - case OffsetNamePipeInodeInfoStructBuffers: - value = getPipeInodeInfoStructBuffers(f.kernelVersion) - case OffsetNamePipeInodeInfoStructHead: - value = getPipeInodeInfoStructHead(f.kernelVersion) - case OffsetNamePipeInodeInfoStructRingsize: - value = getPipeInodeInfoStructRingsize(f.kernelVersion) - case OffsetNameNetDeviceStructIfIndex: - value = getNetDeviceIfindexOffset(f.kernelVersion) - case OffsetNameNetDeviceStructName: - value = getNetDeviceNameOffset(f.kernelVersion) - case OffsetNameNetStructNS: - value = getNetNSOffset(f.kernelVersion) - case OffsetNameNetStructProcInum: - value = getNetProcINumOffset(f.kernelVersion) - case OffsetNameSockCommonStructSKCNet: - value = getSockCommonSKCNetOffset(f.kernelVersion) - case OffsetNameSocketStructSK: - value = getSocketSockOffset(f.kernelVersion) - case OffsetNameNFConnStructCTNet: - value = getNFConnCTNetOffset(f.kernelVersion) - case OffsetNameSockCommonStructSKCFamily: - value = getSockCommonSKCFamilyOffset(f.kernelVersion) - case OffsetNameFlowI4StructSADDR: - value = getFlowi4SAddrOffset(f.kernelVersion) - case OffsetNameFlowI6StructSADDR: - value = getFlowi6SAddrOffset(f.kernelVersion) - case OffsetNameFlowI4StructULI: - value = getFlowi4ULIOffset(f.kernelVersion) - case OffsetNameFlowI6StructULI: - value = getFlowi6ULIOffset(f.kernelVersion) - case OffsetNameLinuxBinprmStructFile: - value = getBinPrmFileFieldOffset(f.kernelVersion) - case OffsetNameIoKiocbStructCtx: - value = getIoKcbCtxOffset(f.kernelVersion) - case OffsetNameLinuxBinprmP: - value = getLinuxBinPrmPOffset(f.kernelVersion) - case OffsetNameLinuxBinprmArgc: - value = getLinuxBinPrmArgcOffset(f.kernelVersion) - case OffsetNameLinuxBinprmEnvc: - value = getLinuxBinPrmEnvcOffset(f.kernelVersion) - case OffsetNameVMAreaStructFlags: - value = getVMAreaStructFlagsOffset(f.kernelVersion) - case OffsetNameKernelCloneArgsExitSignal: - value = getKernelCloneArgsExitSignalOffset(f.kernelVersion) - case OffsetNameFileFinode: - value = 
getFileFinodeOffset(f.kernelVersion) - case OffsetNameFileFpath: - value = getFileFpathOffset(f.kernelVersion) - case OffsetNameDentryDSb: - value = getDentryDsbOffset(f.kernelVersion) - case OffsetNameMountMntID: - value = getMountIDOffset(f.kernelVersion) - case OffsetNameRenameStructOldDentry: - value = getRenameStructOldDentryOffset(f.kernelVersion) - case OffsetNameRenameStructNewDentry: - value = getRenameStructNewDentryOffset(f.kernelVersion) - case OffsetInodeIno: - value = getInodeInoOffset(f.kernelVersion) - case OffsetInodeGid: - value = getInodeGIDOffset(f.kernelVersion) - case OffsetInodeNlink: - value = getInodeNlinkOffset(f.kernelVersion) - case OffsetInodeMtime: - value = getInodeMtimeOffset(f.kernelVersion) - case OffsetInodeCtime: - value = getInodeCtimeOffset(f.kernelVersion) + var value uint64 + if raw, ok := f.raws[id]; ok { + value = raw + } else if cb, ok := f.callbacks[id]; ok { + value = cb(f.kernelVersion) + } else { + value = ErrorSentinel } f.res[id] = value } @@ -246,30 +221,6 @@ func getSizeOfStructInode(kv *kernel.Version) uint64 { return sizeOf } -func getInodeInoOffset(_ *kernel.Version) uint64 { - return uint64(64) -} - -func getInodeGIDOffset(_ *kernel.Version) uint64 { - return uint64(8) -} - -func getInodeNlinkOffset(_ *kernel.Version) uint64 { - return uint64(72) -} - -func getInodeMtimeOffset(_ *kernel.Version) uint64 { - return uint64(104) -} - -func getInodeCtimeOffset(_ *kernel.Version) uint64 { - return uint64(120) -} - -func getSuperBlockFlagsOffset(_ *kernel.Version) uint64 { - return uint64(80) -} - func getSuperBlockMagicOffset(kv *kernel.Version) uint64 { offset := uint64(96) @@ -437,10 +388,6 @@ func getBpfMapNameOffset(kv *kernel.Version) uint64 { return nameOffset } -func getBpfMapTypeOffset(_ *kernel.Version) uint64 { - return uint64(24) -} - func getBpfProgAuxOffset(kv *kernel.Version) uint64 { auxOffset := uint64(32) @@ -468,14 +415,6 @@ func getBpfProgTagOffset(kv *kernel.Version) uint64 { return progTagOffset } -func getBpfProgTypeOffset(_ *kernel.Version) uint64 { - return uint64(4) -} - -func getBpfProgAttachTypeOffset(_ *kernel.Version) uint64 { - return uint64(8) -} - func getBpfProgAuxIDOffset(kv *kernel.Version) uint64 { idOffset := uint64(24) @@ -499,7 +438,7 @@ func getBpfProgAuxIDOffset(kv *kernel.Version) uint64 { idOffset = 24 case kv.IsInRangeCloseOpen(kernel.Kernel5_8, kernel.Kernel5_13): idOffset = 28 - case kv.Code != 0 && kv.Code >= kernel.Kernel5_13: + case kv.Code >= kernel.Kernel5_13: idOffset = 32 } @@ -547,17 +486,13 @@ func getBpfProgAuxNameOffset(kv *kernel.Version) uint64 { nameOffset = 544 case kv.IsInRangeCloseOpen(kernel.Kernel5_17, kernel.Kernel6_1): nameOffset = 528 - case kv.Code != 0 && kv.Code >= kernel.Kernel6_1: + case kv.Code >= kernel.Kernel6_1: nameOffset = 912 } return nameOffset } -func getPIDLevelOffset(_ *kernel.Version) uint64 { - return uint64(4) -} - func getPIDNumbersOffset(kv *kernel.Version) uint64 { pidNumbersOffset := uint64(48) @@ -656,7 +591,7 @@ func getPipeInodeInfoBufsOffset(kv *kernel.Version) uint64 { case kv.IsInRangeCloseOpen(kernel.Kernel5_6, kernel.Kernel5_8) || kv.IsInRangeCloseOpen(kernel.Kernel5_10, kernel.Kernel5_11): offset = 144 - case kv.Code != 0 && kv.Code >= kernel.Kernel5_8: + case kv.Code >= kernel.Kernel5_8: offset = 152 } @@ -793,14 +728,6 @@ func getNetNSOffset(kv *kernel.Version) uint64 { } } -func getNetProcINumOffset(_ *kernel.Version) uint64 { - return uint64(72) -} - -func getSockCommonSKCNetOffset(_ *kernel.Version) uint64 { - return uint64(48) -} - func 
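Note on the fallback.go rewrite above: a roughly sixty-case switch is replaced by two lookup tables, a raws map for offsets that never change across kernels and a callbacks map for offsets that depend on the kernel version, with the sentinel returned for anything unknown. Below is a self-contained sketch of that resolution order using simplified stand-in types rather than the real kernel.Version, constant names, or getter functions.

package main

import "fmt"

// errorSentinel marks an offset the fallback fetcher cannot provide
// (comparable in spirit to constantfetch.ErrorSentinel).
const errorSentinel = ^uint64(0)

// kernelVersion is a stand-in for the real kernel.Version type.
type kernelVersion struct{ major, minor int }

func (kv kernelVersion) atLeast(major, minor int) bool {
	return kv.major > major || (kv.major == major && kv.minor >= minor)
}

type fallbackFetcher struct {
	kv        kernelVersion
	raws      map[string]uint64                     // version-independent offsets
	callbacks map[string]func(kernelVersion) uint64 // version-dependent offsets
	res       map[string]uint64
}

// appendRequest resolves an identifier the same way the refactored fetcher
// does: raw table first, then callback table, then the sentinel.
func (f *fallbackFetcher) appendRequest(id string) {
	value := errorSentinel
	if raw, ok := f.raws[id]; ok {
		value = raw
	} else if cb, ok := f.callbacks[id]; ok {
		value = cb(f.kv)
	}
	f.res[id] = value
}

func main() {
	f := &fallbackFetcher{
		kv:   kernelVersion{5, 13},
		raws: map[string]uint64{"pid_level_offset": 4},
		callbacks: map[string]func(kernelVersion) uint64{
			// A hypothetical offset that moved in newer kernels.
			"bpf_prog_aux_id_offset": func(kv kernelVersion) uint64 {
				if kv.atLeast(5, 13) {
					return 32
				}
				return 24
			},
		},
		res: map[string]uint64{},
	}

	for _, id := range []string{"pid_level_offset", "bpf_prog_aux_id_offset", "unknown_offset"} {
		f.appendRequest(id)
	}
	fmt.Println(f.res) // map[bpf_prog_aux_id_offset:32 pid_level_offset:4 unknown_offset:18446744073709551615]
}

Adding a new constant then means adding one table entry (raw value or callback) instead of another switch case plus a trivially small getter function.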
getSocketSockOffset(kv *kernel.Version) uint64 { offset := uint64(32) @@ -836,10 +763,6 @@ func getNFConnCTNetOffset(kv *kernel.Version) uint64 { } } -func getSockCommonSKCFamilyOffset(_ *kernel.Version) uint64 { - return 16 -} - func getFlowi4SAddrOffset(kv *kernel.Version) uint64 { offset := uint64(40) @@ -1000,16 +923,6 @@ func getVMAreaStructFlagsOffset(kv *kernel.Version) uint64 { return 80 } -func getTaskStructPIDOffset(_ *kernel.Version) uint64 { - // do not use fallback for offsets inside task_struct - return ErrorSentinel -} - -func getTaskStructPIDLinkOffset(_ *kernel.Version) uint64 { - // do not use fallback for offsets inside task_struct - return ErrorSentinel -} - func getPIDLinkPIDOffset(kv *kernel.Version) uint64 { offset := ErrorSentinel if kv.HavePIDLinkStruct() { @@ -1045,10 +958,6 @@ func getFileFpathOffset(kv *kernel.Version) uint64 { } } -func getDentryDsbOffset(_ *kernel.Version) uint64 { - return 104 -} - func getMountIDOffset(kv *kernel.Version) uint64 { switch { case kv.IsSuseKernel() || kv.Code >= kernel.Kernel5_12: @@ -1059,15 +968,3 @@ func getMountIDOffset(kv *kernel.Version) uint64 { return 284 } } - -func getNetDeviceNameOffset(_ *kernel.Version) uint64 { - return 0 -} - -func getRenameStructOldDentryOffset(_ *kernel.Version) uint64 { - return 16 -} - -func getRenameStructNewDentryOffset(_ *kernel.Version) uint64 { - return 40 -} diff --git a/pkg/security/probe/constantfetch/fetcher.go b/pkg/security/probe/constantfetch/fetcher.go index f3b5c4445976d..10f1bfbda76df 100644 --- a/pkg/security/probe/constantfetch/fetcher.go +++ b/pkg/security/probe/constantfetch/fetcher.go @@ -108,11 +108,13 @@ func (f *ComposeConstantFetcher) fillConstantCacheIfNeeded() { seclog.Errorf("failed to run constant fetcher: %v", err) } - for _, req := range f.requests { - if req.value == ErrorSentinel { - if newValue, present := res[req.id]; present { - req.value = newValue - req.fetcherName = fetcher.String() + if len(res) != 0 { + for _, req := range f.requests { + if req.value == ErrorSentinel { + if newValue, present := res[req.id]; present { + req.value = newValue + req.fetcherName = fetcher.String() + } } } } diff --git a/pkg/security/probe/discarders_linux.go b/pkg/security/probe/discarders_linux.go index 30b4333d1995b..ad34db42a0eb9 100644 --- a/pkg/security/probe/discarders_linux.go +++ b/pkg/security/probe/discarders_linux.go @@ -237,7 +237,7 @@ func (id *inodeDiscarders) getParentDiscarderFnc(rs *rules.RuleSet, eventType mo return nil, nil } - if _, err := id.discarderEvent.GetFieldType(field); err != nil { + if _, _, err := id.discarderEvent.GetFieldMetadata(field); err != nil { return nil, err } @@ -246,7 +246,7 @@ func (id *inodeDiscarders) getParentDiscarderFnc(rs *rules.RuleSet, eventType mo } basenameField := strings.Replace(field, model.PathSuffix, model.NameSuffix, 1) - if _, err := id.discarderEvent.GetFieldType(basenameField); err != nil { + if _, _, err := id.discarderEvent.GetFieldMetadata(basenameField); err != nil { return nil, err } diff --git a/pkg/security/probe/field_handlers_ebpf.go b/pkg/security/probe/field_handlers_ebpf.go index 63215156e4a19..286813d274dea 100644 --- a/pkg/security/probe/field_handlers_ebpf.go +++ b/pkg/security/probe/field_handlers_ebpf.go @@ -121,7 +121,7 @@ func (fh *EBPFFieldHandlers) ResolveProcessArgsOptions(ev *model.Event, process // ResolveFileFieldsInUpperLayer resolves whether the file is in an upper layer func (fh *EBPFFieldHandlers) ResolveFileFieldsInUpperLayer(_ *model.Event, f *model.FileFields) bool { - return 
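Note on the fetcher.go change above: it only adds a fast path so that a fetcher returning no results skips the per-request fill loop; the composition strategy itself is unchanged. That strategy (run fetchers in priority order, only fill requests still at the sentinel) is sketched below with simplified types; the real ComposeConstantFetcher also tracks more metadata per request.

package main

import "fmt"

const errorSentinel = ^uint64(0)

// fetcher is a stand-in for a constant fetcher: given requested identifiers,
// it returns the values it could resolve.
type fetcher interface {
	Name() string
	Fetch(ids []string) map[string]uint64
}

type request struct {
	id          string
	value       uint64
	fetcherName string
}

// fill runs fetchers in priority order and only overwrites requests that are
// still at the sentinel, so earlier (more precise) fetchers win.
func fill(requests []*request, fetchers []fetcher) {
	ids := make([]string, 0, len(requests))
	for _, req := range requests {
		req.value = errorSentinel
		ids = append(ids, req.id)
	}

	for _, f := range fetchers {
		res := f.Fetch(ids)
		if len(res) == 0 {
			continue // nothing resolved by this fetcher, skip the fill loop
		}
		for _, req := range requests {
			if req.value != errorSentinel {
				continue
			}
			if v, ok := res[req.id]; ok {
				req.value = v
				req.fetcherName = f.Name()
			}
		}
	}
}

type staticFetcher struct {
	name string
	vals map[string]uint64
}

func (s staticFetcher) Name() string                       { return s.name }
func (s staticFetcher) Fetch(_ []string) map[string]uint64 { return s.vals }

func main() {
	reqs := []*request{{id: "inode_ino_offset"}, {id: "mount_id_offset"}}
	fill(reqs, []fetcher{
		staticFetcher{name: "btf", vals: map[string]uint64{"inode_ino_offset": 64}},
		staticFetcher{name: "fallback", vals: map[string]uint64{"inode_ino_offset": 60, "mount_id_offset": 284}},
	})
	for _, r := range reqs {
		fmt.Printf("%s=%d (from %s)\n", r.id, r.value, r.fetcherName)
	}
	// inode_ino_offset=64 (from btf, because btf ran first)
	// mount_id_offset=284 (from fallback)
}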
f.GetInUpperLayer() + return f.IsInUpperLayer() } // ResolveXAttrName returns the string representation of the extended attribute name diff --git a/pkg/security/probe/kfilters/process.go b/pkg/security/probe/kfilters/process.go index 84d65b3db9d3a..c39838d3c7818 100644 --- a/pkg/security/probe/kfilters/process.go +++ b/pkg/security/probe/kfilters/process.go @@ -12,12 +12,13 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/ebpf" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) const ( auidField = "process.auid" - maxAUID = model.AuditUIDUnset - 1 + maxAUID = sharedconsts.AuditUIDUnset - 1 auidApproversTable = "auid_approvers" auidRangeApproversTable = "auid_range_approvers" ) @@ -29,14 +30,14 @@ var processCapabilities = rules.FieldCapabilities{ FilterMode: rules.ApproverOnlyMode, RangeFilterValue: &rules.RangeFilterValue{Min: 0, Max: maxAUID}, FilterWeight: 100, - // convert `!= model.AuditUIDUnset`` to the max range + // convert `!= sharedconsts.AuditUIDUnset`` to the max range HandleNotApproverValue: func(fieldValueType eval.FieldValueType, value interface{}) (eval.FieldValueType, interface{}, bool) { if fieldValueType != eval.ScalarValueType { return fieldValueType, value, false } - if i, ok := value.(int); ok && uint32(i) == model.AuditUIDUnset { - return eval.RangeValueType, rules.RangeFilterValue{Min: 0, Max: model.AuditUIDUnset - 1}, true + if i, ok := value.(int); ok && uint32(i) == sharedconsts.AuditUIDUnset { + return eval.RangeValueType, rules.RangeFilterValue{Min: 0, Max: sharedconsts.AuditUIDUnset - 1}, true } return fieldValueType, value, false diff --git a/pkg/security/probe/model_ebpfless.go b/pkg/security/probe/model_ebpfless.go index de303e44cefd4..86283f18effb7 100644 --- a/pkg/security/probe/model_ebpfless.go +++ b/pkg/security/probe/model_ebpfless.go @@ -47,7 +47,10 @@ func NewEBPFLessModel() *model.Model { !strings.HasPrefix(field, "chdir.") && !strings.HasPrefix(field, "mount.") && !strings.HasPrefix(field, "umount.") && - !strings.HasPrefix(field, "event.") { + !strings.HasPrefix(field, "event.") && + !strings.HasPrefix(field, "accept.") && + !strings.HasPrefix(field, "bind.") && + !strings.HasPrefix(field, "connect.") { return rules.ErrEventTypeNotEnabled } return nil diff --git a/pkg/security/probe/probe.go b/pkg/security/probe/probe.go index 52fd27e3508c8..239b428dbaf6a 100644 --- a/pkg/security/probe/probe.go +++ b/pkg/security/probe/probe.go @@ -323,18 +323,12 @@ func (p *Probe) sendEventToConsumers(event *model.Event) { } } -func traceEvent(fmt string, marshaller func() ([]byte, model.EventType, error)) { +func logTraceEvent(eventType model.EventType, event interface{}) { if !seclog.DefaultLogger.IsTracing() { return } - eventJSON, eventType, err := marshaller() - if err != nil { - seclog.DefaultLogger.TraceTagf(eventType, fmt, err) - return - } - - seclog.DefaultLogger.TraceTagf(eventType, fmt, string(eventJSON)) + seclog.DefaultLogger.TraceTagf(eventType, "Dispatching event %s", serializers.EventStringerWrapper{Event: event}) } // AddDiscarderPushedCallback add a callback to the list of func that have to be called when a discarder is pushed to kernel @@ -344,10 +338,7 @@ func (p *Probe) AddDiscarderPushedCallback(cb DiscarderPushedCallback) { // DispatchCustomEvent sends a custom event to the probe event handler func (p *Probe) 
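Note on the kfilters/process.go hunk above: a rule written as process.auid != AuditUIDUnset cannot be pushed to the kernel as a plain approver, so it is rewritten as the range [0, AuditUIDUnset-1]. This works on the assumption that AuditUIDUnset is the kernel's "no audit UID" sentinel, i.e. the maximum uint32 value, so excluding it is the same as accepting everything below it. A small numeric illustration of that equivalence, using stand-in values rather than the real rules engine:

package main

import (
	"fmt"
	"math"
)

// auditUIDUnset stands in for sharedconsts.AuditUIDUnset, assumed here to be
// the maximum uint32 value used by the kernel to mean "no audit UID".
const auditUIDUnset uint32 = math.MaxUint32

// notUnset is the original predicate: auid != AuditUIDUnset.
func notUnset(auid uint32) bool { return auid != auditUIDUnset }

// inApproverRange is the kernel-friendly rewrite: 0 <= auid <= AuditUIDUnset-1.
func inApproverRange(auid uint32) bool { return auid <= auditUIDUnset-1 }

func main() {
	for _, auid := range []uint32{0, 1000, auditUIDUnset - 1, auditUIDUnset} {
		fmt.Printf("auid=%d  !=unset:%v  in-range:%v\n",
			auid, notUnset(auid), inApproverRange(auid))
	}
	// The two predicates agree for every uint32 value, which is why the
	// not-approver can be handed to the kernel as a single [0, max-1] range.
}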
DispatchCustomEvent(rule *rules.Rule, event *events.CustomEvent) { - traceEvent("Dispatching custom event %s", func() ([]byte, model.EventType, error) { - eventJSON, err := serializers.MarshalCustomEvent(event) - return eventJSON, event.GetEventType(), err - }) + logTraceEvent(event.GetEventType(), event) // send wildcard first for _, handler := range p.customEventHandlers[model.UnknownEventType] { @@ -424,6 +415,11 @@ func (p *Probe) IsNetworkRawPacketEnabled() bool { return p.IsNetworkEnabled() && p.Config.Probe.NetworkRawPacketEnabled } +// IsNetworkFlowMonitorEnabled returns whether the network flow monitor is enabled +func (p *Probe) IsNetworkFlowMonitorEnabled() bool { + return p.IsNetworkEnabled() && p.Config.Probe.NetworkFlowMonitorEnabled +} + // IsActivityDumpEnabled returns whether activity dump is enabled func (p *Probe) IsActivityDumpEnabled() bool { return p.Config.RuntimeSecurity.ActivityDumpEnabled diff --git a/pkg/security/probe/probe_ebpf.go b/pkg/security/probe/probe_ebpf.go index 399d04870e67e..66565660371e9 100644 --- a/pkg/security/probe/probe_ebpf.go +++ b/pkg/security/probe/probe_ebpf.go @@ -17,6 +17,7 @@ import ( "path/filepath" "runtime" "slices" + "strings" "sync" "time" @@ -64,6 +65,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/security_profile/dump" "github.com/DataDog/datadog-agent/pkg/security/serializers" "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" utilkernel "github.com/DataDog/datadog-agent/pkg/util/kernel" ddsync "github.com/DataDog/datadog-agent/pkg/util/sync" ) @@ -188,6 +190,11 @@ func (p *EBPFProbe) UseRingBuffers() bool { return p.config.Probe.EventStreamUseRingBuffer && p.kernelVersion.HaveRingBuffers() } +// GetUseFentry returns true if fentry is used +func (p *EBPFProbe) GetUseFentry() bool { + return p.useFentry +} + func (p *EBPFProbe) selectFentryMode() { if !p.config.Probe.EventStreamUseFentry { p.useFentry = false @@ -229,6 +236,10 @@ func (p *EBPFProbe) isRawPacketNotSupported() bool { return IsRawPacketNotSupported(p.kernelVersion) } +func (p *EBPFProbe) isNetworkFlowMonitorNotSupported() bool { + return IsNetworkFlowMonitorNotSupported(p.kernelVersion) +} + func (p *EBPFProbe) sanityChecks() error { // make sure debugfs is mounted if _, err := tracefs.Root(); err != nil { @@ -249,6 +260,16 @@ func (p *EBPFProbe) sanityChecks() error { p.config.Probe.NetworkRawPacketEnabled = false } + if p.config.Probe.NetworkFlowMonitorEnabled && !p.config.Probe.NetworkEnabled { + seclog.Warnf("The network flow monitor feature of CWS requires event_monitoring_config.network.enabled to be true, setting event_monitoring_config.network.flow_monitor.enabled to false") + p.config.Probe.NetworkFlowMonitorEnabled = false + } + + if p.config.Probe.NetworkFlowMonitorEnabled && p.isNetworkFlowMonitorNotSupported() { + seclog.Warnf("The network flow monitor feature of CWS requires a more recent kernel (at least 5.13) with support the bpf_for_each_elem map helper, setting event_monitoring_config.network.flow_monitor.enabled to false") + p.config.Probe.NetworkFlowMonitorEnabled = false + } + return nil } @@ -347,7 +368,7 @@ func (p *EBPFProbe) Init() error { }) } - p.managerOptions.ActivatedProbes = append(p.managerOptions.ActivatedProbes, probes.SnapshotSelectors()...) + p.managerOptions.ActivatedProbes = append(p.managerOptions.ActivatedProbes, probes.SnapshotSelectors(p.useFentry)...) 
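Note on the probe.go logging change above: the traceEvent helper, which took a marshalling closure, is replaced by logTraceEvent, which hands the logger a serializers.EventStringerWrapper. A likely reading is that the wrapper implements fmt.Stringer so the relatively expensive event serialization happens only when the trace line is actually formatted, and the per-callsite closure boilerplate disappears. The sketch below shows that generic lazy-Stringer pattern; it is not the actual EventStringerWrapper implementation.

package main

import (
	"encoding/json"
	"fmt"
)

// stringerWrapper defers serialization of an arbitrary event until the logger
// actually formats the message, by implementing fmt.Stringer.
type stringerWrapper struct {
	Event interface{}
}

func (w stringerWrapper) String() string {
	data, err := json.Marshal(w.Event)
	if err != nil {
		return fmt.Sprintf("failed to marshal event: %v", err)
	}
	return string(data)
}

type event struct {
	Type string `json:"type"`
	Path string `json:"path"`
}

func main() {
	ev := event{Type: "open", Path: "/etc/passwd"}

	// The %s verb calls String() only when the line is actually rendered, so a
	// logger that drops the message never pays for the marshalling.
	fmt.Printf("Dispatching event %s\n", stringerWrapper{Event: ev})
}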
if err := p.Manager.InitWithOptions(bytecodeReader, p.managerOptions); err != nil { return fmt.Errorf("failed to init manager: %w", err) @@ -571,10 +592,7 @@ func (p *EBPFProbe) AddActivityDumpHandler(handler dump.ActivityDumpHandler) { // DispatchEvent sends an event to the probe event handler func (p *EBPFProbe) DispatchEvent(event *model.Event, notifyConsumers bool) { - traceEvent("Dispatching event %s", func() ([]byte, model.EventType, error) { - eventJSON, err := serializers.MarshalEvent(event, nil) - return eventJSON, event.GetEventType(), err - }) + logTraceEvent(event.GetEventType(), event) // filter out event if already present on a profile if p.config.RuntimeSecurity.SecurityProfileEnabled { @@ -648,7 +666,7 @@ func (p *EBPFProbe) unmarshalContexts(data []byte, event *model.Event) (int, err } func eventWithNoProcessContext(eventType model.EventType) bool { - return eventType == model.DNSEventType || eventType == model.IMDSEventType || eventType == model.RawPacketEventType || eventType == model.LoadModuleEventType || eventType == model.UnloadModuleEventType + return eventType == model.DNSEventType || eventType == model.IMDSEventType || eventType == model.RawPacketEventType || eventType == model.LoadModuleEventType || eventType == model.UnloadModuleEventType || eventType == model.NetworkFlowMonitorEventType } func (p *EBPFProbe) unmarshalProcessCacheEntry(ev *model.Event, data []byte) (int, error) { @@ -733,6 +751,19 @@ func (p *EBPFProbe) zeroEvent() *model.Event { return p.event } +func (p *EBPFProbe) resolveCGroup(pid uint32, cgroupPathKey model.PathKey, cgroupFlags containerutils.CGroupFlags, newEntryCb func(entry *model.ProcessCacheEntry, err error)) (*model.CGroupContext, error) { + cgroupContext, err := p.Resolvers.ResolveCGroupContext(cgroupPathKey, cgroupFlags) + if err != nil { + return nil, fmt.Errorf("failed to resorve cgroup for pid %d: %w", pid, err) + } + updated := p.Resolvers.ProcessResolver.UpdateProcessCGroupContext(pid, cgroupContext, newEntryCb) + if !updated { + return nil, fmt.Errorf("failed to update cgroup for pid %d", pid) + } + + return cgroupContext, nil +} + func (p *EBPFProbe) handleEvent(CPU int, data []byte) { // handle play snapshot if p.playSnapShotState.Swap(false) { @@ -812,44 +843,25 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { seclog.Errorf("shouldn't receive Cgroup event if activity dumps are disabled") return } - if _, err = event.CgroupTracing.UnmarshalBinary(data[offset:]); err != nil { seclog.Errorf("failed to decode cgroup tracing event: %s (offset %d, len %d)", err, offset, dataLen) return } - - cgroupContext, err := p.Resolvers.ResolveCGroupContext(event.CgroupTracing.CGroupContext.CGroupFile, containerutils.CGroupFlags(event.CgroupTracing.CGroupContext.CGroupFlags)) - if err != nil { - seclog.Debugf("Failed to resolve cgroup: %s", err) + if cgroupContext, err := p.resolveCGroup(event.CgroupTracing.Pid, event.CgroupTracing.CGroupContext.CGroupFile, event.CgroupTracing.CGroupContext.CGroupFlags, newEntryCb); err != nil { + seclog.Debugf("Failed to resolve cgroup: %s", err.Error()) } else { event.CgroupTracing.CGroupContext = *cgroupContext p.profileManagers.activityDumpManager.HandleCGroupTracingEvent(&event.CgroupTracing) } - return case model.CgroupWriteEventType: if _, err = event.CgroupWrite.UnmarshalBinary(data[offset:]); err != nil { seclog.Errorf("failed to decode cgroup write released event: %s (offset %d, len %d)", err, offset, dataLen) return } - - pce := p.Resolvers.ProcessResolver.Resolve(event.CgroupWrite.Pid, 
event.CgroupWrite.Pid, 0, false, newEntryCb) - if pce != nil { - cgroupContext, err := p.Resolvers.ResolveCGroupContext(event.CgroupWrite.File.PathKey, containerutils.CGroupFlags(event.CgroupWrite.CGroupFlags)) - if err != nil { - seclog.Debugf("Failed to resolve cgroup: %s", err) - } else { - pce.Process.CGroup = *cgroupContext - pce.CGroup = *cgroupContext - - if cgroupContext.CGroupFlags.IsContainer() { - containerID, _ := containerutils.FindContainerID(cgroupContext.CGroupID) - pce.ContainerID = containerID - pce.Process.ContainerID = containerID - } - } + if _, err := p.resolveCGroup(event.CgroupWrite.Pid, event.CgroupWrite.File.PathKey, containerutils.CGroupFlags(event.CgroupWrite.CGroupFlags), newEntryCb); err != nil { + seclog.Debugf("Failed to resolve cgroup: %s", err.Error()) } - return case model.UnshareMountNsEventType: if _, err = event.UnshareMountNS.UnmarshalBinary(data[offset:]); err != nil { @@ -1214,6 +1226,16 @@ func (p *EBPFProbe) handleEvent(CPU int, data []byte) { seclog.Errorf("failed to decode RawPacket event: %s (offset %d, len %d)", err, offset, len(data)) return } + case model.NetworkFlowMonitorEventType: + if _, err = event.NetworkFlowMonitor.UnmarshalBinary(data[offset:]); err != nil { + seclog.Errorf("failed to decode NetworkFlowMonitor event: %s (offset %d, len %d)", err, offset, len(data)) + return + } + case model.AcceptEventType: + if _, err = event.Accept.UnmarshalBinary(data[offset:]); err != nil { + seclog.Errorf("failed to decode accept event: %s (offset %d, len %d)", err, offset, len(data)) + return + } case model.BindEventType: if _, err = event.Bind.UnmarshalBinary(data[offset:]); err != nil { seclog.Errorf("failed to decode bind event: %s (offset %d, len %d)", err, offset, len(data)) @@ -1418,6 +1440,8 @@ func (p *EBPFProbe) validEventTypeForConfig(eventType string) bool { return p.probe.IsNetworkEnabled() case "packet": return p.probe.IsNetworkRawPacketEnabled() + case "network_flow_monitor": + return p.probe.IsNetworkFlowMonitorEnabled() } return true } @@ -1440,7 +1464,7 @@ func (p *EBPFProbe) updateProbes(ruleEventTypes []eval.EventType, needRawSyscall } } - activatedProbes := probes.SnapshotSelectors() + activatedProbes := probes.SnapshotSelectors(p.useFentry) // extract probe to activate per the event types for eventType, selectors := range probes.GetSelectorsPerEventType(p.useFentry) { @@ -1529,51 +1553,16 @@ func (p *EBPFProbe) updateProbes(ruleEventTypes []eval.EventType, needRawSyscall return fmt.Errorf("failed to set enabled events: %w", err) } - previousProbes := p.computeProbesIDs() if err = p.Manager.UpdateActivatedProbes(activatedProbes); err != nil { return err } - newProbes := p.computeProbesIDs() - p.computeProbesDiffAndRemoveMapping(previousProbes, newProbes) + p.updateEBPFCheckMapping() return nil } -func (p *EBPFProbe) computeProbesIDs() map[string]lib.ProgramID { - out := make(map[string]lib.ProgramID) - progList, err := p.Manager.GetPrograms() - if err != nil { - return out - } - for funcName, prog := range progList { - programInfo, err := prog.Info() - if err != nil { - continue - } - - programID, isAvailable := programInfo.ID() - if isAvailable { - out[funcName] = programID - } - } - - return out -} - -func (p *EBPFProbe) computeProbesDiffAndRemoveMapping(previousProbes map[string]lib.ProgramID, newProbes map[string]lib.ProgramID) { - // Compute the list of programs that need to be deleted from the ddebpf mapping - var toDelete []lib.ProgramID - for previousProbeFuncName, programID := range previousProbes { - if _, ok := 
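Note on the new handleEvent cases above (accept, network flow monitor): they follow the same contract as the existing event types, where each event exposes an UnmarshalBinary(data) method that decodes a fixed kernel-side layout and reports how many bytes it consumed. The sketch below illustrates that contract with a made-up event layout; the real model types and their field layouts are defined by the eBPF structs and differ from this.

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// acceptEvent is a made-up fixed-size event: 2-byte address family,
// 2-byte port, 8-byte return value, little-endian.
type acceptEvent struct {
	AddrFamily uint16
	Port       uint16
	Retval     int64
}

// UnmarshalBinary decodes the event and reports how many bytes were read,
// matching the `n, err := event.X.UnmarshalBinary(data[offset:])` call shape.
func (e *acceptEvent) UnmarshalBinary(data []byte) (int, error) {
	const size = 2 + 2 + 8
	if len(data) < size {
		return 0, errors.New("while parsing acceptEvent: buffer too small")
	}
	e.AddrFamily = binary.LittleEndian.Uint16(data[0:2])
	e.Port = binary.LittleEndian.Uint16(data[2:4])
	e.Retval = int64(binary.LittleEndian.Uint64(data[4:12]))
	return size, nil
}

func main() {
	buf := make([]byte, 12)
	binary.LittleEndian.PutUint16(buf[0:2], 2)    // AF_INET
	binary.LittleEndian.PutUint16(buf[2:4], 8080) // port
	binary.LittleEndian.PutUint64(buf[4:12], 0)   // retval

	var ev acceptEvent
	if _, err := ev.UnmarshalBinary(buf); err != nil {
		fmt.Println("failed to decode accept event:", err)
		return
	}
	fmt.Printf("family=%d port=%d retval=%d\n", ev.AddrFamily, ev.Port, ev.Retval)
}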
newProbes[previousProbeFuncName]; !ok { - toDelete = append(toDelete, programID) - } - } - - for _, id := range toDelete { - ddebpf.RemoveProgramID(uint32(id), "cws") - } - - // new programs could have been introduced during the update, add them now +func (p *EBPFProbe) updateEBPFCheckMapping() { + ddebpf.ClearNameMappings("cws") ddebpf.AddNameMappings(p.Manager, "cws") } @@ -1660,8 +1649,6 @@ func (p *EBPFProbe) Close() error { // perf map reader are ignored p.cancelFnc() - close(p.newTCNetDevices) - // we wait until both the reorderer and the monitor are stopped p.wg.Wait() @@ -1681,6 +1668,8 @@ func (p *EBPFProbe) Close() error { } // when we reach this point, we do not generate nor consume events anymore, we can close the resolvers + close(p.newTCNetDevices) + return p.Resolvers.Close() } @@ -2002,13 +1991,14 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e } p.managerOptions.MapSpecEditors = probes.AllMapSpecEditors(p.numCPU, probes.MapSpecEditorOpts{ - TracedCgroupSize: config.RuntimeSecurity.ActivityDumpTracedCgroupsCount, - UseRingBuffers: useRingBuffers, - UseMmapableMaps: useMmapableMaps, - RingBufferSize: uint32(config.Probe.EventStreamBufferSize), - PathResolutionEnabled: probe.Opts.PathResolutionEnabled, - SecurityProfileMaxCount: config.RuntimeSecurity.SecurityProfileMaxCount, - }) + TracedCgroupSize: config.RuntimeSecurity.ActivityDumpTracedCgroupsCount, + UseRingBuffers: useRingBuffers, + UseMmapableMaps: useMmapableMaps, + RingBufferSize: uint32(config.Probe.EventStreamBufferSize), + PathResolutionEnabled: probe.Opts.PathResolutionEnabled, + SecurityProfileMaxCount: config.RuntimeSecurity.SecurityProfileMaxCount, + NetworkFlowMonitorEnabled: config.Probe.NetworkFlowMonitorEnabled, + }, p.kernelVersion) if config.RuntimeSecurity.ActivityDumpEnabled { for _, e := range config.RuntimeSecurity.ActivityDumpTracedEventTypes { @@ -2116,6 +2106,18 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e Name: "syscall_monitor_event_period", Value: uint64(config.RuntimeSecurity.ActivityDumpSyscallMonitorPeriod.Nanoseconds()), }, + manager.ConstantEditor{ + Name: "network_monitor_period", + Value: uint64(config.Probe.NetworkFlowMonitorPeriod.Nanoseconds()), + }, + manager.ConstantEditor{ + Name: "is_sk_storage_supported", + Value: utils.BoolTouint64(p.useFentry && p.kernelVersion.HasSKStorageInTracingPrograms() && config.Probe.NetworkFlowMonitorSKStorageEnabled), + }, + manager.ConstantEditor{ + Name: "is_network_flow_monitor_enabled", + Value: utils.BoolTouint64(p.config.Probe.NetworkFlowMonitorEnabled), + }, manager.ConstantEditor{ Name: "send_signal", Value: utils.BoolTouint64(p.kernelVersion.SupportBPFSendSignal()), @@ -2162,8 +2164,18 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e ) } + p.managerOptions.ConstantEditors = append(p.managerOptions.ConstantEditors, + manager.ConstantEditor{ + Name: "fentry_func_argc", + ValueCallback: func(prog *lib.ProgramSpec) interface{} { + // use a separate function to make sure we always return a uint64 + return getFuncArgCount(prog) + }, + }, + ) + // tail calls - p.managerOptions.TailCallRouter = probes.AllTailRoutes(config.Probe.ERPCDentryResolutionEnabled, config.Probe.NetworkEnabled, config.Probe.NetworkRawPacketEnabled, useMmapableMaps) + p.managerOptions.TailCallRouter = probes.AllTailRoutes(config.Probe.ERPCDentryResolutionEnabled, config.Probe.NetworkEnabled, config.Probe.NetworkFlowMonitorEnabled, config.Probe.NetworkRawPacketEnabled, 
useMmapableMaps) if !config.Probe.ERPCDentryResolutionEnabled || useMmapableMaps { // exclude the programs that use the bpf_probe_write_user helper p.managerOptions.ExcludedFunctions = probes.AllBPFProbeWriteUserProgramFunctions() @@ -2176,6 +2188,16 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e p.managerOptions.ExcludedFunctions = append(p.managerOptions.ExcludedFunctions, probes.GetRawPacketTCProgramFunctions()...) } + // prevent some tal calls from loading + if !p.config.Probe.NetworkFlowMonitorEnabled { + p.managerOptions.ExcludedFunctions = append(p.managerOptions.ExcludedFunctions, probes.GetAllFlushNetworkStatsTaillCallFunctions()...) + } + + // prevent some helpers from loading + if !p.kernelVersion.HasBPFForEachMapElemHelper() { + p.managerOptions.ExcludedFunctions = append(p.managerOptions.ExcludedFunctions, probes.AllBPFForEachMapElemProgramFunctions()...) + } + if p.useFentry { afBasedExcluder, err := newAvailableFunctionsBasedExcluder() if err != nil { @@ -2200,7 +2222,7 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e p.fileHasher = NewFileHasher(config, p.Resolvers.HashResolver) - hostname, err := utils.GetHostname() + hostname, err := hostnameutils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" } @@ -2245,6 +2267,20 @@ func NewEBPFProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFProbe, e return p, nil } +func getFuncArgCount(prog *lib.ProgramSpec) uint64 { + if !strings.HasPrefix(prog.SectionName, "fexit/") { + return 0 // this value should never be used + } + + argc, err := constantfetch.GetBTFFunctionArgCount(prog.AttachTo) + if err != nil { + seclog.Errorf("failed to get function argument count for %s: %v", prog.AttachTo, err) + return 0 + } + + return uint64(argc) +} + // GetProfileManagers returns the security profile managers func (p *EBPFProbe) GetProfileManagers() *SecurityProfileManagers { return p.profileManagers @@ -2478,6 +2514,11 @@ func AppendProbeRequestsToFetcher(constantFetcher constantfetch.ConstantFetcher, constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameFlowI6StructSADDR, "struct flowi6", "saddr") constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameFlowI6StructULI, "struct flowi6", "uli") constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameSocketStructSK, "struct socket", "sk") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameSockCommonStructSKCNum, "struct sock_common", "skc_num") + // TODO: needed for l4_protocol resolution, see network/flow.h + //constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameFlowI4StructProto, "struct flowi4", "flowi4_proto") + // TODO: needed for l4_protocol resolution, see network/flow.h + //constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameFlowI6StructProto, "struct flowi6", "flowi6_proto") // Interpreter constants constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameLinuxBinprmStructFile, "struct linux_binprm", "file") @@ -2503,6 +2544,20 @@ func AppendProbeRequestsToFetcher(constantFetcher constantfetch.ConstantFetcher, constantFetcher.AppendOffsetofRequest(constantfetch.OffsetInodeNlink, "struct inode", "i_nlink") constantFetcher.AppendOffsetofRequest(constantfetch.OffsetInodeMtime, "struct inode", "i_mtime", "__i_mtime") constantFetcher.AppendOffsetofRequest(constantfetch.OffsetInodeCtime, "struct inode", "i_ctime", "__i_ctime") + + // fs + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameSbDev, "struct super_block", "s_dev") + 
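Note on the fentry_func_argc constant editor and getFuncArgCount above: a plausible reason an fexit program needs the traced function's argument count is that the BPF trampoline exposes the arguments as consecutive 8-byte slots followed by the return value, so the return value sits in slot argc. The argument counts in the real probe come from constantfetch.GetBTFFunctionArgCount; the arithmetic below is only an illustration of that slot layout.

package main

import "fmt"

// retvalSlot returns where the return value would live in an fexit program's
// context, assuming args occupy slots 0..argc-1 and the retval follows them.
func retvalSlot(argc int) (slot int, byteOffset int) {
	return argc, argc * 8
}

func main() {
	// e.g. a hooked function with a single pointer argument would have its
	// return value in slot 1 (byte offset 8).
	for _, argc := range []int{1, 3, 5} {
		slot, off := retvalSlot(argc)
		fmt.Printf("argc=%d -> retval slot %d (ctx byte offset %d)\n", argc, slot, off)
	}
}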
constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameSuperblockSType, "struct super_block", "s_type") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameDentryDInode, "struct dentry", "d_inode") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameDentryDName, "struct dentry", "d_name") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNamePathDentry, "struct path", "dentry") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNamePathMnt, "struct path", "mnt") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameInodeSuperblock, "struct inode", "i_sb") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameMountMntMountpoint, "struct mount", "mnt_mountpoint") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameMountpointDentry, "struct mountpoint", "m_dentry") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameVfsmountMntFlags, "struct vfsmount", "mnt_flags") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameVfsmountMntRoot, "struct vfsmount", "mnt_root") + constantFetcher.AppendOffsetofRequest(constantfetch.OffsetNameVfsmountMntSb, "struct vfsmount", "mnt_sb") } // HandleActions handles the rule actions diff --git a/pkg/security/probe/probe_ebpfless.go b/pkg/security/probe/probe_ebpfless.go index 040af49fd827a..7292ae2d6c74d 100644 --- a/pkg/security/probe/probe_ebpfless.go +++ b/pkg/security/probe/probe_ebpfless.go @@ -13,6 +13,7 @@ import ( "encoding/binary" "errors" "fmt" + "golang.org/x/sys/unix" "io" "net" "path/filepath" @@ -36,8 +37,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" - "github.com/DataDog/datadog-agent/pkg/security/serializers" "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" ) const ( @@ -296,6 +297,46 @@ func (p *EBPFLessProbe) handleSyscallMsg(cl *client, syscallMsg *ebpfless.Syscal case ebpfless.SyscallTypeUmount: event.Type = uint32(model.FileUmountEventType) event.Umount.Retval = syscallMsg.Retval + + case ebpfless.SyscallTypeConnect: + event.Type = uint32(model.ConnectEventType) + event.Connect.Addr = model.IPPortContext{ + IPNet: *eval.IPNetFromIP(syscallMsg.Connect.Addr), + Port: syscallMsg.Connect.Port, + } + event.Connect.AddrFamily = syscallMsg.Connect.AddressFamily + event.Connect.Protocol = syscallMsg.Connect.Protocol + event.Connect.Retval = syscallMsg.Retval + + case ebpfless.SyscallTypeAccept: + event.Type = uint32(model.AcceptEventType) + event.Accept.Addr = model.IPPortContext{ + IPNet: *eval.IPNetFromIP(syscallMsg.Accept.Addr), + Port: syscallMsg.Accept.Port, + } + event.Accept.AddrFamily = syscallMsg.Accept.AddressFamily + event.Accept.Retval = syscallMsg.Retval + + case ebpfless.SyscallTypeBind: + event.Type = uint32(model.BindEventType) + if syscallMsg.Bind.AddressFamily == unix.AF_UNIX { + event.Bind.Addr = model.IPPortContext{ + IPNet: net.IPNet{ + IP: net.IP(nil), + Mask: net.IPMask(nil), + }, + Port: 0, + } + } else { + event.Bind.Addr = model.IPPortContext{ + IPNet: *eval.IPNetFromIP(syscallMsg.Bind.Addr), + Port: syscallMsg.Bind.Port, + } + } + + event.Bind.AddrFamily = syscallMsg.Bind.AddressFamily + event.Bind.Protocol = syscallMsg.Bind.Protocol + event.Bind.Retval = syscallMsg.Retval } // container context @@ -339,10 +380,7 @@ func (p *EBPFLessProbe) handleSyscallMsg(cl *client, syscallMsg *ebpfless.Syscal // 
DispatchEvent sends an event to the probe event handler func (p *EBPFLessProbe) DispatchEvent(event *model.Event) { - traceEvent("Dispatching event %s", func() ([]byte, model.EventType, error) { - eventJSON, err := serializers.MarshalEvent(event, nil) - return eventJSON, event.GetEventType(), err - }) + logTraceEvent(event.GetEventType(), event) // send event to wildcard handlers, like the CWS rule engine, first p.probe.sendEventToHandlers(event) @@ -689,7 +727,7 @@ func NewEBPFLessProbe(probe *Probe, config *config.Config, opts Opts) (*EBPFLess p.fileHasher = NewFileHasher(config, p.Resolvers.HashResolver) - hostname, err := utils.GetHostname() + hostname, err := hostnameutils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" } diff --git a/pkg/security/probe/probe_kernel_file_windows.go b/pkg/security/probe/probe_kernel_file_windows.go index 0fbe624a66056..29f6b1050e339 100644 --- a/pkg/security/probe/probe_kernel_file_windows.go +++ b/pkg/security/probe/probe_kernel_file_windows.go @@ -164,7 +164,10 @@ func (wp *WindowsProbe) parseCreateHandleArgs(e *etw.DDEventRecord) (*createHand return nil, fmt.Errorf("unknown version %v", e.EventHeader.EventDescriptor.Version) } - // not amazing to double compute the basename.. + // invalidate the path resolver entry + wp.filePathResolver.Remove(ca.fileObject) + + // not amazing to double compute the basename. basename := filepath.Base(ca.fileName) if !wp.approveFimBasename(basename) { @@ -200,7 +203,7 @@ func (wp *WindowsProbe) parseCreateHandleArgs(e *etw.DDEventRecord) (*createHand if wp.filePathResolver.Add(ca.fileObject, fc) { wp.stats.fileNameCacheEvictions++ } - // if we get here, we have a new file handle. Remove it from the discarder cache in case + // if we get here, we have a new file handle. 
Remove it from the discarder cache in case // we missed the close notification wp.discardedFileHandles.Remove(fileObjectPointer(ca.fileObject)) diff --git a/pkg/security/probe/probe_linux.go b/pkg/security/probe/probe_linux.go index b4b557db19aea..467de397f7235 100644 --- a/pkg/security/probe/probe_linux.go +++ b/pkg/security/probe/probe_linux.go @@ -70,6 +70,11 @@ func IsNetworkNotSupported(kv *kernel.Version) bool { return kv.IsRH7Kernel() || kv.IsOracleUEKKernel() } +// IsNetworkFlowMonitorNotSupported returns whether the network flow monitor feature is not supported +func IsNetworkFlowMonitorNotSupported(kv *kernel.Version) bool { + return IsNetworkNotSupported(kv) || !kv.IsMapValuesToMapHelpersAllowed() || !kv.HasBPFForEachMapElemHelper() +} + // NewAgentContainerContext returns the agent container context func NewAgentContainerContext() (*events.AgentContainerContext, error) { pid := utils.Getpid() diff --git a/pkg/security/probe/probe_monitor.go b/pkg/security/probe/probe_monitor.go index a110b946105c8..420ac6241d3fb 100644 --- a/pkg/security/probe/probe_monitor.go +++ b/pkg/security/probe/probe_monitor.go @@ -102,6 +102,10 @@ func (m *EBPFMonitors) SendStats() error { return fmt.Errorf("failed to send mount_resolver stats: %w", err) } + if err := resolvers.CGroupResolver.SendStats(); err != nil { + return fmt.Errorf("failed to send cgroup_resolver stats: %w", err) + } + if resolvers.SBOMResolver != nil { if err := resolvers.SBOMResolver.SendStats(); err != nil { return fmt.Errorf("failed to send sbom_resolver stats: %w", err) diff --git a/pkg/security/probe/probe_others.go b/pkg/security/probe/probe_others.go index daf1ab253299a..eba822eeaed92 100644 --- a/pkg/security/probe/probe_others.go +++ b/pkg/security/probe/probe_others.go @@ -82,6 +82,11 @@ func (p *Probe) IsNetworkRawPacketEnabled() bool { return p.IsNetworkEnabled() && p.Config.Probe.NetworkRawPacketEnabled } +// IsNetworkFlowMonitorEnabled returns whether the network flow monitor is enabled +func (p *Probe) IsNetworkFlowMonitorEnabled() bool { + return p.IsNetworkEnabled() && p.Config.Probe.NetworkFlowMonitorEnabled +} + // IsActivityDumpEnabled returns whether activity dump is enabled func (p *Probe) IsActivityDumpEnabled() bool { return p.Config.RuntimeSecurity.ActivityDumpEnabled } diff --git a/pkg/security/probe/probe_windows.go b/pkg/security/probe/probe_windows.go index b4f954d7e96aa..6df1068d62fb7 100644 --- a/pkg/security/probe/probe_windows.go +++ b/pkg/security/probe/probe_windows.go @@ -30,8 +30,8 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" "github.com/DataDog/datadog-agent/pkg/security/seclog" - "github.com/DataDog/datadog-agent/pkg/security/serializers" "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/util/winutil" "github.com/DataDog/datadog-agent/pkg/windowsdriver/procmon" @@ -1060,10 +1060,7 @@ func (p *WindowsProbe) setProcessContext(pid uint32, event *model.Event) error { // DispatchEvent sends an event to the probe event handler func (p *WindowsProbe) DispatchEvent(event *model.Event) { - traceEvent("Dispatching event %s", func() ([]byte, model.EventType, error) { - eventJSON, err := serializers.MarshalEvent(event, nil) - return eventJSON, event.GetEventType(), err - }) + logTraceEvent(event.GetEventType(), event) // send event to wildcard handlers, like the CWS rule 
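[editor's note] The probe_linux.go and probe_others.go hunks above split the flow-monitor decision into a kernel-capability gate (IsNetworkFlowMonitorNotSupported) and a configuration gate (IsNetworkFlowMonitorEnabled). The self-contained sketch below only illustrates how a caller might combine the two; kernelInfo and probeConfig are hypothetical stand-ins, not the agent's Probe or kernel.Version types.

package main

import "fmt"

// kernelInfo is a stand-in for the kernel capability checks.
type kernelInfo struct {
	networkSupported     bool
	mapValuesToMapOK     bool
	hasBPFForEachMapElem bool
}

// probeConfig is a stand-in for the probe configuration flags.
type probeConfig struct {
	networkEnabled            bool
	networkFlowMonitorEnabled bool
}

// flowMonitorSupported mirrors the kernel gate: every required helper must exist.
func flowMonitorSupported(kv kernelInfo) bool {
	return kv.networkSupported && kv.mapValuesToMapOK && kv.hasBPFForEachMapElem
}

// flowMonitorEnabled mirrors the configuration gate.
func flowMonitorEnabled(cfg probeConfig) bool {
	return cfg.networkEnabled && cfg.networkFlowMonitorEnabled
}

func main() {
	kv := kernelInfo{networkSupported: true, mapValuesToMapOK: true, hasBPFForEachMapElem: true}
	cfg := probeConfig{networkEnabled: true, networkFlowMonitorEnabled: false}
	// Both gates must pass before the flow-monitor programs are loaded.
	fmt.Println(flowMonitorSupported(kv) && flowMonitorEnabled(cfg)) // false: disabled in config
}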
engine, first p.probe.sendEventToHandlers(event) @@ -1305,7 +1302,7 @@ func NewWindowsProbe(probe *Probe, config *config.Config, opts Opts) (*WindowsPr return nil, err } - hostname, err := utils.GetHostname() + hostname, err := hostnameutils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" } diff --git a/pkg/security/probe/selftests/tester_windows.go b/pkg/security/probe/selftests/tester_windows.go index d6743c5e5bc5d..d2a9ef5b7d33f 100644 --- a/pkg/security/probe/selftests/tester_windows.go +++ b/pkg/security/probe/selftests/tester_windows.go @@ -15,7 +15,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/probe" - "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/pathutils" ) // NewSelfTester returns a new SelfTester, enabled or not @@ -38,7 +38,7 @@ func NewSelfTester(cfg *config.RuntimeSecurityConfig, probe *probe.Probe) (*Self keyPath := "Software\\Datadog\\Datadog Agent" - dirLongPath, err := utils.GetLongPathName(dir) + dirLongPath, err := pathutils.GetLongPathName(dir) if err != nil { return nil, err } diff --git a/pkg/security/process_list/activity_tree/activity_tree.go b/pkg/security/process_list/activity_tree/activity_tree.go index 65cd8b304ed51..24a3915d35449 100644 --- a/pkg/security/process_list/activity_tree/activity_tree.go +++ b/pkg/security/process_list/activity_tree/activity_tree.go @@ -9,8 +9,9 @@ package activitytree import ( + "slices" + "github.com/DataDog/datadog-go/v5/statsd" - "golang.org/x/exp/slices" processlist "github.com/DataDog/datadog-agent/pkg/security/process_list" processresolver "github.com/DataDog/datadog-agent/pkg/security/process_list/process_resolver" diff --git a/pkg/security/process_list/process_list.go b/pkg/security/process_list/process_list.go index 64c76705a553f..c50127a47abc5 100644 --- a/pkg/security/process_list/process_list.go +++ b/pkg/security/process_list/process_list.go @@ -12,13 +12,13 @@ import ( "errors" "fmt" "io" + "slices" "sync" "github.com/DataDog/datadog-agent/pkg/process/procutil" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-go/v5/statsd" - "golang.org/x/exp/slices" "golang.org/x/sys/unix" ) diff --git a/pkg/security/process_list/process_node.go b/pkg/security/process_list/process_node.go index 1a9d62ca30477..dc4f32e9b783b 100644 --- a/pkg/security/process_list/process_node.go +++ b/pkg/security/process_list/process_node.go @@ -12,10 +12,10 @@ import ( "fmt" "io" "math/rand" + "slices" "sync" "github.com/DataDog/datadog-agent/pkg/security/secl/model" - "golang.org/x/exp/slices" ) // ProcessNode holds the activity of a process diff --git a/pkg/security/process_list/process_resolver/process_resolver_test.go b/pkg/security/process_list/process_resolver/process_resolver_test.go index 1c062c45c0f99..95d70e99aba4e 100644 --- a/pkg/security/process_list/process_resolver/process_resolver_test.go +++ b/pkg/security/process_list/process_resolver/process_resolver_test.go @@ -9,6 +9,7 @@ package processresolver import ( + "slices" "testing" "time" @@ -16,7 +17,6 @@ import ( cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/stretchr/testify/assert" - "golang.org/x/exp/slices" ) func newFakeExecEvent(ppid, pid int, pathname string) *model.Event { diff --git 
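[editor's note] The process_list hunks above swap golang.org/x/exp/slices for the standard library slices package (available since Go 1.21). The helpers used here have identical signatures in the standard library, so only the import path changes, as in this minimal example:

package main

import (
	"fmt"
	"slices"
)

func main() {
	pids := []int{1, 42, 4242}
	fmt.Println(slices.Contains(pids, 42))                                   // true
	fmt.Println(slices.IndexFunc(pids, func(p int) bool { return p > 100 })) // 2
}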
a/pkg/security/proto/api/api.pb.go b/pkg/security/proto/api/api.pb.go index e8fd80f24c5c8..91f60ae16f81d 100644 --- a/pkg/security/proto/api/api.pb.go +++ b/pkg/security/proto/api/api.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.34.2 +// protoc-gen-go v1.36.3 // protoc // source: pkg/security/proto/api/api.proto @@ -21,18 +21,16 @@ const ( ) type GetEventParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetEventParams) Reset() { *x = GetEventParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetEventParams) String() string { @@ -43,7 +41,7 @@ func (*GetEventParams) ProtoMessage() {} func (x *GetEventParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -59,23 +57,20 @@ func (*GetEventParams) Descriptor() ([]byte, []int) { } type SecurityEventMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + RuleID string `protobuf:"bytes,1,opt,name=RuleID,proto3" json:"RuleID,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=Data,proto3" json:"Data,omitempty"` + Tags []string `protobuf:"bytes,3,rep,name=Tags,proto3" json:"Tags,omitempty"` + Service string `protobuf:"bytes,4,opt,name=Service,proto3" json:"Service,omitempty"` unknownFields protoimpl.UnknownFields - - RuleID string `protobuf:"bytes,1,opt,name=RuleID,proto3" json:"RuleID,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=Data,proto3" json:"Data,omitempty"` - Tags []string `protobuf:"bytes,3,rep,name=Tags,proto3" json:"Tags,omitempty"` - Service string `protobuf:"bytes,4,opt,name=Service,proto3" json:"Service,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SecurityEventMessage) Reset() { *x = SecurityEventMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityEventMessage) String() string { @@ -86,7 +81,7 @@ func (*SecurityEventMessage) ProtoMessage() {} func (x *SecurityEventMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -130,21 +125,18 @@ func (x *SecurityEventMessage) GetService() string { } type DumpProcessCacheParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + WithArgs bool `protobuf:"varint,1,opt,name=WithArgs,proto3" json:"WithArgs,omitempty"` + Format string `protobuf:"bytes,2,opt,name=Format,proto3" json:"Format,omitempty"` 
unknownFields protoimpl.UnknownFields - - WithArgs bool `protobuf:"varint,1,opt,name=WithArgs,proto3" json:"WithArgs,omitempty"` - Format string `protobuf:"bytes,2,opt,name=Format,proto3" json:"Format,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DumpProcessCacheParams) Reset() { *x = DumpProcessCacheParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DumpProcessCacheParams) String() string { @@ -155,7 +147,7 @@ func (*DumpProcessCacheParams) ProtoMessage() {} func (x *DumpProcessCacheParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -185,20 +177,17 @@ func (x *DumpProcessCacheParams) GetFormat() string { } type SecurityDumpProcessCacheMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Filename string `protobuf:"bytes,1,opt,name=Filename,proto3" json:"Filename,omitempty"` unknownFields protoimpl.UnknownFields - - Filename string `protobuf:"bytes,1,opt,name=Filename,proto3" json:"Filename,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SecurityDumpProcessCacheMessage) Reset() { *x = SecurityDumpProcessCacheMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityDumpProcessCacheMessage) String() string { @@ -209,7 +198,7 @@ func (*SecurityDumpProcessCacheMessage) ProtoMessage() {} func (x *SecurityDumpProcessCacheMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -232,20 +221,17 @@ func (x *SecurityDumpProcessCacheMessage) GetFilename() string { } type DumpNetworkNamespaceParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - SnapshotInterfaces bool `protobuf:"varint,1,opt,name=SnapshotInterfaces,proto3" json:"SnapshotInterfaces,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + SnapshotInterfaces bool `protobuf:"varint,1,opt,name=SnapshotInterfaces,proto3" json:"SnapshotInterfaces,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DumpNetworkNamespaceParams) Reset() { *x = DumpNetworkNamespaceParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DumpNetworkNamespaceParams) String() string { @@ -256,7 +242,7 @@ func (*DumpNetworkNamespaceParams) ProtoMessage() {} func (x 
*DumpNetworkNamespaceParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -279,22 +265,19 @@ func (x *DumpNetworkNamespaceParams) GetSnapshotInterfaces() bool { } type DumpNetworkNamespaceMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + DumpFilename string `protobuf:"bytes,2,opt,name=DumpFilename,proto3" json:"DumpFilename,omitempty"` + GraphFilename string `protobuf:"bytes,3,opt,name=GraphFilename,proto3" json:"GraphFilename,omitempty"` unknownFields protoimpl.UnknownFields - - Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` - DumpFilename string `protobuf:"bytes,2,opt,name=DumpFilename,proto3" json:"DumpFilename,omitempty"` - GraphFilename string `protobuf:"bytes,3,opt,name=GraphFilename,proto3" json:"GraphFilename,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DumpNetworkNamespaceMessage) Reset() { *x = DumpNetworkNamespaceMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DumpNetworkNamespaceMessage) String() string { @@ -305,7 +288,7 @@ func (*DumpNetworkNamespaceMessage) ProtoMessage() {} func (x *DumpNetworkNamespaceMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -342,18 +325,16 @@ func (x *DumpNetworkNamespaceMessage) GetGraphFilename() string { } type GetConfigParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetConfigParams) Reset() { *x = GetConfigParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetConfigParams) String() string { @@ -364,7 +345,7 @@ func (*GetConfigParams) ProtoMessage() {} func (x *GetConfigParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -380,22 +361,19 @@ func (*GetConfigParams) Descriptor() ([]byte, []int) { } type SecurityConfigMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RuntimeEnabled bool `protobuf:"varint,1,opt,name=RuntimeEnabled,proto3" json:"RuntimeEnabled,omitempty"` - FIMEnabled bool `protobuf:"varint,2,opt,name=FIMEnabled,proto3" json:"FIMEnabled,omitempty"` - 
ActivityDumpEnabled bool `protobuf:"varint,3,opt,name=ActivityDumpEnabled,proto3" json:"ActivityDumpEnabled,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + RuntimeEnabled bool `protobuf:"varint,1,opt,name=RuntimeEnabled,proto3" json:"RuntimeEnabled,omitempty"` + FIMEnabled bool `protobuf:"varint,2,opt,name=FIMEnabled,proto3" json:"FIMEnabled,omitempty"` + ActivityDumpEnabled bool `protobuf:"varint,3,opt,name=ActivityDumpEnabled,proto3" json:"ActivityDumpEnabled,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SecurityConfigMessage) Reset() { *x = SecurityConfigMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityConfigMessage) String() string { @@ -406,7 +384,7 @@ func (*SecurityConfigMessage) ProtoMessage() {} func (x *SecurityConfigMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -443,20 +421,17 @@ func (x *SecurityConfigMessage) GetActivityDumpEnabled() bool { } type RuleSetReportMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Policies []*EventTypePolicy `protobuf:"bytes,1,rep,name=Policies,proto3" json:"Policies,omitempty"` unknownFields protoimpl.UnknownFields - - Policies []*EventTypePolicy `protobuf:"bytes,1,rep,name=Policies,proto3" json:"Policies,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RuleSetReportMessage) Reset() { *x = RuleSetReportMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RuleSetReportMessage) String() string { @@ -467,7 +442,7 @@ func (*RuleSetReportMessage) ProtoMessage() {} func (x *RuleSetReportMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -490,23 +465,20 @@ func (x *RuleSetReportMessage) GetPolicies() []*EventTypePolicy { } type EventTypePolicy struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EventType string `protobuf:"bytes,1,opt,name=EventType,proto3" json:"EventType,omitempty"` - Mode uint32 `protobuf:"varint,2,opt,name=Mode,proto3" json:"Mode,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + EventType string `protobuf:"bytes,1,opt,name=EventType,proto3" json:"EventType,omitempty"` + Mode uint32 `protobuf:"varint,2,opt,name=Mode,proto3" json:"Mode,omitempty"` // field 3 is deprecated - Approvers *Approvers `protobuf:"bytes,4,opt,name=Approvers,proto3" json:"Approvers,omitempty"` + Approvers *Approvers `protobuf:"bytes,4,opt,name=Approvers,proto3" json:"Approvers,omitempty"` + unknownFields 
protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EventTypePolicy) Reset() { *x = EventTypePolicy{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EventTypePolicy) String() string { @@ -517,7 +489,7 @@ func (*EventTypePolicy) ProtoMessage() {} func (x *EventTypePolicy) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -554,21 +526,18 @@ func (x *EventTypePolicy) GetApprovers() *Approvers { } type Approvers struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Field string `protobuf:"bytes,1,opt,name=Field,proto3" json:"Field,omitempty"` - ApproverDetails []*ApproverDetails `protobuf:"bytes,2,rep,name=ApproverDetails,proto3" json:"ApproverDetails,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Field string `protobuf:"bytes,1,opt,name=Field,proto3" json:"Field,omitempty"` + ApproverDetails []*ApproverDetails `protobuf:"bytes,2,rep,name=ApproverDetails,proto3" json:"ApproverDetails,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Approvers) Reset() { *x = Approvers{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Approvers) String() string { @@ -579,7 +548,7 @@ func (*Approvers) ProtoMessage() {} func (x *Approvers) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -609,22 +578,19 @@ func (x *Approvers) GetApproverDetails() []*ApproverDetails { } type ApproverDetails struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Field string `protobuf:"bytes,1,opt,name=Field,proto3" json:"Field,omitempty"` + Value string `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"` + Type int32 `protobuf:"varint,3,opt,name=Type,proto3" json:"Type,omitempty"` unknownFields protoimpl.UnknownFields - - Field string `protobuf:"bytes,1,opt,name=Field,proto3" json:"Field,omitempty"` - Value string `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"` - Type int32 `protobuf:"varint,3,opt,name=Type,proto3" json:"Type,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ApproverDetails) Reset() { *x = ApproverDetails{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ApproverDetails) String() string { @@ -635,7 
+601,7 @@ func (*ApproverDetails) ProtoMessage() {} func (x *ApproverDetails) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -672,18 +638,16 @@ func (x *ApproverDetails) GetType() int32 { } type GetRuleSetReportParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetRuleSetReportParams) Reset() { *x = GetRuleSetReportParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetRuleSetReportParams) String() string { @@ -694,7 +658,7 @@ func (*GetRuleSetReportParams) ProtoMessage() {} func (x *GetRuleSetReportParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -710,21 +674,18 @@ func (*GetRuleSetReportParams) Descriptor() ([]byte, []int) { } type GetRuleSetReportResultMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - RuleSetReportMessage *RuleSetReportMessage `protobuf:"bytes,1,opt,name=RuleSetReportMessage,proto3" json:"RuleSetReportMessage,omitempty"` - Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + RuleSetReportMessage *RuleSetReportMessage `protobuf:"bytes,1,opt,name=RuleSetReportMessage,proto3" json:"RuleSetReportMessage,omitempty"` + Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetRuleSetReportResultMessage) Reset() { *x = GetRuleSetReportResultMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetRuleSetReportResultMessage) String() string { @@ -735,7 +696,7 @@ func (*GetRuleSetReportResultMessage) ProtoMessage() {} func (x *GetRuleSetReportResultMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -765,18 +726,16 @@ func (x *GetRuleSetReportResultMessage) GetError() string { } type ReloadPoliciesParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReloadPoliciesParams) Reset() { *x = ReloadPoliciesParams{} - if protoimpl.UnsafeEnabled { - mi := 
&file_pkg_security_proto_api_api_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReloadPoliciesParams) String() string { @@ -787,7 +746,7 @@ func (*ReloadPoliciesParams) ProtoMessage() {} func (x *ReloadPoliciesParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -803,18 +762,16 @@ func (*ReloadPoliciesParams) Descriptor() ([]byte, []int) { } type ReloadPoliciesResultMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ReloadPoliciesResultMessage) Reset() { *x = ReloadPoliciesResultMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ReloadPoliciesResultMessage) String() string { @@ -825,7 +782,7 @@ func (*ReloadPoliciesResultMessage) ProtoMessage() {} func (x *ReloadPoliciesResultMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -841,18 +798,16 @@ func (*ReloadPoliciesResultMessage) Descriptor() ([]byte, []int) { } type RunSelfTestParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *RunSelfTestParams) Reset() { *x = RunSelfTestParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RunSelfTestParams) String() string { @@ -863,7 +818,7 @@ func (*RunSelfTestParams) ProtoMessage() {} func (x *RunSelfTestParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -879,21 +834,18 @@ func (*RunSelfTestParams) Descriptor() ([]byte, []int) { } type SecuritySelfTestResultMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Ok bool `protobuf:"varint,1,opt,name=Ok,proto3" json:"Ok,omitempty"` + Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` unknownFields protoimpl.UnknownFields - - Ok bool `protobuf:"varint,1,opt,name=Ok,proto3" json:"Ok,omitempty"` - Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` + sizeCache 
protoimpl.SizeCache } func (x *SecuritySelfTestResultMessage) Reset() { *x = SecuritySelfTestResultMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecuritySelfTestResultMessage) String() string { @@ -904,7 +856,7 @@ func (*SecuritySelfTestResultMessage) ProtoMessage() {} func (x *SecuritySelfTestResultMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -934,18 +886,16 @@ func (x *SecuritySelfTestResultMessage) GetError() string { } type GetStatusParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *GetStatusParams) Reset() { *x = GetStatusParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *GetStatusParams) String() string { @@ -956,7 +906,7 @@ func (*GetStatusParams) ProtoMessage() {} func (x *GetStatusParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -972,22 +922,19 @@ func (*GetStatusParams) Descriptor() ([]byte, []int) { } type ConstantValueAndSource struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Value uint64 `protobuf:"varint,2,opt,name=Value,proto3" json:"Value,omitempty"` + Source string `protobuf:"bytes,3,opt,name=Source,proto3" json:"Source,omitempty"` unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Value uint64 `protobuf:"varint,2,opt,name=Value,proto3" json:"Value,omitempty"` - Source string `protobuf:"bytes,3,opt,name=Source,proto3" json:"Source,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ConstantValueAndSource) Reset() { *x = ConstantValueAndSource{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ConstantValueAndSource) String() string { @@ -998,7 +945,7 @@ func (*ConstantValueAndSource) ProtoMessage() {} func (x *ConstantValueAndSource) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ 
-1035,22 +982,19 @@ func (x *ConstantValueAndSource) GetSource() string { } type SelfTestsStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + LastTimestamp string `protobuf:"bytes,1,opt,name=LastTimestamp,proto3" json:"LastTimestamp,omitempty"` + Success []string `protobuf:"bytes,2,rep,name=Success,proto3" json:"Success,omitempty"` + Fails []string `protobuf:"bytes,3,rep,name=Fails,proto3" json:"Fails,omitempty"` unknownFields protoimpl.UnknownFields - - LastTimestamp string `protobuf:"bytes,1,opt,name=LastTimestamp,proto3" json:"LastTimestamp,omitempty"` - Success []string `protobuf:"bytes,2,rep,name=Success,proto3" json:"Success,omitempty"` - Fails []string `protobuf:"bytes,3,rep,name=Fails,proto3" json:"Fails,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SelfTestsStatus) Reset() { *x = SelfTestsStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SelfTestsStatus) String() string { @@ -1061,7 +1005,7 @@ func (*SelfTestsStatus) ProtoMessage() {} func (x *SelfTestsStatus) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1098,22 +1042,19 @@ func (x *SelfTestsStatus) GetFails() []string { } type RuleStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"` + Error string `protobuf:"bytes,3,opt,name=Error,proto3" json:"Error,omitempty"` unknownFields protoimpl.UnknownFields - - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"` - Error string `protobuf:"bytes,3,opt,name=Error,proto3" json:"Error,omitempty"` + sizeCache protoimpl.SizeCache } func (x *RuleStatus) Reset() { *x = RuleStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *RuleStatus) String() string { @@ -1124,7 +1065,7 @@ func (*RuleStatus) ProtoMessage() {} func (x *RuleStatus) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1161,22 +1102,19 @@ func (x *RuleStatus) GetError() string { } type PolicyStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Source string `protobuf:"bytes,2,opt,name=Source,proto3" json:"Source,omitempty"` + Status []*RuleStatus 
`protobuf:"bytes,3,rep,name=Status,proto3" json:"Status,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Source string `protobuf:"bytes,2,opt,name=Source,proto3" json:"Source,omitempty"` - Status []*RuleStatus `protobuf:"bytes,3,rep,name=Status,proto3" json:"Status,omitempty"` + sizeCache protoimpl.SizeCache } func (x *PolicyStatus) Reset() { *x = PolicyStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *PolicyStatus) String() string { @@ -1187,7 +1125,7 @@ func (*PolicyStatus) ProtoMessage() {} func (x *PolicyStatus) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1224,22 +1162,19 @@ func (x *PolicyStatus) GetStatus() []*RuleStatus { } type Status struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Environment *EnvironmentStatus `protobuf:"bytes,1,opt,name=Environment,proto3" json:"Environment,omitempty"` - SelfTests *SelfTestsStatus `protobuf:"bytes,2,opt,name=SelfTests,proto3" json:"SelfTests,omitempty"` - PoliciesStatus []*PolicyStatus `protobuf:"bytes,3,rep,name=PoliciesStatus,proto3" json:"PoliciesStatus,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Environment *EnvironmentStatus `protobuf:"bytes,1,opt,name=Environment,proto3" json:"Environment,omitempty"` + SelfTests *SelfTestsStatus `protobuf:"bytes,2,opt,name=SelfTests,proto3" json:"SelfTests,omitempty"` + PoliciesStatus []*PolicyStatus `protobuf:"bytes,3,rep,name=PoliciesStatus,proto3" json:"PoliciesStatus,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *Status) Reset() { *x = Status{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *Status) String() string { @@ -1250,7 +1185,7 @@ func (*Status) ProtoMessage() {} func (x *Status) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1287,21 +1222,18 @@ func (x *Status) GetPoliciesStatus() []*PolicyStatus { } type ConstantFetcherStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Fetchers []string `protobuf:"bytes,1,rep,name=Fetchers,proto3" json:"Fetchers,omitempty"` + Values []*ConstantValueAndSource `protobuf:"bytes,2,rep,name=Values,proto3" json:"Values,omitempty"` unknownFields protoimpl.UnknownFields - - Fetchers []string `protobuf:"bytes,1,rep,name=Fetchers,proto3" json:"Fetchers,omitempty"` - Values []*ConstantValueAndSource `protobuf:"bytes,2,rep,name=Values,proto3" json:"Values,omitempty"` 
+ sizeCache protoimpl.SizeCache } func (x *ConstantFetcherStatus) Reset() { *x = ConstantFetcherStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ConstantFetcherStatus) String() string { @@ -1312,7 +1244,7 @@ func (*ConstantFetcherStatus) ProtoMessage() {} func (x *ConstantFetcherStatus) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1342,24 +1274,22 @@ func (x *ConstantFetcherStatus) GetValues() []*ConstantValueAndSource { } type EnvironmentStatus struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` Warnings []string `protobuf:"bytes,1,rep,name=Warnings,proto3" json:"Warnings,omitempty"` Constants *ConstantFetcherStatus `protobuf:"bytes,2,opt,name=Constants,proto3" json:"Constants,omitempty"` KernelLockdown string `protobuf:"bytes,3,opt,name=KernelLockdown,proto3" json:"KernelLockdown,omitempty"` UseMmapableMaps bool `protobuf:"varint,4,opt,name=UseMmapableMaps,proto3" json:"UseMmapableMaps,omitempty"` UseRingBuffer bool `protobuf:"varint,5,opt,name=UseRingBuffer,proto3" json:"UseRingBuffer,omitempty"` + UseFentry bool `protobuf:"varint,6,opt,name=UseFentry,proto3" json:"UseFentry,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EnvironmentStatus) Reset() { *x = EnvironmentStatus{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EnvironmentStatus) String() string { @@ -1370,7 +1300,7 @@ func (*EnvironmentStatus) ProtoMessage() {} func (x *EnvironmentStatus) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1420,20 +1350,25 @@ func (x *EnvironmentStatus) GetUseRingBuffer() bool { return false } +func (x *EnvironmentStatus) GetUseFentry() bool { + if x != nil { + return x.UseFentry + } + return false +} + // Discarders type DumpDiscardersParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *DumpDiscardersParams) Reset() { *x = DumpDiscardersParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DumpDiscardersParams) String() string { @@ -1444,7 +1379,7 @@ func (*DumpDiscardersParams) ProtoMessage() 
{} func (x *DumpDiscardersParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1460,20 +1395,17 @@ func (*DumpDiscardersParams) Descriptor() ([]byte, []int) { } type DumpDiscardersMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + DumpFilename string `protobuf:"bytes,1,opt,name=DumpFilename,proto3" json:"DumpFilename,omitempty"` unknownFields protoimpl.UnknownFields - - DumpFilename string `protobuf:"bytes,1,opt,name=DumpFilename,proto3" json:"DumpFilename,omitempty"` + sizeCache protoimpl.SizeCache } func (x *DumpDiscardersMessage) Reset() { *x = DumpDiscardersMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *DumpDiscardersMessage) String() string { @@ -1484,7 +1416,7 @@ func (*DumpDiscardersMessage) ProtoMessage() {} func (x *DumpDiscardersMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1507,24 +1439,21 @@ func (x *DumpDiscardersMessage) GetDumpFilename() string { } type StorageRequestParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - LocalStorageDirectory string `protobuf:"bytes,1,opt,name=LocalStorageDirectory,proto3" json:"LocalStorageDirectory,omitempty"` - LocalStorageFormats []string `protobuf:"bytes,2,rep,name=LocalStorageFormats,proto3" json:"LocalStorageFormats,omitempty"` - LocalStorageCompression bool `protobuf:"varint,3,opt,name=LocalStorageCompression,proto3" json:"LocalStorageCompression,omitempty"` - RemoteStorageFormats []string `protobuf:"bytes,4,rep,name=RemoteStorageFormats,proto3" json:"RemoteStorageFormats,omitempty"` - RemoteStorageCompression bool `protobuf:"varint,5,opt,name=RemoteStorageCompression,proto3" json:"RemoteStorageCompression,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + LocalStorageDirectory string `protobuf:"bytes,1,opt,name=LocalStorageDirectory,proto3" json:"LocalStorageDirectory,omitempty"` + LocalStorageFormats []string `protobuf:"bytes,2,rep,name=LocalStorageFormats,proto3" json:"LocalStorageFormats,omitempty"` + LocalStorageCompression bool `protobuf:"varint,3,opt,name=LocalStorageCompression,proto3" json:"LocalStorageCompression,omitempty"` + RemoteStorageFormats []string `protobuf:"bytes,4,rep,name=RemoteStorageFormats,proto3" json:"RemoteStorageFormats,omitempty"` + RemoteStorageCompression bool `protobuf:"varint,5,opt,name=RemoteStorageCompression,proto3" json:"RemoteStorageCompression,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *StorageRequestParams) Reset() { *x = StorageRequestParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := 
&file_pkg_security_proto_api_api_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StorageRequestParams) String() string { @@ -1535,7 +1464,7 @@ func (*StorageRequestParams) ProtoMessage() {} func (x *StorageRequestParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1586,24 +1515,21 @@ func (x *StorageRequestParams) GetRemoteStorageCompression() bool { } type ActivityDumpParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Timeout string `protobuf:"bytes,1,opt,name=Timeout,proto3" json:"Timeout,omitempty"` - DifferentiateArgs bool `protobuf:"varint,2,opt,name=DifferentiateArgs,proto3" json:"DifferentiateArgs,omitempty"` - Storage *StorageRequestParams `protobuf:"bytes,3,opt,name=Storage,proto3" json:"Storage,omitempty"` - ContainerID string `protobuf:"bytes,4,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` - CGroupID string `protobuf:"bytes,5,opt,name=CGroupID,proto3" json:"CGroupID,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + Timeout string `protobuf:"bytes,1,opt,name=Timeout,proto3" json:"Timeout,omitempty"` + DifferentiateArgs bool `protobuf:"varint,2,opt,name=DifferentiateArgs,proto3" json:"DifferentiateArgs,omitempty"` + Storage *StorageRequestParams `protobuf:"bytes,3,opt,name=Storage,proto3" json:"Storage,omitempty"` + ContainerID string `protobuf:"bytes,4,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + CGroupID string `protobuf:"bytes,5,opt,name=CGroupID,proto3" json:"CGroupID,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ActivityDumpParams) Reset() { *x = ActivityDumpParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActivityDumpParams) String() string { @@ -1614,7 +1540,7 @@ func (*ActivityDumpParams) ProtoMessage() {} func (x *ActivityDumpParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1665,18 +1591,15 @@ func (x *ActivityDumpParams) GetCGroupID() string { } type MetadataMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AgentVersion string `protobuf:"bytes,1,opt,name=AgentVersion,proto3" json:"AgentVersion,omitempty"` - AgentCommit string `protobuf:"bytes,2,opt,name=AgentCommit,proto3" json:"AgentCommit,omitempty"` - KernelVersion string `protobuf:"bytes,3,opt,name=KernelVersion,proto3" json:"KernelVersion,omitempty"` - LinuxDistribution string `protobuf:"bytes,4,opt,name=LinuxDistribution,proto3" json:"LinuxDistribution,omitempty"` - Arch string `protobuf:"bytes,5,opt,name=Arch,proto3" json:"Arch,omitempty"` - Name string `protobuf:"bytes,6,opt,name=Name,proto3" json:"Name,omitempty"` - ProtobufVersion string 
`protobuf:"bytes,7,opt,name=ProtobufVersion,proto3" json:"ProtobufVersion,omitempty"` - DifferentiateArgs bool `protobuf:"varint,8,opt,name=DifferentiateArgs,proto3" json:"DifferentiateArgs,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + AgentVersion string `protobuf:"bytes,1,opt,name=AgentVersion,proto3" json:"AgentVersion,omitempty"` + AgentCommit string `protobuf:"bytes,2,opt,name=AgentCommit,proto3" json:"AgentCommit,omitempty"` + KernelVersion string `protobuf:"bytes,3,opt,name=KernelVersion,proto3" json:"KernelVersion,omitempty"` + LinuxDistribution string `protobuf:"bytes,4,opt,name=LinuxDistribution,proto3" json:"LinuxDistribution,omitempty"` + Arch string `protobuf:"bytes,5,opt,name=Arch,proto3" json:"Arch,omitempty"` + Name string `protobuf:"bytes,6,opt,name=Name,proto3" json:"Name,omitempty"` + ProtobufVersion string `protobuf:"bytes,7,opt,name=ProtobufVersion,proto3" json:"ProtobufVersion,omitempty"` + DifferentiateArgs bool `protobuf:"varint,8,opt,name=DifferentiateArgs,proto3" json:"DifferentiateArgs,omitempty"` // Deprecated: Marked as deprecated in pkg/security/proto/api/api.proto. Comm string `protobuf:"bytes,9,opt,name=Comm,proto3" json:"Comm,omitempty"` ContainerID string `protobuf:"bytes,10,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` @@ -1685,15 +1608,16 @@ type MetadataMessage struct { Size uint64 `protobuf:"varint,13,opt,name=Size,proto3" json:"Size,omitempty"` Serialization string `protobuf:"bytes,14,opt,name=Serialization,proto3" json:"Serialization,omitempty"` CGroupID string `protobuf:"bytes,15,opt,name=CGroupID,proto3" json:"CGroupID,omitempty"` + CGroupManager string `protobuf:"bytes,16,opt,name=CGroupManager,proto3" json:"CGroupManager,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *MetadataMessage) Reset() { *x = MetadataMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *MetadataMessage) String() string { @@ -1704,7 +1628,7 @@ func (*MetadataMessage) ProtoMessage() {} func (x *MetadataMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1825,24 +1749,28 @@ func (x *MetadataMessage) GetCGroupID() string { return "" } +func (x *MetadataMessage) GetCGroupManager() string { + if x != nil { + return x.CGroupManager + } + return "" +} + type StorageRequestMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=Format,proto3" json:"Format,omitempty"` + Compression bool `protobuf:"varint,3,opt,name=Compression,proto3" json:"Compression,omitempty"` + File string `protobuf:"bytes,4,opt,name=File,proto3" json:"File,omitempty"` unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=Format,proto3" json:"Format,omitempty"` - Compression bool 
`protobuf:"varint,3,opt,name=Compression,proto3" json:"Compression,omitempty"` - File string `protobuf:"bytes,4,opt,name=File,proto3" json:"File,omitempty"` + sizeCache protoimpl.SizeCache } func (x *StorageRequestMessage) Reset() { *x = StorageRequestMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *StorageRequestMessage) String() string { @@ -1853,7 +1781,7 @@ func (*StorageRequestMessage) ProtoMessage() {} func (x *StorageRequestMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -1897,28 +1825,25 @@ func (x *StorageRequestMessage) GetFile() string { } type ActivityDumpMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` + Source string `protobuf:"bytes,2,opt,name=Source,proto3" json:"Source,omitempty"` + Service string `protobuf:"bytes,3,opt,name=Service,proto3" json:"Service,omitempty"` + Tags []string `protobuf:"bytes,4,rep,name=Tags,proto3" json:"Tags,omitempty"` + Storage []*StorageRequestMessage `protobuf:"bytes,5,rep,name=Storage,proto3" json:"Storage,omitempty"` + Metadata *MetadataMessage `protobuf:"bytes,6,opt,name=Metadata,proto3" json:"Metadata,omitempty"` + Error string `protobuf:"bytes,7,opt,name=Error,proto3" json:"Error,omitempty"` + DNSNames []string `protobuf:"bytes,8,rep,name=DNSNames,proto3" json:"DNSNames,omitempty"` + Stats *ActivityTreeStatsMessage `protobuf:"bytes,9,opt,name=Stats,proto3" json:"Stats,omitempty"` unknownFields protoimpl.UnknownFields - - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - Source string `protobuf:"bytes,2,opt,name=Source,proto3" json:"Source,omitempty"` - Service string `protobuf:"bytes,3,opt,name=Service,proto3" json:"Service,omitempty"` - Tags []string `protobuf:"bytes,4,rep,name=Tags,proto3" json:"Tags,omitempty"` - Storage []*StorageRequestMessage `protobuf:"bytes,5,rep,name=Storage,proto3" json:"Storage,omitempty"` - Metadata *MetadataMessage `protobuf:"bytes,6,opt,name=Metadata,proto3" json:"Metadata,omitempty"` - Error string `protobuf:"bytes,7,opt,name=Error,proto3" json:"Error,omitempty"` - DNSNames []string `protobuf:"bytes,8,rep,name=DNSNames,proto3" json:"DNSNames,omitempty"` - Stats *ActivityTreeStatsMessage `protobuf:"bytes,9,opt,name=Stats,proto3" json:"Stats,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ActivityDumpMessage) Reset() { *x = ActivityDumpMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActivityDumpMessage) String() string { @@ -1929,7 +1854,7 @@ func (*ActivityDumpMessage) ProtoMessage() {} func (x *ActivityDumpMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[32] - if 
protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2008,18 +1933,16 @@ func (x *ActivityDumpMessage) GetStats() *ActivityTreeStatsMessage { } type ActivityDumpListParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ActivityDumpListParams) Reset() { *x = ActivityDumpListParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActivityDumpListParams) String() string { @@ -2030,7 +1953,7 @@ func (*ActivityDumpListParams) ProtoMessage() {} func (x *ActivityDumpListParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2046,21 +1969,18 @@ func (*ActivityDumpListParams) Descriptor() ([]byte, []int) { } type ActivityDumpListMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Dumps []*ActivityDumpMessage `protobuf:"bytes,1,rep,name=Dumps,proto3" json:"Dumps,omitempty"` + Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` unknownFields protoimpl.UnknownFields - - Dumps []*ActivityDumpMessage `protobuf:"bytes,1,rep,name=Dumps,proto3" json:"Dumps,omitempty"` - Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ActivityDumpListMessage) Reset() { *x = ActivityDumpListMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActivityDumpListMessage) String() string { @@ -2071,7 +1991,7 @@ func (*ActivityDumpListMessage) ProtoMessage() {} func (x *ActivityDumpListMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2101,22 +2021,19 @@ func (x *ActivityDumpListMessage) GetError() string { } type ActivityDumpStopParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + ContainerID string `protobuf:"bytes,2,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + CGroupID string `protobuf:"bytes,3,opt,name=CGroupID,proto3" json:"CGroupID,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - ContainerID string `protobuf:"bytes,2,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` - CGroupID string 
`protobuf:"bytes,3,opt,name=CGroupID,proto3" json:"CGroupID,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ActivityDumpStopParams) Reset() { *x = ActivityDumpStopParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActivityDumpStopParams) String() string { @@ -2127,7 +2044,7 @@ func (*ActivityDumpStopParams) ProtoMessage() {} func (x *ActivityDumpStopParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2164,20 +2081,17 @@ func (x *ActivityDumpStopParams) GetCGroupID() string { } type ActivityDumpStopMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` unknownFields protoimpl.UnknownFields - - Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ActivityDumpStopMessage) Reset() { *x = ActivityDumpStopMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActivityDumpStopMessage) String() string { @@ -2188,7 +2102,7 @@ func (*ActivityDumpStopMessage) ProtoMessage() {} func (x *ActivityDumpStopMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2211,21 +2125,18 @@ func (x *ActivityDumpStopMessage) GetError() string { } type TranscodingRequestParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ActivityDumpFile string `protobuf:"bytes,1,opt,name=ActivityDumpFile,proto3" json:"ActivityDumpFile,omitempty"` - Storage *StorageRequestParams `protobuf:"bytes,2,opt,name=Storage,proto3" json:"Storage,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ActivityDumpFile string `protobuf:"bytes,1,opt,name=ActivityDumpFile,proto3" json:"ActivityDumpFile,omitempty"` + Storage *StorageRequestParams `protobuf:"bytes,2,opt,name=Storage,proto3" json:"Storage,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *TranscodingRequestParams) Reset() { *x = TranscodingRequestParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TranscodingRequestParams) String() string { @@ -2236,7 +2147,7 @@ func (*TranscodingRequestParams) ProtoMessage() {} func (x 
*TranscodingRequestParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2266,21 +2177,18 @@ func (x *TranscodingRequestParams) GetStorage() *StorageRequestParams { } type TranscodingRequestMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` + Storage []*StorageRequestMessage `protobuf:"bytes,2,rep,name=Storage,proto3" json:"Storage,omitempty"` unknownFields protoimpl.UnknownFields - - Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` - Storage []*StorageRequestMessage `protobuf:"bytes,2,rep,name=Storage,proto3" json:"Storage,omitempty"` + sizeCache protoimpl.SizeCache } func (x *TranscodingRequestMessage) Reset() { *x = TranscodingRequestMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *TranscodingRequestMessage) String() string { @@ -2291,7 +2199,7 @@ func (*TranscodingRequestMessage) ProtoMessage() {} func (x *TranscodingRequestMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2321,18 +2229,16 @@ func (x *TranscodingRequestMessage) GetStorage() []*StorageRequestMessage { } type ActivityDumpStreamParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ActivityDumpStreamParams) Reset() { *x = ActivityDumpStreamParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActivityDumpStreamParams) String() string { @@ -2343,7 +2249,7 @@ func (*ActivityDumpStreamParams) ProtoMessage() {} func (x *ActivityDumpStreamParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2359,21 +2265,18 @@ func (*ActivityDumpStreamParams) Descriptor() ([]byte, []int) { } type ActivityDumpStreamMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Dump *ActivityDumpMessage `protobuf:"bytes,1,opt,name=Dump,proto3" json:"Dump,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=Data,proto3" json:"Data,omitempty"` unknownFields protoimpl.UnknownFields - - Dump *ActivityDumpMessage `protobuf:"bytes,1,opt,name=Dump,proto3" json:"Dump,omitempty"` - Data []byte 
`protobuf:"bytes,3,opt,name=Data,proto3" json:"Data,omitempty"` + sizeCache protoimpl.SizeCache } func (x *ActivityDumpStreamMessage) Reset() { *x = ActivityDumpStreamMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActivityDumpStreamMessage) String() string { @@ -2384,7 +2287,7 @@ func (*ActivityDumpStreamMessage) ProtoMessage() {} func (x *ActivityDumpStreamMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2414,21 +2317,18 @@ func (x *ActivityDumpStreamMessage) GetData() []byte { } type WorkloadSelectorMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Tag string `protobuf:"bytes,2,opt,name=Tag,proto3" json:"Tag,omitempty"` unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - Tag string `protobuf:"bytes,2,opt,name=Tag,proto3" json:"Tag,omitempty"` + sizeCache protoimpl.SizeCache } func (x *WorkloadSelectorMessage) Reset() { *x = WorkloadSelectorMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *WorkloadSelectorMessage) String() string { @@ -2439,7 +2339,7 @@ func (*WorkloadSelectorMessage) ProtoMessage() {} func (x *WorkloadSelectorMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2469,22 +2369,19 @@ func (x *WorkloadSelectorMessage) GetTag() string { } type LastAnomalyTimestampMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - EventType string `protobuf:"bytes,1,opt,name=EventType,proto3" json:"EventType,omitempty"` - Timestamp string `protobuf:"bytes,2,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"` - IsStableEventType bool `protobuf:"varint,3,opt,name=IsStableEventType,proto3" json:"IsStableEventType,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + EventType string `protobuf:"bytes,1,opt,name=EventType,proto3" json:"EventType,omitempty"` + Timestamp string `protobuf:"bytes,2,opt,name=Timestamp,proto3" json:"Timestamp,omitempty"` + IsStableEventType bool `protobuf:"varint,3,opt,name=IsStableEventType,proto3" json:"IsStableEventType,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *LastAnomalyTimestampMessage) Reset() { *x = LastAnomalyTimestampMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) 
- ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *LastAnomalyTimestampMessage) String() string { @@ -2495,7 +2392,7 @@ func (*LastAnomalyTimestampMessage) ProtoMessage() {} func (x *LastAnomalyTimestampMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2532,21 +2429,18 @@ func (x *LastAnomalyTimestampMessage) GetIsStableEventType() bool { } type InstanceMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` + Tags []string `protobuf:"bytes,2,rep,name=Tags,proto3" json:"Tags,omitempty"` unknownFields protoimpl.UnknownFields - - ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` - Tags []string `protobuf:"bytes,2,rep,name=Tags,proto3" json:"Tags,omitempty"` + sizeCache protoimpl.SizeCache } func (x *InstanceMessage) Reset() { *x = InstanceMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *InstanceMessage) String() string { @@ -2557,7 +2451,7 @@ func (*InstanceMessage) ProtoMessage() {} func (x *InstanceMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2587,24 +2481,24 @@ func (x *InstanceMessage) GetTags() []string { } type ActivityTreeStatsMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ProcessNodesCount int64 `protobuf:"varint,1,opt,name=ProcessNodesCount,proto3" json:"ProcessNodesCount,omitempty"` - FileNodesCount int64 `protobuf:"varint,2,opt,name=FileNodesCount,proto3" json:"FileNodesCount,omitempty"` - DNSNodesCount int64 `protobuf:"varint,3,opt,name=DNSNodesCount,proto3" json:"DNSNodesCount,omitempty"` - SocketNodesCount int64 `protobuf:"varint,4,opt,name=SocketNodesCount,proto3" json:"SocketNodesCount,omitempty"` - ApproximateSize int64 `protobuf:"varint,5,opt,name=ApproximateSize,proto3" json:"ApproximateSize,omitempty"` + state protoimpl.MessageState `protogen:"open.v1"` + ProcessNodesCount int64 `protobuf:"varint,1,opt,name=ProcessNodesCount,proto3" json:"ProcessNodesCount,omitempty"` + FileNodesCount int64 `protobuf:"varint,2,opt,name=FileNodesCount,proto3" json:"FileNodesCount,omitempty"` + DNSNodesCount int64 `protobuf:"varint,3,opt,name=DNSNodesCount,proto3" json:"DNSNodesCount,omitempty"` + SocketNodesCount int64 `protobuf:"varint,4,opt,name=SocketNodesCount,proto3" json:"SocketNodesCount,omitempty"` + ApproximateSize int64 `protobuf:"varint,5,opt,name=ApproximateSize,proto3" json:"ApproximateSize,omitempty"` + IMDSNodesCount int64 `protobuf:"varint,6,opt,name=IMDSNodesCount,proto3" json:"IMDSNodesCount,omitempty"` + 
SyscallNodesCount int64 `protobuf:"varint,7,opt,name=SyscallNodesCount,proto3" json:"SyscallNodesCount,omitempty"` + FlowNodesCount int64 `protobuf:"varint,8,opt,name=FlowNodesCount,proto3" json:"FlowNodesCount,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ActivityTreeStatsMessage) Reset() { *x = ActivityTreeStatsMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ActivityTreeStatsMessage) String() string { @@ -2615,7 +2509,7 @@ func (*ActivityTreeStatsMessage) ProtoMessage() {} func (x *ActivityTreeStatsMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[44] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2665,22 +2559,40 @@ func (x *ActivityTreeStatsMessage) GetApproximateSize() int64 { return 0 } -type EventTypeState struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields +func (x *ActivityTreeStatsMessage) GetIMDSNodesCount() int64 { + if x != nil { + return x.IMDSNodesCount + } + return 0 +} + +func (x *ActivityTreeStatsMessage) GetSyscallNodesCount() int64 { + if x != nil { + return x.SyscallNodesCount + } + return 0 +} - LastAnomalyNano uint64 `protobuf:"varint,1,opt,name=last_anomaly_nano,json=lastAnomalyNano,proto3" json:"last_anomaly_nano,omitempty"` - EventProfileState string `protobuf:"bytes,2,opt,name=event_profile_state,json=eventProfileState,proto3" json:"event_profile_state,omitempty"` +func (x *ActivityTreeStatsMessage) GetFlowNodesCount() int64 { + if x != nil { + return x.FlowNodesCount + } + return 0 +} + +type EventTypeState struct { + state protoimpl.MessageState `protogen:"open.v1"` + LastAnomalyNano uint64 `protobuf:"varint,1,opt,name=last_anomaly_nano,json=lastAnomalyNano,proto3" json:"last_anomaly_nano,omitempty"` + EventProfileState string `protobuf:"bytes,2,opt,name=event_profile_state,json=eventProfileState,proto3" json:"event_profile_state,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *EventTypeState) Reset() { *x = EventTypeState{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *EventTypeState) String() string { @@ -2691,7 +2603,7 @@ func (*EventTypeState) ProtoMessage() {} func (x *EventTypeState) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2721,23 +2633,20 @@ func (x *EventTypeState) GetEventProfileState() string { } type ProfileContextMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` FirstSeen uint64 
`protobuf:"varint,1,opt,name=first_seen,json=firstSeen,proto3" json:"first_seen,omitempty"` LastSeen uint64 `protobuf:"varint,2,opt,name=last_seen,json=lastSeen,proto3" json:"last_seen,omitempty"` - EventTypeState map[string]*EventTypeState `protobuf:"bytes,3,rep,name=event_type_state,json=eventTypeState,proto3" json:"event_type_state,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + EventTypeState map[string]*EventTypeState `protobuf:"bytes,3,rep,name=event_type_state,json=eventTypeState,proto3" json:"event_type_state,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *ProfileContextMessage) Reset() { *x = ProfileContextMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *ProfileContextMessage) String() string { @@ -2748,7 +2657,7 @@ func (*ProfileContextMessage) ProtoMessage() {} func (x *ProfileContextMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2792,10 +2701,7 @@ func (x *ProfileContextMessage) GetTags() []string { } type SecurityProfileMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - + state protoimpl.MessageState `protogen:"open.v1"` LoadedInKernel bool `protobuf:"varint,1,opt,name=LoadedInKernel,proto3" json:"LoadedInKernel,omitempty"` LoadedInKernelTimestamp string `protobuf:"bytes,2,opt,name=LoadedInKernelTimestamp,proto3" json:"LoadedInKernelTimestamp,omitempty"` Selector *WorkloadSelectorMessage `protobuf:"bytes,3,opt,name=Selector,proto3" json:"Selector,omitempty"` @@ -2813,16 +2719,16 @@ type SecurityProfileMessage struct { Tags []string `protobuf:"bytes,11,rep,name=Tags,proto3" json:"Tags,omitempty"` Stats *ActivityTreeStatsMessage `protobuf:"bytes,12,opt,name=Stats,proto3" json:"Stats,omitempty"` ProfileGlobalState string `protobuf:"bytes,13,opt,name=ProfileGlobalState,proto3" json:"ProfileGlobalState,omitempty"` - ProfileContexts map[string]*ProfileContextMessage `protobuf:"bytes,14,rep,name=profile_contexts,json=profileContexts,proto3" json:"profile_contexts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ProfileContexts map[string]*ProfileContextMessage `protobuf:"bytes,14,rep,name=profile_contexts,json=profileContexts,proto3" json:"profile_contexts,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *SecurityProfileMessage) Reset() { *x = SecurityProfileMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) } func (x *SecurityProfileMessage) String() string { @@ -2833,7 +2739,7 @@ func (*SecurityProfileMessage) ProtoMessage() {} func (x *SecurityProfileMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2951,20 +2857,17 @@ func (x *SecurityProfileMessage) GetProfileContexts() map[string]*ProfileContext } type SecurityProfileListParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + IncludeCache bool `protobuf:"varint,1,opt,name=IncludeCache,proto3" json:"IncludeCache,omitempty"` unknownFields protoimpl.UnknownFields - - IncludeCache bool `protobuf:"varint,1,opt,name=IncludeCache,proto3" json:"IncludeCache,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SecurityProfileListParams) Reset() { *x = SecurityProfileListParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityProfileListParams) String() string { @@ -2975,7 +2878,7 @@ func (*SecurityProfileListParams) ProtoMessage() {} func (x *SecurityProfileListParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -2998,21 +2901,18 @@ func (x *SecurityProfileListParams) GetIncludeCache() bool { } type SecurityProfileListMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Profiles []*SecurityProfileMessage `protobuf:"bytes,1,rep,name=Profiles,proto3" json:"Profiles,omitempty"` + Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` unknownFields protoimpl.UnknownFields - - Profiles []*SecurityProfileMessage `protobuf:"bytes,1,rep,name=Profiles,proto3" json:"Profiles,omitempty"` - Error string `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SecurityProfileListMessage) Reset() { *x = SecurityProfileListMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityProfileListMessage) String() string { @@ -3023,7 +2923,7 @@ func (*SecurityProfileListMessage) ProtoMessage() {} func (x *SecurityProfileListMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3053,20 +2953,17 @@ func (x *SecurityProfileListMessage) GetError() string { } type SecurityProfileSaveParams struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state 
protoimpl.MessageState `protogen:"open.v1"` + Selector *WorkloadSelectorMessage `protobuf:"bytes,1,opt,name=Selector,proto3" json:"Selector,omitempty"` unknownFields protoimpl.UnknownFields - - Selector *WorkloadSelectorMessage `protobuf:"bytes,1,opt,name=Selector,proto3" json:"Selector,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SecurityProfileSaveParams) Reset() { *x = SecurityProfileSaveParams{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityProfileSaveParams) String() string { @@ -3077,7 +2974,7 @@ func (*SecurityProfileSaveParams) ProtoMessage() {} func (x *SecurityProfileSaveParams) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3100,21 +2997,18 @@ func (x *SecurityProfileSaveParams) GetSelector() *WorkloadSelectorMessage { } type SecurityProfileSaveMessage struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` + File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` unknownFields protoimpl.UnknownFields - - Error string `protobuf:"bytes,1,opt,name=Error,proto3" json:"Error,omitempty"` - File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` + sizeCache protoimpl.SizeCache } func (x *SecurityProfileSaveMessage) Reset() { *x = SecurityProfileSaveMessage{} - if protoimpl.UnsafeEnabled { - mi := &file_pkg_security_proto_api_api_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } + mi := &file_pkg_security_proto_api_api_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } func (x *SecurityProfileSaveMessage) String() string { @@ -3125,7 +3019,7 @@ func (*SecurityProfileSaveMessage) ProtoMessage() {} func (x *SecurityProfileSaveMessage) ProtoReflect() protoreflect.Message { mi := &file_pkg_security_proto_api_api_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { + if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) @@ -3284,7 +3178,7 @@ var file_pkg_security_proto_api_api_proto_rawDesc = []byte{ 0x73, 0x12, 0x33, 0x0a, 0x06, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x41, 0x6e, 0x64, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x06, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xe1, 0x01, 0x0a, 0x11, 0x45, 0x6e, 0x76, 0x69, 0x72, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0xff, 0x01, 0x0a, 0x11, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x12, 0x38, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x73, @@ -3298,353 +3192,366 @@ var 
file_pkg_security_proto_api_api_proto_rawDesc = []byte{ 0x01, 0x28, 0x08, 0x52, 0x0f, 0x55, 0x73, 0x65, 0x4d, 0x6d, 0x61, 0x70, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x61, 0x70, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x55, 0x73, 0x65, 0x52, 0x69, 0x6e, 0x67, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x55, 0x73, 0x65, - 0x52, 0x69, 0x6e, 0x67, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x75, - 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x22, 0x3b, 0x0a, 0x15, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, - 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x44, - 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, - 0xa8, 0x02, 0x0a, 0x14, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x34, 0x0a, 0x15, 0x4c, 0x6f, 0x63, 0x61, - 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x30, - 0x0a, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x4c, 0x6f, 0x63, - 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, - 0x12, 0x38, 0x0a, 0x17, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x17, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x52, 0x65, - 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, - 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, 0x12, 0x3a, - 0x0a, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x18, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, - 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xcf, 0x01, 0x0a, 0x12, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x44, - 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, - 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x20, - 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, - 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x22, 0xeb, 0x03, 0x0a, - 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x22, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, - 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x4b, - 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, - 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x69, - 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, - 0x63, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, 0x63, 0x68, 0x12, 0x12, - 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x56, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, - 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, - 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, - 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x04, 0x43, 0x6f, - 0x6d, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x43, 0x6f, - 0x6d, 0x6d, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x44, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x65, 0x72, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, - 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, - 0x65, 0x6f, 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x69, - 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0d, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, - 0x0a, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x22, 0x79, 0x0a, 0x15, 0x53, 0x74, - 
0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, - 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x46, 0x69, 0x6c, 0x65, 0x22, 0xbe, 0x02, 0x0a, 0x13, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x48, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x48, 0x6f, 0x73, - 0x74, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x53, 0x65, 0x72, 0x76, - 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, - 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, - 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, - 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x44, 0x4e, 0x53, 0x4e, 0x61, 0x6d, 0x65, - 0x73, 0x12, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, - 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, - 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x22, 0x5f, 0x0a, 0x17, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, - 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2e, 0x0a, 0x05, 0x44, - 0x75, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x22, 0x6a, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, - 0x70, 0x53, 0x74, 0x6f, 
0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x4e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x22, 0x2f, 0x0a, - 0x17, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, - 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x7b, - 0x0a, 0x18, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2a, 0x0a, 0x10, 0x41, 0x63, - 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, - 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x67, 0x0a, 0x19, 0x54, - 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x34, - 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x53, 0x74, 0x6f, - 0x72, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x22, 0x5d, 0x0a, 0x19, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, - 0x04, 0x44, 0x75, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x12, 0x0a, 0x04, 0x44, - 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, 0x61, 0x74, 0x61, 0x22, - 0x3f, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, 0x6c, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x4e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x54, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x54, 0x61, 0x67, - 0x22, 0x87, 0x01, 0x0a, 0x1b, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 
0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x11, - 0x49, 0x73, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x49, 0x73, 0x53, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x47, 0x0a, 0x0f, 0x49, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, - 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, - 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x54, - 0x61, 0x67, 0x73, 0x22, 0xec, 0x01, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, - 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x73, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x50, 0x72, 0x6f, - 0x63, 0x65, 0x73, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, - 0x0a, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, - 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x44, 0x4e, 0x53, 0x4e, 0x6f, 0x64, - 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x44, - 0x4e, 0x53, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x10, - 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x6f, - 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0f, 0x41, 0x70, 0x70, 0x72, - 0x6f, 0x78, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x0f, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x53, 0x69, - 0x7a, 0x65, 0x22, 0x6e, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x61, - 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x4e, 0x61, - 0x6e, 0x6f, 0x12, 0x2e, 0x0a, 0x13, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x22, 0x9b, 0x02, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, - 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x04, - 0x52, 0x09, 0x66, 0x69, 0x72, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6c, - 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, - 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x58, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x53, 0x74, 0x61, - 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x58, 0x0a, 0x13, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, - 0x79, 0x70, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0xa0, 0x06, 0x0a, 0x16, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x4c, - 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, - 0x6e, 0x65, 0x6c, 0x12, 0x38, 0x0a, 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, - 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, 0x65, - 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x38, 0x0a, - 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, 0x65, - 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x53, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, - 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, 0x1e, 0x0a, - 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x4a, 0x0a, - 0x0d, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x69, 0x65, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x73, 0x74, 0x41, - 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x4c, 0x61, 0x73, 0x74, - 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x09, 0x49, 0x6e, 0x73, - 
0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x52, 0x09, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, - 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, - 0x01, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x0a, 0x07, 0x56, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, - 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x04, 0x54, 0x61, 0x67, - 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x54, 0x61, 0x67, - 0x73, 0x12, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x54, - 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, - 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x12, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, - 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5b, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x30, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, - 0x78, 0x74, 0x73, 0x1a, 0x5e, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, - 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x30, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, - 0x70, 0x69, 0x2e, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, - 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x12, 0x22, 0x0a, 0x0c, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x43, - 0x61, 0x63, 0x68, 0x65, 0x22, 0x6b, 0x0a, 0x1a, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, - 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 
0x32, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x22, 0x55, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, - 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x38, - 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, - 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, - 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x46, 0x0a, 0x1a, 0x53, 0x65, 0x63, 0x75, - 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x4d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, - 0x46, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, - 0x32, 0x8a, 0x0a, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4d, 0x6f, 0x64, - 0x75, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, - 0x12, 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, - 0x72, 0x69, 0x74, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x10, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, - 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x24, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, - 0x72, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, - 0x61, 0x63, 0x68, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, - 0x09, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x14, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x43, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x30, - 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x1a, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x22, 0x00, - 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x12, - 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, - 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 
0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, - 0x10, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, - 0x74, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x22, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, - 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x6c, - 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, - 0x73, 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x14, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, - 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1f, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x20, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, - 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, - 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x44, - 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, - 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, - 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x43, 0x0a, - 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, 0x17, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, - 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, - 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x22, 0x00, 0x12, 0x50, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, - 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, + 0x52, 0x69, 0x6e, 0x67, 0x42, 0x75, 0x66, 0x66, 0x65, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x55, 0x73, + 0x65, 0x46, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x55, + 0x73, 0x65, 0x46, 0x65, 0x6e, 0x74, 0x72, 0x79, 0x22, 0x16, 0x0a, 0x14, 0x44, 0x75, 0x6d, 0x70, + 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x22, 0x3b, 0x0a, 0x15, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, + 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x44, 0x75, 0x6d, + 0x70, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0xa8, 0x02, + 0x0a, 0x14, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 
0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x34, 0x0a, 0x15, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x13, + 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x13, 0x4c, 0x6f, 0x63, 0x61, 0x6c, + 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, 0x12, 0x38, + 0x0a, 0x17, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, + 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x17, 0x4c, 0x6f, 0x63, 0x61, 0x6c, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x14, 0x52, 0x65, 0x6d, 0x6f, + 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x18, + 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, + 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x43, 0x6f, 0x6d, + 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xcf, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x74, + 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x69, 0x66, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, + 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1a, + 0x0a, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x22, 0x91, 0x04, 0x0a, 0x0f, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, + 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, + 
0x6d, 0x6d, 0x69, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x4b, 0x65, 0x72, + 0x6e, 0x65, 0x6c, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x4c, 0x69, + 0x6e, 0x75, 0x78, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x4c, 0x69, 0x6e, 0x75, 0x78, 0x44, 0x69, 0x73, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x41, 0x72, 0x63, 0x68, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x41, 0x72, 0x63, 0x68, 0x12, 0x12, 0x0a, 0x04, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x28, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x56, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x11, 0x44, 0x69, + 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x44, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, + 0x69, 0x61, 0x74, 0x65, 0x41, 0x72, 0x67, 0x73, 0x12, 0x16, 0x0a, 0x04, 0x43, 0x6f, 0x6d, 0x6d, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x43, 0x6f, 0x6d, 0x6d, + 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x54, 0x69, 0x6d, 0x65, + 0x6f, 0x75, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x54, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x04, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x69, 0x61, 0x6c, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x53, + 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, + 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x44, 0x12, 0x24, 0x0a, 0x0d, 0x43, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0d, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x22, 0x79, + 0x0a, 0x15, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x46, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x46, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x43, 0x6f, 0x6d, 0x70, 0x72, 0x65, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x22, 0xbe, 0x02, 0x0a, 0x13, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x69, 
0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x48, 0x6f, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x48, 0x6f, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x12, 0x30, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1a, 0x0a, 0x08, 0x44, 0x4e, 0x53, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x44, 0x4e, 0x53, + 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, + 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0x18, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x70, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, - 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, - 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, - 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x2e, 0x61, 0x70, - 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x15, - 0x47, 0x65, 0x74, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, - 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x72, 0x61, 0x6d, 0x73, 0x22, 0x5f, 0x0a, 0x17, 
0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, + 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x2e, 0x0a, 0x05, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, + 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, + 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x6a, 0x0a, 0x16, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, + 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x12, 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x49, 0x44, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, + 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x43, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x49, + 0x44, 0x22, 0x2f, 0x0a, 0x17, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, + 0x70, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x7b, 0x0a, 0x18, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2a, + 0x0a, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, + 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, + 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x12, 0x33, 0x0a, 0x07, 0x53, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x52, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, + 0x67, 0x0a, 0x19, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x12, 0x34, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, - 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x59, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, - 0x12, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 
0x69, 0x74, 0x79, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, - 0x1a, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, - 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x13, 0x53, 0x61, 0x76, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1e, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x53, 0x61, 0x76, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x61, 0x70, 0x69, - 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, - 0x53, 0x61, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x42, 0x18, 0x5a, - 0x16, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2f, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x61, 0x6d, 0x73, 0x22, 0x5d, 0x0a, 0x19, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, + 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x44, 0x75, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, + 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x04, 0x44, 0x75, 0x6d, 0x70, 0x12, + 0x12, 0x0a, 0x04, 0x44, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x44, + 0x61, 0x74, 0x61, 0x22, 0x3f, 0x0a, 0x17, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x53, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x54, 0x61, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x54, 0x61, 0x67, 0x22, 0x87, 0x01, 0x0a, 0x1b, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, + 0x6d, 0x61, 0x6c, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x2c, 0x0a, 0x11, 0x49, 0x73, 0x53, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x49, 0x73, 0x53, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x22, 0x47, + 0x0a, 0x0f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x54, 0x61, 0x67, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x54, 0x61, 0x67, 0x73, 0x22, 0xea, 0x02, 0x0a, 0x18, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x69, 0x74, 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, + 0x73, 
0x61, 0x67, 0x65, 0x12, 0x2c, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x11, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0e, 0x46, 0x69, 0x6c, 0x65, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x46, 0x69, 0x6c, 0x65, + 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0d, 0x44, 0x4e, + 0x53, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0d, 0x44, 0x4e, 0x53, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x2a, 0x0a, 0x10, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x53, 0x6f, 0x63, 0x6b, + 0x65, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x0f, + 0x41, 0x70, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x61, 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0f, 0x41, 0x70, 0x70, 0x72, 0x6f, 0x78, 0x69, 0x6d, 0x61, + 0x74, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x26, 0x0a, 0x0e, 0x49, 0x4d, 0x44, 0x53, 0x4e, 0x6f, + 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, + 0x49, 0x4d, 0x44, 0x53, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, + 0x0a, 0x11, 0x53, 0x79, 0x73, 0x63, 0x61, 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x53, 0x79, 0x73, 0x63, 0x61, + 0x6c, 0x6c, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x26, 0x0a, 0x0e, + 0x46, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x46, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x6e, 0x0a, 0x10, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x61, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, + 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x2e, 0x0a, 0x13, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x72, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x11, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x22, 0x9b, 0x02, 0x0a, 0x15, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1d, + 0x0a, 0x0a, 0x66, 0x69, 0x72, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x09, 0x66, 0x69, 0x72, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x1b, 0x0a, + 0x09, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x65, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x65, 0x65, 0x6e, 0x12, 0x58, 0x0a, 0x10, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x43, 0x6f, 0x6e, 
0x74, 0x65, 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x1a, 0x58, 0x0a, 0x13, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xa0, 0x06, 0x0a, 0x16, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x26, 0x0a, + 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, 0x4b, + 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x12, 0x38, 0x0a, 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, + 0x6e, 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x17, 0x4c, 0x6f, 0x61, 0x64, 0x65, 0x64, 0x49, 0x6e, + 0x4b, 0x65, 0x72, 0x6e, 0x65, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x38, 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, + 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, + 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x24, 0x0a, 0x0d, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0d, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6f, 0x6b, 0x69, 0x65, 0x12, + 0x1e, 0x0a, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, + 0x4a, 0x0a, 0x0d, 0x4c, 0x61, 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x69, 0x65, 0x73, + 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x4c, 0x61, 0x73, + 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x79, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x4c, 0x61, + 0x73, 0x74, 0x41, 0x6e, 0x6f, 0x6d, 0x61, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x09, 0x49, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x52, 0x09, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, + 0x1a, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1c, 0x0a, 0x07, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 
0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x08, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x61, 0x70, + 0x69, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x52, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x16, 0x0a, 0x04, 0x54, + 0x61, 0x67, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x04, 0x54, + 0x61, 0x67, 0x73, 0x12, 0x33, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, + 0x79, 0x54, 0x72, 0x65, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x52, 0x05, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x12, 0x50, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x47, 0x6c, 0x6f, + 0x62, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x5b, 0x0a, 0x10, 0x70, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x18, 0x0e, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x73, 0x1a, 0x5e, 0x0a, 0x14, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x30, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x74, + 0x65, 0x78, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x22, 0x6b, 0x0a, 0x1a, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x52, 0x08, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x22, 0x55, 0x0a, 0x19, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x50, 0x61, 0x72, 
0x61, 0x6d, 0x73, + 0x12, 0x38, 0x0a, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, + 0x64, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x52, 0x08, 0x53, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x46, 0x0a, 0x1a, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x61, 0x76, + 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x46, 0x69, + 0x6c, 0x65, 0x32, 0x8a, 0x0a, 0x0a, 0x0e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x4d, + 0x6f, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x13, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x57, 0x0a, 0x10, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x24, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, + 0x73, 0x43, 0x61, 0x63, 0x68, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, + 0x3f, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x14, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, + 0x12, 0x30, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x1a, 0x0b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0b, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, + 0x74, 0x12, 0x16, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x75, 0x6e, 0x53, 0x65, 0x6c, 0x66, 0x54, + 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, + 0x55, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, + 0x6f, 0x72, 0x74, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, + 0x65, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x1a, 0x22, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x47, 0x65, 0x74, 0x52, 0x75, 0x6c, 0x65, 0x53, 0x65, + 0x74, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, 0x73, + 0x73, 
0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x12, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, + 0x65, 0x6c, 0x6f, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x50, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x52, 0x65, 0x6c, 0x6f, 0x61, 0x64, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x14, 0x44, 0x75, 0x6d, 0x70, 0x4e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, + 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x1a, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x4e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x00, 0x12, 0x49, 0x0a, 0x0e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, + 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x12, 0x19, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, + 0x70, 0x44, 0x69, 0x73, 0x63, 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x44, 0x75, 0x6d, 0x70, 0x44, 0x69, 0x73, 0x63, + 0x61, 0x72, 0x64, 0x65, 0x72, 0x73, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, + 0x43, 0x0a, 0x0c, 0x44, 0x75, 0x6d, 0x70, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x12, + 0x17, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, + 0x6d, 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x18, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, + 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x22, 0x00, 0x12, 0x50, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x74, 0x69, + 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x73, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, 0x2e, + 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, + 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x10, 0x53, 0x74, 0x6f, 0x70, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x12, 0x1b, 0x2e, 0x61, 0x70, 0x69, + 0x2e, 0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, + 0x70, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x6f, 0x70, 0x4d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x12, 0x54, 0x72, 0x61, 0x6e, 0x73, + 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x2e, + 0x61, 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x5a, + 0x0a, 0x15, 0x47, 0x65, 0x74, 
0x41, 0x63, 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, + 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x41, 0x63, 0x74, + 0x69, 0x76, 0x69, 0x74, 0x79, 0x44, 0x75, 0x6d, 0x70, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x30, 0x01, 0x12, 0x59, 0x0a, 0x14, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, + 0x65, 0x73, 0x12, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x22, 0x00, 0x12, 0x58, 0x0a, 0x13, 0x53, 0x61, 0x76, 0x65, 0x53, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x1e, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x1a, 0x1f, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x50, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x53, 0x61, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x00, 0x42, + 0x18, 0x5a, 0x16, 0x70, 0x6b, 0x67, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x70, 0x69, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, } var ( @@ -3790,632 +3697,6 @@ func file_pkg_security_proto_api_api_proto_init() { if File_pkg_security_proto_api_api_proto != nil { return } - if !protoimpl.UnsafeEnabled { - file_pkg_security_proto_api_api_proto_msgTypes[0].Exporter = func(v any, i int) any { - switch v := v.(*GetEventParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[1].Exporter = func(v any, i int) any { - switch v := v.(*SecurityEventMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[2].Exporter = func(v any, i int) any { - switch v := v.(*DumpProcessCacheParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[3].Exporter = func(v any, i int) any { - switch v := v.(*SecurityDumpProcessCacheMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[4].Exporter = func(v any, i int) any { - switch v := v.(*DumpNetworkNamespaceParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[5].Exporter = func(v any, i int) any { - switch v := v.(*DumpNetworkNamespaceMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return 
&v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[6].Exporter = func(v any, i int) any { - switch v := v.(*GetConfigParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[7].Exporter = func(v any, i int) any { - switch v := v.(*SecurityConfigMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[8].Exporter = func(v any, i int) any { - switch v := v.(*RuleSetReportMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[9].Exporter = func(v any, i int) any { - switch v := v.(*EventTypePolicy); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[10].Exporter = func(v any, i int) any { - switch v := v.(*Approvers); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[11].Exporter = func(v any, i int) any { - switch v := v.(*ApproverDetails); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[12].Exporter = func(v any, i int) any { - switch v := v.(*GetRuleSetReportParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[13].Exporter = func(v any, i int) any { - switch v := v.(*GetRuleSetReportResultMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[14].Exporter = func(v any, i int) any { - switch v := v.(*ReloadPoliciesParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[15].Exporter = func(v any, i int) any { - switch v := v.(*ReloadPoliciesResultMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[16].Exporter = func(v any, i int) any { - switch v := v.(*RunSelfTestParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[17].Exporter = func(v any, i int) any { - switch v := v.(*SecuritySelfTestResultMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[18].Exporter = func(v any, i int) any { - switch v := v.(*GetStatusParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[19].Exporter = func(v any, i int) any { - switch v := 
v.(*ConstantValueAndSource); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[20].Exporter = func(v any, i int) any { - switch v := v.(*SelfTestsStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[21].Exporter = func(v any, i int) any { - switch v := v.(*RuleStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[22].Exporter = func(v any, i int) any { - switch v := v.(*PolicyStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[23].Exporter = func(v any, i int) any { - switch v := v.(*Status); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[24].Exporter = func(v any, i int) any { - switch v := v.(*ConstantFetcherStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[25].Exporter = func(v any, i int) any { - switch v := v.(*EnvironmentStatus); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[26].Exporter = func(v any, i int) any { - switch v := v.(*DumpDiscardersParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[27].Exporter = func(v any, i int) any { - switch v := v.(*DumpDiscardersMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[28].Exporter = func(v any, i int) any { - switch v := v.(*StorageRequestParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[29].Exporter = func(v any, i int) any { - switch v := v.(*ActivityDumpParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[30].Exporter = func(v any, i int) any { - switch v := v.(*MetadataMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[31].Exporter = func(v any, i int) any { - switch v := v.(*StorageRequestMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[32].Exporter = func(v any, i int) any { - switch v := v.(*ActivityDumpMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_pkg_security_proto_api_api_proto_msgTypes[33].Exporter = func(v any, i int) any { - switch v := v.(*ActivityDumpListParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[34].Exporter = func(v any, i int) any { - switch v := v.(*ActivityDumpListMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[35].Exporter = func(v any, i int) any { - switch v := v.(*ActivityDumpStopParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[36].Exporter = func(v any, i int) any { - switch v := v.(*ActivityDumpStopMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[37].Exporter = func(v any, i int) any { - switch v := v.(*TranscodingRequestParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[38].Exporter = func(v any, i int) any { - switch v := v.(*TranscodingRequestMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[39].Exporter = func(v any, i int) any { - switch v := v.(*ActivityDumpStreamParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[40].Exporter = func(v any, i int) any { - switch v := v.(*ActivityDumpStreamMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[41].Exporter = func(v any, i int) any { - switch v := v.(*WorkloadSelectorMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[42].Exporter = func(v any, i int) any { - switch v := v.(*LastAnomalyTimestampMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[43].Exporter = func(v any, i int) any { - switch v := v.(*InstanceMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[44].Exporter = func(v any, i int) any { - switch v := v.(*ActivityTreeStatsMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[45].Exporter = func(v any, i int) any { - switch v := v.(*EventTypeState); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[46].Exporter = func(v any, i int) any { - switch v := 
v.(*ProfileContextMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[47].Exporter = func(v any, i int) any { - switch v := v.(*SecurityProfileMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[48].Exporter = func(v any, i int) any { - switch v := v.(*SecurityProfileListParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[49].Exporter = func(v any, i int) any { - switch v := v.(*SecurityProfileListMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[50].Exporter = func(v any, i int) any { - switch v := v.(*SecurityProfileSaveParams); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_pkg_security_proto_api_api_proto_msgTypes[51].Exporter = func(v any, i int) any { - switch v := v.(*SecurityProfileSaveMessage); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ diff --git a/pkg/security/proto/api/api.proto b/pkg/security/proto/api/api.proto index 7032834f238c3..c87ec7da9b5e3 100644 --- a/pkg/security/proto/api/api.proto +++ b/pkg/security/proto/api/api.proto @@ -123,6 +123,7 @@ message EnvironmentStatus { string KernelLockdown = 3; bool UseMmapableMaps = 4; bool UseRingBuffer = 5; + bool UseFentry = 6; } /*Discarders*/ @@ -167,6 +168,7 @@ message MetadataMessage { uint64 Size = 13; string Serialization = 14; string CGroupID = 15; + string CGroupManager = 16; } message StorageRequestMessage { @@ -244,6 +246,9 @@ message ActivityTreeStatsMessage { int64 DNSNodesCount = 3; int64 SocketNodesCount = 4; int64 ApproximateSize = 5; + int64 IMDSNodesCount = 6; + int64 SyscallNodesCount = 7; + int64 FlowNodesCount = 8; } message event_type_state { diff --git a/pkg/security/proto/api/api_grpc.pb.go b/pkg/security/proto/api/api_grpc.pb.go index c8593df292174..e78b0774a288f 100644 --- a/pkg/security/proto/api/api_grpc.pb.go +++ b/pkg/security/proto/api/api_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.4.0 +// - protoc-gen-go-grpc v1.5.1 // - protoc // source: pkg/security/proto/api/api.proto @@ -15,8 +15,8 @@ import ( // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.62.0 or later. -const _ = grpc.SupportPackageIsVersion8 +// Requires gRPC-Go v1.64.0 or later. +const _ = grpc.SupportPackageIsVersion9 const ( SecurityModule_GetEvents_FullMethodName = "/api.SecurityModule/GetEvents" @@ -41,7 +41,7 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
type SecurityModuleClient interface { - GetEvents(ctx context.Context, in *GetEventParams, opts ...grpc.CallOption) (SecurityModule_GetEventsClient, error) + GetEvents(ctx context.Context, in *GetEventParams, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SecurityEventMessage], error) DumpProcessCache(ctx context.Context, in *DumpProcessCacheParams, opts ...grpc.CallOption) (*SecurityDumpProcessCacheMessage, error) GetConfig(ctx context.Context, in *GetConfigParams, opts ...grpc.CallOption) (*SecurityConfigMessage, error) GetStatus(ctx context.Context, in *GetStatusParams, opts ...grpc.CallOption) (*Status, error) @@ -55,7 +55,7 @@ type SecurityModuleClient interface { ListActivityDumps(ctx context.Context, in *ActivityDumpListParams, opts ...grpc.CallOption) (*ActivityDumpListMessage, error) StopActivityDump(ctx context.Context, in *ActivityDumpStopParams, opts ...grpc.CallOption) (*ActivityDumpStopMessage, error) TranscodingRequest(ctx context.Context, in *TranscodingRequestParams, opts ...grpc.CallOption) (*TranscodingRequestMessage, error) - GetActivityDumpStream(ctx context.Context, in *ActivityDumpStreamParams, opts ...grpc.CallOption) (SecurityModule_GetActivityDumpStreamClient, error) + GetActivityDumpStream(ctx context.Context, in *ActivityDumpStreamParams, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ActivityDumpStreamMessage], error) // Security Profiles ListSecurityProfiles(ctx context.Context, in *SecurityProfileListParams, opts ...grpc.CallOption) (*SecurityProfileListMessage, error) SaveSecurityProfile(ctx context.Context, in *SecurityProfileSaveParams, opts ...grpc.CallOption) (*SecurityProfileSaveMessage, error) @@ -69,13 +69,13 @@ func NewSecurityModuleClient(cc grpc.ClientConnInterface) SecurityModuleClient { return &securityModuleClient{cc} } -func (c *securityModuleClient) GetEvents(ctx context.Context, in *GetEventParams, opts ...grpc.CallOption) (SecurityModule_GetEventsClient, error) { +func (c *securityModuleClient) GetEvents(ctx context.Context, in *GetEventParams, opts ...grpc.CallOption) (grpc.ServerStreamingClient[SecurityEventMessage], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &SecurityModule_ServiceDesc.Streams[0], SecurityModule_GetEvents_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &securityModuleGetEventsClient{ClientStream: stream} + x := &grpc.GenericClientStream[GetEventParams, SecurityEventMessage]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -85,22 +85,8 @@ func (c *securityModuleClient) GetEvents(ctx context.Context, in *GetEventParams return x, nil } -type SecurityModule_GetEventsClient interface { - Recv() (*SecurityEventMessage, error) - grpc.ClientStream -} - -type securityModuleGetEventsClient struct { - grpc.ClientStream -} - -func (x *securityModuleGetEventsClient) Recv() (*SecurityEventMessage, error) { - m := new(SecurityEventMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type SecurityModule_GetEventsClient = grpc.ServerStreamingClient[SecurityEventMessage] func (c *securityModuleClient) DumpProcessCache(ctx context.Context, in *DumpProcessCacheParams, opts ...grpc.CallOption) (*SecurityDumpProcessCacheMessage, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
@@ -222,13 +208,13 @@ func (c *securityModuleClient) TranscodingRequest(ctx context.Context, in *Trans return out, nil } -func (c *securityModuleClient) GetActivityDumpStream(ctx context.Context, in *ActivityDumpStreamParams, opts ...grpc.CallOption) (SecurityModule_GetActivityDumpStreamClient, error) { +func (c *securityModuleClient) GetActivityDumpStream(ctx context.Context, in *ActivityDumpStreamParams, opts ...grpc.CallOption) (grpc.ServerStreamingClient[ActivityDumpStreamMessage], error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) stream, err := c.cc.NewStream(ctx, &SecurityModule_ServiceDesc.Streams[1], SecurityModule_GetActivityDumpStream_FullMethodName, cOpts...) if err != nil { return nil, err } - x := &securityModuleGetActivityDumpStreamClient{ClientStream: stream} + x := &grpc.GenericClientStream[ActivityDumpStreamParams, ActivityDumpStreamMessage]{ClientStream: stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } @@ -238,22 +224,8 @@ func (c *securityModuleClient) GetActivityDumpStream(ctx context.Context, in *Ac return x, nil } -type SecurityModule_GetActivityDumpStreamClient interface { - Recv() (*ActivityDumpStreamMessage, error) - grpc.ClientStream -} - -type securityModuleGetActivityDumpStreamClient struct { - grpc.ClientStream -} - -func (x *securityModuleGetActivityDumpStreamClient) Recv() (*ActivityDumpStreamMessage, error) { - m := new(ActivityDumpStreamMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type SecurityModule_GetActivityDumpStreamClient = grpc.ServerStreamingClient[ActivityDumpStreamMessage] func (c *securityModuleClient) ListSecurityProfiles(ctx context.Context, in *SecurityProfileListParams, opts ...grpc.CallOption) (*SecurityProfileListMessage, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) @@ -277,9 +249,9 @@ func (c *securityModuleClient) SaveSecurityProfile(ctx context.Context, in *Secu // SecurityModuleServer is the server API for SecurityModule service. // All implementations must embed UnimplementedSecurityModuleServer -// for forward compatibility +// for forward compatibility. 
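Not part of the generated code: a minimal sketch of what callers and implementations look like after regeneration with protoc-gen-go-grpc v1.5.1. The backwards-compatibility aliases above and below mean existing code that names SecurityModule_GetEventsClient or SecurityModule_GetEventsServer keeps compiling, since both now resolve to the generic grpc stream types.

```go
package apiusage

import (
	"context"
	"errors"
	"io"

	"google.golang.org/grpc"

	"github.com/DataDog/datadog-agent/pkg/security/proto/api"
)

// drainEvents consumes the GetEvents stream. The returned stream is a
// grpc.ServerStreamingClient[api.SecurityEventMessage], so Recv() is still
// available exactly as with the previous hand-rolled stream type.
func drainEvents(ctx context.Context, client api.SecurityModuleClient, handle func(*api.SecurityEventMessage)) error {
	stream, err := client.GetEvents(ctx, &api.GetEventParams{})
	if err != nil {
		return err
	}
	for {
		msg, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		handle(msg)
	}
}

// eventServer shows the matching server-side signature after regeneration;
// embedding UnimplementedSecurityModuleServer by value is what the new
// testEmbeddedByValue check below expects.
type eventServer struct {
	api.UnimplementedSecurityModuleServer
}

// GetEvents streams a single empty event, purely for illustration.
func (s *eventServer) GetEvents(_ *api.GetEventParams, stream grpc.ServerStreamingServer[api.SecurityEventMessage]) error {
	return stream.Send(&api.SecurityEventMessage{})
}
```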
type SecurityModuleServer interface { - GetEvents(*GetEventParams, SecurityModule_GetEventsServer) error + GetEvents(*GetEventParams, grpc.ServerStreamingServer[SecurityEventMessage]) error DumpProcessCache(context.Context, *DumpProcessCacheParams) (*SecurityDumpProcessCacheMessage, error) GetConfig(context.Context, *GetConfigParams) (*SecurityConfigMessage, error) GetStatus(context.Context, *GetStatusParams) (*Status, error) @@ -293,18 +265,21 @@ type SecurityModuleServer interface { ListActivityDumps(context.Context, *ActivityDumpListParams) (*ActivityDumpListMessage, error) StopActivityDump(context.Context, *ActivityDumpStopParams) (*ActivityDumpStopMessage, error) TranscodingRequest(context.Context, *TranscodingRequestParams) (*TranscodingRequestMessage, error) - GetActivityDumpStream(*ActivityDumpStreamParams, SecurityModule_GetActivityDumpStreamServer) error + GetActivityDumpStream(*ActivityDumpStreamParams, grpc.ServerStreamingServer[ActivityDumpStreamMessage]) error // Security Profiles ListSecurityProfiles(context.Context, *SecurityProfileListParams) (*SecurityProfileListMessage, error) SaveSecurityProfile(context.Context, *SecurityProfileSaveParams) (*SecurityProfileSaveMessage, error) mustEmbedUnimplementedSecurityModuleServer() } -// UnimplementedSecurityModuleServer must be embedded to have forward compatible implementations. -type UnimplementedSecurityModuleServer struct { -} +// UnimplementedSecurityModuleServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedSecurityModuleServer struct{} -func (UnimplementedSecurityModuleServer) GetEvents(*GetEventParams, SecurityModule_GetEventsServer) error { +func (UnimplementedSecurityModuleServer) GetEvents(*GetEventParams, grpc.ServerStreamingServer[SecurityEventMessage]) error { return status.Errorf(codes.Unimplemented, "method GetEvents not implemented") } func (UnimplementedSecurityModuleServer) DumpProcessCache(context.Context, *DumpProcessCacheParams) (*SecurityDumpProcessCacheMessage, error) { @@ -343,7 +318,7 @@ func (UnimplementedSecurityModuleServer) StopActivityDump(context.Context, *Acti func (UnimplementedSecurityModuleServer) TranscodingRequest(context.Context, *TranscodingRequestParams) (*TranscodingRequestMessage, error) { return nil, status.Errorf(codes.Unimplemented, "method TranscodingRequest not implemented") } -func (UnimplementedSecurityModuleServer) GetActivityDumpStream(*ActivityDumpStreamParams, SecurityModule_GetActivityDumpStreamServer) error { +func (UnimplementedSecurityModuleServer) GetActivityDumpStream(*ActivityDumpStreamParams, grpc.ServerStreamingServer[ActivityDumpStreamMessage]) error { return status.Errorf(codes.Unimplemented, "method GetActivityDumpStream not implemented") } func (UnimplementedSecurityModuleServer) ListSecurityProfiles(context.Context, *SecurityProfileListParams) (*SecurityProfileListMessage, error) { @@ -353,6 +328,7 @@ func (UnimplementedSecurityModuleServer) SaveSecurityProfile(context.Context, *S return nil, status.Errorf(codes.Unimplemented, "method SaveSecurityProfile not implemented") } func (UnimplementedSecurityModuleServer) mustEmbedUnimplementedSecurityModuleServer() {} +func (UnimplementedSecurityModuleServer) testEmbeddedByValue() {} // UnsafeSecurityModuleServer may be embedded to opt out of forward compatibility for this service. 
// Use of this interface is not recommended, as added methods to SecurityModuleServer will @@ -362,6 +338,13 @@ type UnsafeSecurityModuleServer interface { } func RegisterSecurityModuleServer(s grpc.ServiceRegistrar, srv SecurityModuleServer) { + // If the following call panics, it indicates UnimplementedSecurityModuleServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } s.RegisterService(&SecurityModule_ServiceDesc, srv) } @@ -370,21 +353,11 @@ func _SecurityModule_GetEvents_Handler(srv interface{}, stream grpc.ServerStream if err := stream.RecvMsg(m); err != nil { return err } - return srv.(SecurityModuleServer).GetEvents(m, &securityModuleGetEventsServer{ServerStream: stream}) + return srv.(SecurityModuleServer).GetEvents(m, &grpc.GenericServerStream[GetEventParams, SecurityEventMessage]{ServerStream: stream}) } -type SecurityModule_GetEventsServer interface { - Send(*SecurityEventMessage) error - grpc.ServerStream -} - -type securityModuleGetEventsServer struct { - grpc.ServerStream -} - -func (x *securityModuleGetEventsServer) Send(m *SecurityEventMessage) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name. +type SecurityModule_GetEventsServer = grpc.ServerStreamingServer[SecurityEventMessage] func _SecurityModule_DumpProcessCache_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DumpProcessCacheParams) @@ -607,21 +580,11 @@ func _SecurityModule_GetActivityDumpStream_Handler(srv interface{}, stream grpc. if err := stream.RecvMsg(m); err != nil { return err } - return srv.(SecurityModuleServer).GetActivityDumpStream(m, &securityModuleGetActivityDumpStreamServer{ServerStream: stream}) -} - -type SecurityModule_GetActivityDumpStreamServer interface { - Send(*ActivityDumpStreamMessage) error - grpc.ServerStream + return srv.(SecurityModuleServer).GetActivityDumpStream(m, &grpc.GenericServerStream[ActivityDumpStreamParams, ActivityDumpStreamMessage]{ServerStream: stream}) } -type securityModuleGetActivityDumpStreamServer struct { - grpc.ServerStream -} - -func (x *securityModuleGetActivityDumpStreamServer) Send(m *ActivityDumpStreamMessage) error { - return x.ServerStream.SendMsg(m) -} +// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
+type SecurityModule_GetActivityDumpStreamServer = grpc.ServerStreamingServer[ActivityDumpStreamMessage] func _SecurityModule_ListSecurityProfiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(SecurityProfileListParams) diff --git a/pkg/security/proto/api/api_vtproto.pb.go b/pkg/security/proto/api/api_vtproto.pb.go index 8ebccda5994a0..b84d6738d678b 100644 --- a/pkg/security/proto/api/api_vtproto.pb.go +++ b/pkg/security/proto/api/api_vtproto.pb.go @@ -1238,6 +1238,16 @@ func (m *EnvironmentStatus) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.UseFentry { + i-- + if m.UseFentry { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } if m.UseRingBuffer { i-- if m.UseRingBuffer { @@ -1542,6 +1552,15 @@ func (m *MetadataMessage) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if len(m.CGroupManager) > 0 { + i -= len(m.CGroupManager) + copy(dAtA[i:], m.CGroupManager) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.CGroupManager))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } if len(m.CGroupID) > 0 { i -= len(m.CGroupID) copy(dAtA[i:], m.CGroupID) @@ -2373,6 +2392,21 @@ func (m *ActivityTreeStatsMessage) MarshalToSizedBufferVT(dAtA []byte) (int, err i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.FlowNodesCount != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.FlowNodesCount)) + i-- + dAtA[i] = 0x40 + } + if m.SyscallNodesCount != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SyscallNodesCount)) + i-- + dAtA[i] = 0x38 + } + if m.IMDSNodesCount != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.IMDSNodesCount)) + i-- + dAtA[i] = 0x30 + } if m.ApproximateSize != 0 { i = protohelpers.EncodeVarint(dAtA, i, uint64(m.ApproximateSize)) i-- @@ -3336,6 +3370,9 @@ func (m *EnvironmentStatus) SizeVT() (n int) { if m.UseRingBuffer { n += 2 } + if m.UseFentry { + n += 2 + } n += len(m.unknownFields) return n } @@ -3489,6 +3526,10 @@ func (m *MetadataMessage) SizeVT() (n int) { if l > 0 { n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) } + l = len(m.CGroupManager) + if l > 0 { + n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) + } n += len(m.unknownFields) return n } @@ -3782,6 +3823,15 @@ func (m *ActivityTreeStatsMessage) SizeVT() (n int) { if m.ApproximateSize != 0 { n += 1 + protohelpers.SizeOfVarint(uint64(m.ApproximateSize)) } + if m.IMDSNodesCount != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.IMDSNodesCount)) + } + if m.SyscallNodesCount != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.SyscallNodesCount)) + } + if m.FlowNodesCount != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.FlowNodesCount)) + } n += len(m.unknownFields) return n } @@ -6753,6 +6803,26 @@ func (m *EnvironmentStatus) UnmarshalVT(dAtA []byte) error { } } m.UseRingBuffer = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UseFentry", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UseFentry = bool(v != 0) default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -7783,6 +7853,38 @@ func (m *MetadataMessage) UnmarshalVT(dAtA []byte) 
error { } m.CGroupID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CGroupManager", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CGroupManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) @@ -9616,6 +9718,63 @@ func (m *ActivityTreeStatsMessage) UnmarshalVT(dAtA []byte) error { break } } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IMDSNodesCount", wireType) + } + m.IMDSNodesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IMDSNodesCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SyscallNodesCount", wireType) + } + m.SyscallNodesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SyscallNodesCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FlowNodesCount", wireType) + } + m.FlowNodesCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FlowNodesCount |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/pkg/security/proto/api/mocks/security_module_client.go b/pkg/security/proto/api/mocks/security_module_client.go index ae287b8ba299d..d282c4ada69bc 100644 --- a/pkg/security/proto/api/mocks/security_module_client.go +++ b/pkg/security/proto/api/mocks/security_module_client.go @@ -166,7 +166,7 @@ func (_m *SecurityModuleClient) DumpProcessCache(ctx context.Context, in *api.Du } // GetActivityDumpStream provides a mock function with given fields: ctx, in, opts -func (_m *SecurityModuleClient) GetActivityDumpStream(ctx context.Context, in *api.ActivityDumpStreamParams, opts ...grpc.CallOption) (api.SecurityModule_GetActivityDumpStreamClient, error) { +func (_m *SecurityModuleClient) GetActivityDumpStream(ctx context.Context, in *api.ActivityDumpStreamParams, opts ...grpc.CallOption) (grpc.ServerStreamingClient[api.ActivityDumpStreamMessage], error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -180,16 +180,16 @@ func (_m *SecurityModuleClient) GetActivityDumpStream(ctx context.Context, in *a panic("no return value specified for GetActivityDumpStream") } - var r0 api.SecurityModule_GetActivityDumpStreamClient + var r0 grpc.ServerStreamingClient[api.ActivityDumpStreamMessage] var r1 error - if rf, ok := ret.Get(0).(func(context.Context, 
*api.ActivityDumpStreamParams, ...grpc.CallOption) (api.SecurityModule_GetActivityDumpStreamClient, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *api.ActivityDumpStreamParams, ...grpc.CallOption) (grpc.ServerStreamingClient[api.ActivityDumpStreamMessage], error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *api.ActivityDumpStreamParams, ...grpc.CallOption) api.SecurityModule_GetActivityDumpStreamClient); ok { + if rf, ok := ret.Get(0).(func(context.Context, *api.ActivityDumpStreamParams, ...grpc.CallOption) grpc.ServerStreamingClient[api.ActivityDumpStreamMessage]); ok { r0 = rf(ctx, in, opts...) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(api.SecurityModule_GetActivityDumpStreamClient) + r0 = ret.Get(0).(grpc.ServerStreamingClient[api.ActivityDumpStreamMessage]) } } @@ -240,7 +240,7 @@ func (_m *SecurityModuleClient) GetConfig(ctx context.Context, in *api.GetConfig } // GetEvents provides a mock function with given fields: ctx, in, opts -func (_m *SecurityModuleClient) GetEvents(ctx context.Context, in *api.GetEventParams, opts ...grpc.CallOption) (api.SecurityModule_GetEventsClient, error) { +func (_m *SecurityModuleClient) GetEvents(ctx context.Context, in *api.GetEventParams, opts ...grpc.CallOption) (grpc.ServerStreamingClient[api.SecurityEventMessage], error) { _va := make([]interface{}, len(opts)) for _i := range opts { _va[_i] = opts[_i] @@ -254,16 +254,16 @@ func (_m *SecurityModuleClient) GetEvents(ctx context.Context, in *api.GetEventP panic("no return value specified for GetEvents") } - var r0 api.SecurityModule_GetEventsClient + var r0 grpc.ServerStreamingClient[api.SecurityEventMessage] var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *api.GetEventParams, ...grpc.CallOption) (api.SecurityModule_GetEventsClient, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, *api.GetEventParams, ...grpc.CallOption) (grpc.ServerStreamingClient[api.SecurityEventMessage], error)); ok { return rf(ctx, in, opts...) } - if rf, ok := ret.Get(0).(func(context.Context, *api.GetEventParams, ...grpc.CallOption) api.SecurityModule_GetEventsClient); ok { + if rf, ok := ret.Get(0).(func(context.Context, *api.GetEventParams, ...grpc.CallOption) grpc.ServerStreamingClient[api.SecurityEventMessage]); ok { r0 = rf(ctx, in, opts...) 
} else { if ret.Get(0) != nil { - r0 = ret.Get(0).(api.SecurityModule_GetEventsClient) + r0 = ret.Get(0).(grpc.ServerStreamingClient[api.SecurityEventMessage]) } } diff --git a/pkg/security/proto/api/mocks/security_module_server.go b/pkg/security/proto/api/mocks/security_module_server.go index ad9a50e6968e3..a5d3429542a9d 100644 --- a/pkg/security/proto/api/mocks/security_module_server.go +++ b/pkg/security/proto/api/mocks/security_module_server.go @@ -7,6 +7,8 @@ import ( api "github.com/DataDog/datadog-agent/pkg/security/proto/api" + grpc "google.golang.org/grpc" + mock "github.com/stretchr/testify/mock" ) @@ -136,7 +138,7 @@ func (_m *SecurityModuleServer) DumpProcessCache(_a0 context.Context, _a1 *api.D } // GetActivityDumpStream provides a mock function with given fields: _a0, _a1 -func (_m *SecurityModuleServer) GetActivityDumpStream(_a0 *api.ActivityDumpStreamParams, _a1 api.SecurityModule_GetActivityDumpStreamServer) error { +func (_m *SecurityModuleServer) GetActivityDumpStream(_a0 *api.ActivityDumpStreamParams, _a1 grpc.ServerStreamingServer[api.ActivityDumpStreamMessage]) error { ret := _m.Called(_a0, _a1) if len(ret) == 0 { @@ -144,7 +146,7 @@ func (_m *SecurityModuleServer) GetActivityDumpStream(_a0 *api.ActivityDumpStrea } var r0 error - if rf, ok := ret.Get(0).(func(*api.ActivityDumpStreamParams, api.SecurityModule_GetActivityDumpStreamServer) error); ok { + if rf, ok := ret.Get(0).(func(*api.ActivityDumpStreamParams, grpc.ServerStreamingServer[api.ActivityDumpStreamMessage]) error); ok { r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) @@ -184,7 +186,7 @@ func (_m *SecurityModuleServer) GetConfig(_a0 context.Context, _a1 *api.GetConfi } // GetEvents provides a mock function with given fields: _a0, _a1 -func (_m *SecurityModuleServer) GetEvents(_a0 *api.GetEventParams, _a1 api.SecurityModule_GetEventsServer) error { +func (_m *SecurityModuleServer) GetEvents(_a0 *api.GetEventParams, _a1 grpc.ServerStreamingServer[api.SecurityEventMessage]) error { ret := _m.Called(_a0, _a1) if len(ret) == 0 { @@ -192,7 +194,7 @@ func (_m *SecurityModuleServer) GetEvents(_a0 *api.GetEventParams, _a1 api.Secur } var r0 error - if rf, ok := ret.Get(0).(func(*api.GetEventParams, api.SecurityModule_GetEventsServer) error); ok { + if rf, ok := ret.Get(0).(func(*api.GetEventParams, grpc.ServerStreamingServer[api.SecurityEventMessage]) error); ok { r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) diff --git a/pkg/security/proto/ebpfless/msg.go b/pkg/security/proto/ebpfless/msg.go index e8210ce48de67..802f9e5bcf9e0 100644 --- a/pkg/security/proto/ebpfless/msg.go +++ b/pkg/security/proto/ebpfless/msg.go @@ -8,10 +8,11 @@ package ebpfless import ( "encoding/json" + "net" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" - "modernc.org/mathutil" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/utils" ) // Mode defines ptrace mode @@ -90,6 +91,12 @@ const ( SyscallTypeMount // SyscallTypeUmount umount/umount2 type SyscallTypeUmount + // SyscallTypeAccept accept + SyscallTypeAccept + // SyscallTypeConnect connect + SyscallTypeConnect + // SyscallTypeBind bind + SyscallTypeBind ) // ContainerContext defines a container context @@ -137,7 +144,7 @@ type ForkSyscallMsg struct { // ExitSyscallMsg defines an exit message type ExitSyscallMsg struct { Code uint32 - Cause model.ExitCause + Cause sharedconsts.ExitCause } // FileSyscallMsg defines a file message 
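Not part of the diff: a minimal sketch of dispatching on the new network syscall types; the Bind, Connect and Accept payload structs (and the SyscallMsg fields that carry them) are added further down in this file.

```go
package ebpflessusage

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/security/proto/ebpfless"
)

// describeNetworkMsg formats the new network syscall messages; the address and
// port come from the embedded MsgSocketInfo defined later in msg.go.
func describeNetworkMsg(msg *ebpfless.SyscallMsg) string {
	switch msg.Type {
	case ebpfless.SyscallTypeBind:
		return fmt.Sprintf("bind %s:%d (af=%d)", msg.Bind.Addr, msg.Bind.Port, msg.Bind.AddressFamily)
	case ebpfless.SyscallTypeConnect:
		return fmt.Sprintf("connect %s:%d (proto=%d)", msg.Connect.Addr, msg.Connect.Port, msg.Connect.Protocol)
	case ebpfless.SyscallTypeAccept:
		return fmt.Sprintf("accept %s:%d", msg.Accept.Addr, msg.Accept.Port)
	default:
		return "not a network syscall"
	}
}
```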
@@ -166,6 +173,12 @@ type PipeSyscallFakeMsg struct { FdsPtr uint64 } +// SocketSyscallFakeMsg represents the socket message +type SocketSyscallFakeMsg struct { + AddressFamily uint16 + Protocol uint16 +} + // ChdirSyscallMsg defines a chdir message type ChdirSyscallMsg struct { Dir FileSyscallMsg @@ -282,7 +295,7 @@ type UnloadModuleSyscallMsg struct { // SpanContext stores a span context (if any) type SpanContext struct { SpanID uint64 - TraceID mathutil.Int128 + TraceID utils.TraceID } // MountSyscallMsg defines a mount message @@ -297,6 +310,31 @@ type UmountSyscallMsg struct { Path string } +// MsgSocketInfo defines the base information for a socket message +type MsgSocketInfo struct { + AddressFamily uint16 + Addr net.IP + Port uint16 +} + +// BindSyscallMsg defines a bind message +type BindSyscallMsg struct { + MsgSocketInfo + Protocol uint16 +} + +// ConnectSyscallMsg defines a connect message +type ConnectSyscallMsg struct { + MsgSocketInfo + Protocol uint16 +} + +// AcceptSyscallMsg defines an accept message +type AcceptSyscallMsg struct { + MsgSocketInfo + SocketFd int32 +} + // SyscallMsg defines a syscall message type SyscallMsg struct { Type SyscallType @@ -328,10 +366,14 @@ type SyscallMsg struct { Chdir *ChdirSyscallMsg `json:",omitempty"` Mount *MountSyscallMsg `json:",omitempty"` Umount *UmountSyscallMsg `json:",omitempty"` + Bind *BindSyscallMsg `json:",omitempty"` + Connect *ConnectSyscallMsg `json:",omitempty"` + Accept *AcceptSyscallMsg `json:",omitempty"` // internals - Dup *DupSyscallFakeMsg `json:",omitempty"` - Pipe *PipeSyscallFakeMsg `json:",omitempty"` + Dup *DupSyscallFakeMsg `json:",omitempty"` + Pipe *PipeSyscallFakeMsg `json:",omitempty"` + Socket *SocketSyscallFakeMsg `json:",omitempty"` } // String returns string representation diff --git a/pkg/security/ptracer/container_context.go b/pkg/security/ptracer/container_context.go index 76748eb8540dd..87aa142aee834 100644 --- a/pkg/security/ptracer/container_context.go +++ b/pkg/security/ptracer/container_context.go @@ -9,6 +9,9 @@ package ptracer import ( "encoding/json" + "fmt" + "io" + "net/http" "os" "time" @@ -23,9 +26,19 @@ type ECSMetadata struct { } func retrieveECSMetadata(url string) (*ECSMetadata, error) { - body, err := simpleHTTPRequest(url) + res, err := http.Get(url) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to get ECS metadata endpoint response: %w", err) + } + + body, err := io.ReadAll(res.Body) + _ = res.Body.Close() + if err != nil { + return nil, fmt.Errorf("failed to read ECS metadata endpoint response: %w", err) + } + + if res.StatusCode > 299 { + return nil, fmt.Errorf("ECS metadata endpoint returned an invalid http code: %d", res.StatusCode) } data := ECSMetadata{} diff --git a/pkg/security/ptracer/cws.go b/pkg/security/ptracer/cws.go index 2082190ddf400..53cef064a2eaf 100644 --- a/pkg/security/ptracer/cws.go +++ b/pkg/security/ptracer/cws.go @@ -285,6 +285,7 @@ func registerSyscallHandlers() (map[int]syscallHandler, []string) { syscalls := registerFIMHandlers(handlers) syscalls = append(syscalls, registerProcessHandlers(handlers)...) syscalls = append(syscalls, registerERPCHandlers(handlers)...) + syscalls = append(syscalls, registerNetworkHandlers(handlers)...) 
return handlers, syscalls } diff --git a/pkg/security/ptracer/fim_handlers.go b/pkg/security/ptracer/fim_handlers.go index 154a0f88907d0..8a2f2102e05b5 100644 --- a/pkg/security/ptracer/fim_handlers.go +++ b/pkg/security/ptracer/fim_handlers.go @@ -612,6 +612,7 @@ func handlePipe2(tracer *Tracer, _ *Process, msg *ebpfless.SyscallMsg, regs sysc func handleClose(tracer *Tracer, process *Process, _ *ebpfless.SyscallMsg, regs syscall.PtraceRegs, _ bool) error { fd := tracer.ReadArgInt32(regs, 0) delete(process.FdRes.Fd, fd) + delete(process.FdToSocket, fd) return nil } diff --git a/pkg/security/ptracer/hooks.go b/pkg/security/ptracer/hooks.go index bf6589135777c..350d58b4ff960 100644 --- a/pkg/security/ptracer/hooks.go +++ b/pkg/security/ptracer/hooks.go @@ -16,7 +16,7 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/security/proto/ebpfless" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "golang.org/x/sys/unix" ) @@ -169,13 +169,13 @@ func (ctx *CWSPtracerCtx) handleExit(process *Process, waitStatus *syscall.WaitS if process.Pid == process.Tgid && waitStatus != nil { exitCtx := &ebpfless.ExitSyscallMsg{} if waitStatus.Exited() { - exitCtx.Cause = model.ExitExited + exitCtx.Cause = sharedconsts.ExitExited exitCtx.Code = uint32(waitStatus.ExitStatus()) } else if waitStatus.CoreDump() { - exitCtx.Cause = model.ExitCoreDumped + exitCtx.Cause = sharedconsts.ExitCoreDumped exitCtx.Code = uint32(waitStatus.Signal()) } else if waitStatus.Signaled() { - exitCtx.Cause = model.ExitSignaled + exitCtx.Cause = sharedconsts.ExitSignaled exitCtx.Code = uint32(waitStatus.Signal()) } else { exitCtx.Code = uint32(waitStatus.Signal()) diff --git a/pkg/security/ptracer/network_handlers.go b/pkg/security/ptracer/network_handlers.go new file mode 100644 index 0000000000000..a9b5c09685b80 --- /dev/null +++ b/pkg/security/ptracer/network_handlers.go @@ -0,0 +1,276 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +// Package ptracer holds the start command of CWS injector +package ptracer + +import ( + "encoding/binary" + "errors" + "golang.org/x/sys/unix" + "net" + "syscall" + + "github.com/DataDog/datadog-agent/pkg/security/proto/ebpfless" +) + +func registerNetworkHandlers(handlers map[int]syscallHandler) []string { + processHandlers := []syscallHandler{ + { + ID: syscallID{ID: AcceptNr, Name: "accept"}, + Func: handleAccept, + ShouldSend: shouldSendAccept, + RetFunc: handleAcceptRet, + }, + { + ID: syscallID{ID: Accept4Nr, Name: "accept4"}, + Func: handleAccept, + ShouldSend: shouldSendAccept, + RetFunc: handleAcceptRet, + }, + { + ID: syscallID{ID: BindNr, Name: "bind"}, + Func: handleBind, + ShouldSend: shouldSendBind, + RetFunc: nil, + }, + { + ID: syscallID{ID: ConnectNr, Name: "connect"}, + Func: handleConnect, + ShouldSend: shouldSendConnect, + RetFunc: nil, + }, + { + ID: syscallID{ID: SocketNr, Name: "socket"}, + Func: handleSocket, + ShouldSend: nil, + RetFunc: handleSocketRet, + }, + } + + syscallList := []string{} + for _, h := range processHandlers { + if h.ID.ID >= 0 { // insert only available syscalls + handlers[h.ID.ID] = h + syscallList = append(syscallList, h.ID.Name) + } + } + return syscallList +} + +type addrInfo struct { + ip net.IP + port uint16 + af uint16 +} + +func parseAddrInfo(tracer *Tracer, process *Process, regs syscall.PtraceRegs, addrlen int32) (*addrInfo, error) { + if addrlen < 16 { + return nil, errors.New("invalid address length") + } + + if addrlen > 28 { + addrlen = 28 + } + + data, err := tracer.ReadArgData(process.Pid, regs, 1, uint(addrlen)) + if err != nil { + return nil, err + } + + var addr addrInfo + + addr.af = binary.NativeEndian.Uint16(data[0:2]) + addr.port = binary.BigEndian.Uint16(data[2:4]) + + if addr.af == unix.AF_INET { + addr.ip = data[4:8] + } else if addr.af == unix.AF_INET6 { + if addrlen < 28 { + return nil, errors.New("invalid address length") + } + + addr.ip = data[8:24] + } else { + return nil, errors.New("unsupported address family") + } + + return &addr, nil +} + +func handleBind(tracer *Tracer, process *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs, _ bool) error { + socketfd := tracer.ReadArgUint32(regs, 0) + + socketInfo, ok := process.FdToSocket[int32(socketfd)] + if !ok { + return errors.New("unable to find socket") + } + + var addrlen int32 + if socketInfo.AddressFamily == unix.AF_INET { + addrlen = 16 + } else if socketInfo.AddressFamily == unix.AF_INET6 { + addrlen = 28 + } else if socketInfo.AddressFamily == unix.AF_UNIX { + addrlen = 0 + } else { + return errors.New("unsupported address family") + } + + if socketInfo.AddressFamily == unix.AF_UNIX { + msg.Type = ebpfless.SyscallTypeBind + msg.Bind = &ebpfless.BindSyscallMsg{ + MsgSocketInfo: ebpfless.MsgSocketInfo{ + AddressFamily: unix.AF_UNIX, + Addr: net.IP{}, + Port: 0, + }, + Protocol: 0, + } + return nil + } + + addr, err := parseAddrInfo(tracer, process, regs, addrlen) + if err != nil { + return err + } + + msg.Type = ebpfless.SyscallTypeBind + msg.Bind = &ebpfless.BindSyscallMsg{ + MsgSocketInfo: ebpfless.MsgSocketInfo{ + AddressFamily: addr.af, + Addr: addr.ip, + Port: addr.port, + }, + Protocol: socketInfo.Protocol, + } + + socketInfo.BoundToPort = addr.port + process.FdToSocket[int32(socketfd)] = socketInfo + + return nil +} + +func handleConnect(tracer *Tracer, process *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs, _ bool) error { + addrlen := tracer.ReadArgInt32(regs, 2) + addr, err := parseAddrInfo(tracer, 
process, regs, int32(addrlen)) + + if err != nil { + return err + } + socketfd := int32(tracer.ReadArgUint32(regs, 0)) + + m, ok := process.FdToSocket[socketfd] + + if !ok { + return errors.New("unable to find protocol") + } + + msg.Type = ebpfless.SyscallTypeConnect + msg.Connect = &ebpfless.ConnectSyscallMsg{ + MsgSocketInfo: ebpfless.MsgSocketInfo{ + AddressFamily: addr.af, + Addr: addr.ip, + Port: addr.port, + }, + Protocol: m.Protocol, + } + + return nil +} + +func handleSocket(tracer *Tracer, _ *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs, _ bool) error { + socketMsg := &ebpfless.SocketSyscallFakeMsg{ + AddressFamily: uint16(tracer.ReadArgInt32(regs, 0)), + } + + if socketMsg.AddressFamily != unix.AF_INET && socketMsg.AddressFamily != unix.AF_INET6 && socketMsg.AddressFamily != unix.AF_UNIX { + return nil + } + + protocol := int16(tracer.ReadArgInt32(regs, 1)) + // This argument can be masked, so just get what we need + protocol &= 0b1111 + + switch protocol { + case unix.SOCK_STREAM: + socketMsg.Protocol = unix.IPPROTO_TCP + case unix.SOCK_DGRAM: + socketMsg.Protocol = unix.IPPROTO_UDP + default: + return nil + } + + msg.Socket = socketMsg + return nil +} + +func handleAccept(tracer *Tracer, _ *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs, _ bool) error { + fd := tracer.ReadArgInt32(regs, 0) + + msg.Accept = &ebpfless.AcceptSyscallMsg{ + SocketFd: fd, + } + + return nil +} + +// Handle returns +func handleAcceptRet(tracer *Tracer, process *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs, _ bool) error { + addrlen, err := tracer.ReadArgInt32Ptr(process.Pid, regs, 2) + if err != nil { + return err + } + + addr, err := parseAddrInfo(tracer, process, regs, addrlen) + if err != nil { + return err + } + + m, ok := process.FdToSocket[msg.Accept.SocketFd] + if !ok { + return errors.New("unable to find socket") + } + + msg.Type = ebpfless.SyscallTypeAccept + + msg.Accept = &ebpfless.AcceptSyscallMsg{ + MsgSocketInfo: ebpfless.MsgSocketInfo{ + AddressFamily: addr.af, + Addr: addr.ip, + Port: m.BoundToPort, + }, + } + + return nil +} + +func handleSocketRet(tracer *Tracer, process *Process, msg *ebpfless.SyscallMsg, regs syscall.PtraceRegs, _ bool) error { + ret := int32(tracer.ReadRet(regs)) + + if msg.Socket != nil && ret != -1 { + process.FdToSocket[ret] = SocketInfo{ + AddressFamily: msg.Socket.AddressFamily, + Protocol: msg.Socket.Protocol, + } + } + + return nil +} + +// Should send messages +func shouldSendConnect(msg *ebpfless.SyscallMsg) bool { + return msg.Retval >= 0 || msg.Retval == -int64(syscall.EACCES) || msg.Retval == -int64(syscall.EPERM) || msg.Retval == -int64(syscall.ECONNREFUSED) || msg.Retval == -int64(syscall.ETIMEDOUT) || msg.Retval == -int64(syscall.EINPROGRESS) +} + +func shouldSendAccept(msg *ebpfless.SyscallMsg) bool { + return msg.Retval >= 0 || msg.Retval == -int64(syscall.EACCES) || msg.Retval == -int64(syscall.EPERM) || msg.Retval == -int64(syscall.ECONNABORTED) +} + +func shouldSendBind(msg *ebpfless.SyscallMsg) bool { + return msg.Retval >= 0 || msg.Retval == -int64(syscall.EACCES) || msg.Retval == -int64(syscall.EPERM) || msg.Retval == -int64(syscall.EADDRINUSE) || msg.Retval == -int64(syscall.EFAULT) +} diff --git a/pkg/security/ptracer/process.go b/pkg/security/ptracer/process.go index bb7d7549feec2..b3273d0678d9a 100644 --- a/pkg/security/ptracer/process.go +++ b/pkg/security/ptracer/process.go @@ -53,21 +53,31 @@ func (f *FSResources) clone() *FSResources { } } +// SocketInfo represents the status of an open socket 
+type SocketInfo struct { + AddressFamily uint16 + Protocol uint16 + BoundToPort uint16 +} + // Process represents a process context type Process struct { - Pid int - Tgid int - Nr map[int]*ebpfless.SyscallMsg - FdRes *FdResources - FsRes *FSResources + Pid int + Tgid int + Nr map[int]*ebpfless.SyscallMsg + FdRes *FdResources + FsRes *FSResources + FdToSocket map[int32]SocketInfo } // NewProcess returns a new process func NewProcess(pid int) *Process { return &Process{ - Pid: pid, - Tgid: pid, - Nr: make(map[int]*ebpfless.SyscallMsg), + Pid: pid, + Tgid: pid, + Nr: make(map[int]*ebpfless.SyscallMsg), + FdToSocket: make(map[int32]SocketInfo), + FdRes: &FdResources{ Fd: make(map[int32]string), FileHandleCache: make(map[fileHandleKey]*fileHandleVal), diff --git a/pkg/security/ptracer/ptracer.go b/pkg/security/ptracer/ptracer.go index 07d8cdf224172..42719867d3621 100644 --- a/pkg/security/ptracer/ptracer.go +++ b/pkg/security/ptracer/ptracer.go @@ -92,7 +92,6 @@ func processVMReadv(pid int, addr uintptr, data []byte) (int, error) { remoteIov := []unix.RemoteIovec{ {Base: addr, Len: size}, } - return unix.ProcessVMReadv(pid, localIov, remoteIov, 0) } diff --git a/pkg/security/ptracer/span.go b/pkg/security/ptracer/span.go index a45d7bb2ac903..576e89570d5ed 100644 --- a/pkg/security/ptracer/span.go +++ b/pkg/security/ptracer/span.go @@ -12,7 +12,7 @@ import ( "encoding/binary" "github.com/DataDog/datadog-agent/pkg/security/proto/ebpfless" - "modernc.org/mathutil" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/utils" ) // SpanTLS holds the needed informations to retrieve spans on a TLS @@ -47,9 +47,9 @@ func fillSpanContext(tracer *Tracer, pid int, tid int, span *SpanTLS) *ebpfless. return &ebpfless.SpanContext{ SpanID: binary.NativeEndian.Uint64(pSpan[0:8]), - TraceID: mathutil.Int128{ - Lo: int64(binary.NativeEndian.Uint64(pSpan[8:16])), - Hi: int64(binary.NativeEndian.Uint64(pSpan[16:24])), + TraceID: utils.TraceID{ + Lo: binary.NativeEndian.Uint64(pSpan[8:16]), + Hi: binary.NativeEndian.Uint64(pSpan[16:24]), }, } } diff --git a/pkg/security/ptracer/syscalls_amd64.go b/pkg/security/ptracer/syscalls_amd64.go index a531bf3326f86..f14fe8f00af97 100644 --- a/pkg/security/ptracer/syscalls_amd64.go +++ b/pkg/security/ptracer/syscalls_amd64.go @@ -74,8 +74,13 @@ const ( IoctlNr = unix.SYS_IOCTL // IoctlNr defines the syscall ID for amd64 MountNr = unix.SYS_MOUNT // MountNr defines the syscall ID for amd64 Umount2Nr = unix.SYS_UMOUNT2 // Umount2Nr defines the syscall ID for amd64 - PipeNr = unix.SYS_PIPE // PipeNr defines the syscall ID for arm64 - Pipe2Nr = unix.SYS_PIPE2 // Pipe2Nr defines the syscall ID for arm64 + PipeNr = unix.SYS_PIPE // PipeNr defines the syscall ID for amd64 + Pipe2Nr = unix.SYS_PIPE2 // Pipe2Nr defines the syscall ID for amd64 + ConnectNr = unix.SYS_CONNECT // ConnectNr defines the syscall ID for amd64 + Accept4Nr = unix.SYS_ACCEPT4 // Accept4Nr defines the syscall ID for arm64 + AcceptNr = unix.SYS_ACCEPT // AcceptNr defines the syscall ID for amd64 + BindNr = unix.SYS_BIND // BindNr defines the syscall ID for amd64 + SocketNr = unix.SYS_SOCKET // SocketNr defines the syscall ID for amd64 ) // https://github.com/torvalds/linux/blob/v5.0/arch/x86/entry/entry_64.S#L126 diff --git a/pkg/security/ptracer/syscalls_arm64.go b/pkg/security/ptracer/syscalls_arm64.go index 0badb12ee3e27..eecc5e63edefc 100644 --- a/pkg/security/ptracer/syscalls_arm64.go +++ b/pkg/security/ptracer/syscalls_arm64.go @@ -15,9 +15,9 @@ import ( const ( OpenatNr = unix.SYS_OPENAT // OpenatNr 
defines the syscall ID for arm64 - Openat2Nr = unix.SYS_OPENAT2 // Openat2Nr defines the syscall ID for amd64 - NameToHandleAtNr = unix.SYS_NAME_TO_HANDLE_AT // NameToHandleAtNr defines the syscall ID for amd64 - OpenByHandleAtNr = unix.SYS_OPEN_BY_HANDLE_AT // OpenByHandleAtNr defines the syscall ID for amd64 + Openat2Nr = unix.SYS_OPENAT2 // Openat2Nr defines the syscall ID for arm64 + NameToHandleAtNr = unix.SYS_NAME_TO_HANDLE_AT // NameToHandleAtNr defines the syscall ID for arm64 + OpenByHandleAtNr = unix.SYS_OPEN_BY_HANDLE_AT // OpenByHandleAtNr defines the syscall ID for arm64 ExecveNr = unix.SYS_EXECVE // ExecveNr defines the syscall ID for arm64 ExecveatNr = unix.SYS_EXECVEAT // ExecveatNr defines the syscall ID for arm64 CloneNr = unix.SYS_CLONE // CloneNr defines the syscall ID for arm64 @@ -58,6 +58,11 @@ const ( MountNr = unix.SYS_MOUNT // MountNr defines the syscall ID for arm64 Umount2Nr = unix.SYS_UMOUNT2 // Umount2Nr defines the syscall ID for arm64 Pipe2Nr = unix.SYS_PIPE2 // Pipe2Nr defines the syscall ID for arm64 + ConnectNr = unix.SYS_CONNECT // ConnectNr defines the syscall ID for arm64 + BindNr = unix.SYS_BIND // BindNr defines the syscall ID for arm64 + AcceptNr = unix.SYS_ACCEPT // AcceptNr defines the syscall ID for arm64 + Accept4Nr = unix.SYS_ACCEPT4 // Accept4Nr defines the syscall ID for arm64 + SocketNr = unix.SYS_SOCKET // SocketNr defines the syscall ID for arm64 OpenNr = -1 // OpenNr not available on arm64 ForkNr = -2 // ForkNr not available on arm64 diff --git a/pkg/security/ptracer/utils.go b/pkg/security/ptracer/utils.go index 3d79037582aef..a86d87ca61893 100644 --- a/pkg/security/ptracer/utils.go +++ b/pkg/security/ptracer/utils.go @@ -15,8 +15,6 @@ import ( "fmt" "io" "math/rand" - "net" - "net/url" "os" "path" "path/filepath" @@ -29,7 +27,7 @@ import ( usergrouputils "github.com/DataDog/datadog-agent/pkg/security/common/usergrouputils" "github.com/DataDog/datadog-agent/pkg/security/proto/ebpfless" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/util/safeelf" ) @@ -104,64 +102,6 @@ func getNSID() uint64 { return stat.Ino } -// simpleHTTPRequest used to avoid importing the crypto golang package -func simpleHTTPRequest(uri string) ([]byte, error) { - u, err := url.Parse(uri) - if err != nil { - return nil, err - } - - addr := u.Host - if u.Port() == "" { - addr += ":80" - } - - tcpAddr, err := net.ResolveTCPAddr("tcp", addr) - if err != nil { - return nil, err - } - - client, err := net.DialTCP("tcp", nil, tcpAddr) - if err != nil { - return nil, err - } - defer client.Close() - - path := u.Path - if path == "" { - path = "/" - } - - req := fmt.Sprintf("GET %s?%s HTTP/1.0\nHost: %s\nConnection: close\n\n", path, u.RawQuery, u.Hostname()) - - _, err = client.Write([]byte(req)) - if err != nil { - return nil, err - } - - var body []byte - buf := make([]byte, 256) - - for { - n, err := client.Read(buf) - if err != nil { - if err != io.EOF { - return nil, err - } - break - } - body = append(body, buf[:n]...) 
- } - - offset := bytes.Index(body, []byte{'\r', '\n', '\r', '\n'}) - if offset < 0 { - - return nil, errors.New("unable to parse http response") - } - - return body[offset+2:], nil -} - func fillProcessCwd(process *Process) error { cwd, err := os.Readlink(fmt.Sprintf("/proc/%d/cwd", process.Pid)) if err != nil { @@ -340,13 +280,13 @@ func getPidTTY(pid int) string { func truncateArgs(list []string) ([]string, bool) { truncated := false - if len(list) > model.MaxArgsEnvsSize { - list = list[:model.MaxArgsEnvsSize] + if len(list) > sharedconsts.MaxArgsEnvsSize { + list = list[:sharedconsts.MaxArgsEnvsSize] truncated = true } for i, l := range list { - if len(l) > model.MaxArgEnvSize { - list[i] = l[:model.MaxArgEnvSize-4] + "..." + if len(l) > sharedconsts.MaxArgEnvSize { + list[i] = l[:sharedconsts.MaxArgEnvSize-4] + "..." truncated = true } } @@ -410,8 +350,8 @@ func truncateEnvs(it StringIterator) ([]string, bool) { if len(text) > 0 { envCounter++ if matchesOnePrefix(text, priorityEnvsPrefixes) { - if len(text) > model.MaxArgEnvSize { - text = text[:model.MaxArgEnvSize-4] + "..." + if len(text) > sharedconsts.MaxArgEnvSize { + text = text[:sharedconsts.MaxArgEnvSize-4] + "..." truncated = true } priorityEnvs = append(priorityEnvs, text) @@ -421,8 +361,8 @@ func truncateEnvs(it StringIterator) ([]string, bool) { it.Reset() - if envCounter > model.MaxArgsEnvsSize { - envCounter = model.MaxArgsEnvsSize + if envCounter > sharedconsts.MaxArgsEnvsSize { + envCounter = sharedconsts.MaxArgsEnvsSize } // second pass collecting @@ -430,7 +370,7 @@ func truncateEnvs(it StringIterator) ([]string, bool) { envs = append(envs, priorityEnvs...) for it.Next() { - if len(envs) >= model.MaxArgsEnvsSize { + if len(envs) >= sharedconsts.MaxArgsEnvsSize { return envs, true } @@ -438,8 +378,8 @@ func truncateEnvs(it StringIterator) ([]string, bool) { if len(text) > 0 { // if it matches one prefix, it's already in the envs through priority envs if !matchesOnePrefix(text, priorityEnvsPrefixes) { - if len(text) > model.MaxArgEnvSize { - text = text[:model.MaxArgEnvSize-4] + "..." + if len(text) > sharedconsts.MaxArgEnvSize { + text = text[:sharedconsts.MaxArgEnvSize-4] + "..." 
truncated = true } envs = append(envs, text) diff --git a/pkg/security/reporter/reporter.go b/pkg/security/reporter/reporter.go index 227f94ac456ed..49ebfbf900aa0 100644 --- a/pkg/security/reporter/reporter.go +++ b/pkg/security/reporter/reporter.go @@ -12,6 +12,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" logsconfig "github.com/DataDog/datadog-agent/comp/logs/agent/config" + compression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/logs/auditor" "github.com/DataDog/datadog-agent/pkg/logs/client" @@ -41,18 +42,18 @@ func (r *RuntimeReporter) ReportRaw(content []byte, service string, tags ...stri } // NewCWSReporter returns a new CWS reported based on the fields necessary to communicate with the intake -func NewCWSReporter(hostname string, stopper startstop.Stopper, endpoints *logsconfig.Endpoints, context *client.DestinationsContext) (seccommon.RawReporter, error) { - return newReporter(hostname, stopper, "runtime-security-agent", "runtime-security", endpoints, context) +func NewCWSReporter(hostname string, stopper startstop.Stopper, endpoints *logsconfig.Endpoints, context *client.DestinationsContext, compression compression.Component) (seccommon.RawReporter, error) { + return newReporter(hostname, stopper, "runtime-security-agent", "runtime-security", endpoints, context, compression) } -func newReporter(hostname string, stopper startstop.Stopper, sourceName, sourceType string, endpoints *logsconfig.Endpoints, context *client.DestinationsContext) (seccommon.RawReporter, error) { +func newReporter(hostname string, stopper startstop.Stopper, sourceName, sourceType string, endpoints *logsconfig.Endpoints, context *client.DestinationsContext, compression compression.Component) (seccommon.RawReporter, error) { // setup the auditor auditor := auditor.NewNullAuditor() auditor.Start() stopper.Add(auditor) // setup the pipeline provider that provides pairs of processor and sender - pipelineProvider := pipeline.NewProvider(4, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog()) + pipelineProvider := pipeline.NewProvider(4, auditor, &diagnostic.NoopMessageReceiver{}, nil, endpoints, context, agentimpl.NewStatusProvider(), hostnameimpl.NewHostnameService(), pkgconfigsetup.Datadog(), compression) pipelineProvider.Start() stopper.Add(pipelineProvider) diff --git a/pkg/security/resolvers/cgroup/resolver.go b/pkg/security/resolvers/cgroup/resolver.go index dcb1de1e6d378..de710c7e7e647 100644 --- a/pkg/security/resolvers/cgroup/resolver.go +++ b/pkg/security/resolvers/cgroup/resolver.go @@ -10,10 +10,14 @@ package cgroup import ( "context" + "fmt" "sync" "github.com/hashicorp/golang-lru/v2/simplelru" + "github.com/DataDog/datadog-go/v5/statsd" + + "github.com/DataDog/datadog-agent/pkg/security/metrics" cgroupModel "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup/model" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" @@ -31,6 +35,10 @@ const ( CGroupCreated // CGroupMaxEvent is used cap the event ID CGroupMaxEvent + + maxhostWorkloadEntries = 1024 + maxContainerWorkloadEntries = 1024 + maxCgroupEntries = 2048 ) // ResolverInterface defines the interface implemented by a cgroup resolver 
@@ -39,7 +47,6 @@ type ResolverInterface interface { AddPID(*model.ProcessCacheEntry) GetWorkload(containerutils.ContainerID) (*cgroupModel.CacheEntry, bool) DelPID(uint32) - DelPIDWithID(containerutils.ContainerID, uint32) Len() int RegisterListener(Event, utils.Listener[*cgroupModel.CacheEntry]) error } @@ -48,15 +55,17 @@ type ResolverInterface interface { type Resolver struct { *utils.Notifier[Event, *cgroupModel.CacheEntry] sync.Mutex + statsdClient statsd.ClientInterface cgroups *simplelru.LRU[model.PathKey, *model.CGroupContext] hostWorkloads *simplelru.LRU[containerutils.CGroupID, *cgroupModel.CacheEntry] containerWorkloads *simplelru.LRU[containerutils.ContainerID, *cgroupModel.CacheEntry] } // NewResolver returns a new cgroups monitor -func NewResolver() (*Resolver, error) { +func NewResolver(statsdClient statsd.ClientInterface) (*Resolver, error) { cr := &Resolver{ - Notifier: utils.NewNotifier[Event, *cgroupModel.CacheEntry](), + Notifier: utils.NewNotifier[Event, *cgroupModel.CacheEntry](), + statsdClient: statsdClient, } cleanup := func(value *cgroupModel.CacheEntry) { @@ -67,21 +76,21 @@ func NewResolver() (*Resolver, error) { } var err error - cr.hostWorkloads, err = simplelru.NewLRU(1024, func(_ containerutils.CGroupID, value *cgroupModel.CacheEntry) { + cr.hostWorkloads, err = simplelru.NewLRU(maxhostWorkloadEntries, func(_ containerutils.CGroupID, value *cgroupModel.CacheEntry) { cleanup(value) }) if err != nil { return nil, err } - cr.containerWorkloads, err = simplelru.NewLRU(1024, func(_ containerutils.ContainerID, value *cgroupModel.CacheEntry) { + cr.containerWorkloads, err = simplelru.NewLRU(maxContainerWorkloadEntries, func(_ containerutils.ContainerID, value *cgroupModel.CacheEntry) { cleanup(value) }) if err != nil { return nil, err } - cr.cgroups, err = simplelru.NewLRU(2048, func(_ model.PathKey, _ *model.CGroupContext) {}) + cr.cgroups, err = simplelru.NewLRU(maxCgroupEntries, func(_ model.PathKey, _ *model.CGroupContext) {}) if err != nil { return nil, err } @@ -166,17 +175,6 @@ func (cr *Resolver) DelPID(pid uint32) { } } -// DelPIDWithID removes a PID from the cgroup cache entry referenced by the provided ID -func (cr *Resolver) DelPIDWithID(id containerutils.ContainerID, pid uint32) { - cr.Lock() - defer cr.Unlock() - - entry, exists := cr.containerWorkloads.Get(id) - if exists { - cr.deleteWorkloadPID(pid, entry) - } -} - // deleteWorkloadPID removes a PID from a workload func (cr *Resolver) deleteWorkloadPID(pid uint32, workload *cgroupModel.CacheEntry) { workload.Lock() @@ -201,3 +199,29 @@ func (cr *Resolver) Len() int { return cr.cgroups.Len() } + +// SendStats sends stats +func (cr *Resolver) SendStats() error { + cr.Lock() + defer cr.Unlock() + + if val := float64(cr.containerWorkloads.Len()); val > 0 { + if err := cr.statsdClient.Gauge(metrics.MetricCGroupResolverActiveContainerWorkloads, val, []string{}, 1.0); err != nil { + return fmt.Errorf("couldn't send MetricCGroupResolverActiveContainerWorkloads: %w", err) + } + } + + if val := float64(cr.hostWorkloads.Len()); val > 0 { + if err := cr.statsdClient.Gauge(metrics.MetricCGroupResolverActiveHostWorkloads, val, []string{}, 1.0); err != nil { + return fmt.Errorf("couldn't send MetricCGroupResolverActiveHostWorkloads: %w", err) + } + } + + if val := float64(cr.cgroups.Len()); val > 0 { + if err := cr.statsdClient.Gauge(metrics.MetricCGroupResolverActiveCGroups, val, []string{}, 1.0); err != nil { + return fmt.Errorf("couldn't send MetricCGroupResolverActiveCGroups: %w", err) + } + } + + return nil 
+} diff --git a/pkg/security/resolvers/container/resolver.go b/pkg/security/resolvers/container/resolver.go index 9176c87e54e8a..02b51b05fb18c 100644 --- a/pkg/security/resolvers/container/resolver.go +++ b/pkg/security/resolvers/container/resolver.go @@ -15,10 +15,31 @@ import ( ) // Resolver is used to resolve the container context of the events -type Resolver struct{} +type Resolver struct { + fs *utils.CGroupFS +} + +// New creates a new container resolver +func New() *Resolver { + return &Resolver{ + fs: utils.NewCGroupFS(), + } +} + +// GetContainerContext returns the container id, cgroup context, and cgroup sysfs path of the given pid +func (cr *Resolver) GetContainerContext(pid uint32) (containerutils.ContainerID, model.CGroupContext, string, error) { + // Parse /proc/[pid]/task/[pid]/cgroup and /sys/fs/cgroup/[cgroup] + id, ctx, path, err := cr.fs.FindCGroupContext(pid, pid) + if err != nil { + return "", model.CGroupContext{}, "", err + } -// GetContainerContext returns the container id of the given pid along with its flags -func (cr *Resolver) GetContainerContext(pid uint32) (containerutils.ContainerID, model.CGroupContext, error) { - // Parse /proc/[pid]/task/[pid]/cgroup - return utils.GetProcContainerContext(pid, pid) + return id, model.CGroupContext{ + CGroupID: ctx.CGroupID, + CGroupFlags: ctx.CGroupFlags, + CGroupFile: model.PathKey{ + Inode: ctx.CGroupFileInode, + MountID: ctx.CGroupFileMountID, + }, + }, path, nil } diff --git a/pkg/security/resolvers/dentry/resolver.go b/pkg/security/resolvers/dentry/resolver.go index e71c2e8f6f205..61cb7672999ca 100644 --- a/pkg/security/resolvers/dentry/resolver.go +++ b/pkg/security/resolvers/dentry/resolver.go @@ -21,7 +21,6 @@ import ( "github.com/DataDog/datadog-go/v5/statsd" lib "github.com/cilium/ebpf" - lru "github.com/hashicorp/golang-lru/v2" "go.uber.org/atomic" "golang.org/x/sys/unix" @@ -34,6 +33,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/probe/managerhelper" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/cache" ) var ( @@ -57,7 +57,7 @@ type Resolver struct { erpcStats [2]*lib.Map bufferSelector *lib.Map activeERPCStatsBuffer uint32 - cache map[uint32]*lru.Cache[model.PathKey, PathEntry] + cache *cache.TwoLayersLRU[uint32, model.PathKey, PathEntry] erpc *erpc.ERPC erpcSegment []byte erpcSegmentSize int @@ -147,6 +147,8 @@ func (dr *Resolver) SendStats() error { } } + _ = dr.statsdClient.Gauge(metrics.MetricDentryCacheSize, float64(dr.cache.Len()), []string{}, 1) + return dr.sendERPCStats() } @@ -183,40 +185,21 @@ func (dr *Resolver) sendERPCStats() error { // DelCacheEntries removes all the entries belonging to a mountID func (dr *Resolver) DelCacheEntries(mountID uint32) { - delete(dr.cache, mountID) + dr.cache.RemoveKey1(mountID) } func (dr *Resolver) lookupInodeFromCache(pathKey model.PathKey) (PathEntry, error) { - entries, exists := dr.cache[pathKey.MountID] + entry, exists := dr.cache.Get(pathKey.MountID, pathKey) if !exists { return PathEntry{}, ErrEntryNotFound } - - entry, exists := entries.Get(pathKey) - if !exists { - return PathEntry{}, ErrEntryNotFound - } - return entry, nil } // We need to cache inode by inode instead of caching the whole path in order to be // able to invalidate the whole path if one of its element got rename or removed. 
-func (dr *Resolver) cacheInode(key model.PathKey, path PathEntry) error { - entries, exists := dr.cache[key.MountID] - if !exists { - var err error - - entries, err = lru.New[model.PathKey, PathEntry](dr.config.DentryCacheSize) - if err != nil { - return err - } - dr.cache[key.MountID] = entries - } - - entries.Add(key, path) - - return nil +func (dr *Resolver) cacheInode(key model.PathKey, path PathEntry) { + dr.cache.Add(key.MountID, key, path) } // ResolveNameFromCache returns the name @@ -270,8 +253,7 @@ func (dr *Resolver) ResolveNameFromMap(pathKey model.PathKey) (string, error) { if !IsFakeInode(pathKey.Inode) { cacheEntry := newPathEntry(pathLeaf.Parent, name) - - _ = dr.cacheInode(pathKey, cacheEntry) + dr.cacheInode(pathKey, cacheEntry) } return name, nil @@ -478,7 +460,7 @@ func (dr *Resolver) cacheEntries(keys []model.PathKey, names []string) error { cacheEntry.Parent = keys[i+1] } - _ = dr.cacheInode(k, cacheEntry) + dr.cacheInode(k, cacheEntry) } return nil @@ -734,37 +716,20 @@ func (dr *Resolver) ToJSON() ([]byte, error) { Entries []json.RawMessage }{} - for mountID, cache := range dr.cache { - e := struct { - MountID uint32 - Entries []struct { - PathKey model.PathKey - PathEntry PathEntry - } + dr.cache.Walk(func(_ uint32, pathKey model.PathKey, value PathEntry) { + entry := struct { + PathKey model.PathKey + PathEntry PathEntry }{ - MountID: mountID, - } - - for _, key := range cache.Keys() { - value, exists := cache.Get(key) - if !exists { - continue - } - - e.Entries = append(e.Entries, struct { - PathKey model.PathKey - PathEntry PathEntry - }{ - PathKey: key, - PathEntry: value, - }) + PathKey: pathKey, + PathEntry: value, } - data, err := json.Marshal(e) + data, err := json.Marshal(entry) if err == nil { dump.Entries = append(dump.Entries, data) } - } + }) return json.Marshal(dump) } @@ -803,10 +768,15 @@ func NewResolver(config *config.Config, statsdClient statsd.ClientInterface, e * return nil, fmt.Errorf("couldn't fetch the host CPU count: %w", err) } + cache, err := cache.NewTwoLayersLRU[uint32, model.PathKey, PathEntry](config.DentryCacheSize) + if err != nil { + return nil, err + } + return &Resolver{ config: config, statsdClient: statsdClient, - cache: make(map[uint32]*lru.Cache[model.PathKey, PathEntry]), + cache: cache, erpc: e, erpcRequest: erpc.NewERPCRequest(0), erpcStatsZero: make([]eRPCStats, numCPU), diff --git a/pkg/security/resolvers/envvars/resolver.go b/pkg/security/resolvers/envvars/resolver.go index 844e3b9f14de1..08b44819d7be6 100644 --- a/pkg/security/resolvers/envvars/resolver.go +++ b/pkg/security/resolvers/envvars/resolver.go @@ -10,7 +10,7 @@ package envvars import ( "github.com/DataDog/datadog-agent/pkg/security/probe/config" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/security/utils" ) @@ -42,5 +42,5 @@ func (r *Resolver) ResolveEnvVars(pid uint32) ([]string, bool, error) { // communicate the fact that it was truncated return nil, true, nil } - return utils.EnvVars(r.priorityEnvs, pid, model.MaxArgsEnvsSize) + return utils.EnvVars(r.priorityEnvs, pid, sharedconsts.MaxArgsEnvsSize) } diff --git a/pkg/security/resolvers/mount/resolver_test.go b/pkg/security/resolvers/mount/resolver_test.go index 3182b8c72c9ae..e4b5894cf465a 100644 --- a/pkg/security/resolvers/mount/resolver_test.go +++ b/pkg/security/resolvers/mount/resolver_test.go @@ -436,7 +436,7 @@ func TestMountResolver(t *testing.T) { pid uint32 = 1 ) - 
cr, _ := cgroup.NewResolver() + cr, _ := cgroup.NewResolver(nil) // Create mount resolver mr, _ := NewResolver(nil, cr, ResolverOpts{}) diff --git a/pkg/security/resolvers/process/resolver_ebpf.go b/pkg/security/resolvers/process/resolver_ebpf.go index 0e80c8aa2ae93..d690040628659 100644 --- a/pkg/security/resolvers/process/resolver_ebpf.go +++ b/pkg/security/resolvers/process/resolver_ebpf.go @@ -39,7 +39,9 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/resolvers/mount" spath "github.com/DataDog/datadog-agent/pkg/security/resolvers/path" "github.com/DataDog/datadog-agent/pkg/security/resolvers/usergroup" + "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/security/seclog" "github.com/DataDog/datadog-agent/pkg/security/utils" stime "github.com/DataDog/datadog-agent/pkg/util/ktime" @@ -76,10 +78,10 @@ type EBPFResolver struct { pathResolver spath.ResolverInterface envVarsResolver *envvars.Resolver - execFileCacheMap *lib.Map - procCacheMap *lib.Map - pidCacheMap *lib.Map - opts ResolverOpts + inodeFileMap *lib.Map + procCacheMap *lib.Map + pidCacheMap *lib.Map + opts ResolverOpts // stats cacheSize *atomic.Int64 @@ -253,7 +255,7 @@ var argsEnvsInterner = utils.NewLRUStringInterner(argsEnvsValueCacheSize) func parseStringArray(data []byte) ([]string, bool) { truncated := false values, err := model.UnmarshalStringArray(data) - if err != nil || len(data) == model.MaxArgEnvSize { + if err != nil || len(data) == sharedconsts.MaxArgEnvSize { if len(values) > 0 { values[len(values)-1] += "..." } @@ -347,8 +349,8 @@ func (p *EBPFResolver) ApplyExitEntry(event *model.Event, newEntryCb func(*model } -// enrichEventFromProc uses /proc to enrich a ProcessCacheEntry with additional metadata -func (p *EBPFResolver) enrichEventFromProc(entry *model.ProcessCacheEntry, proc *process.Process, filledProc *utils.FilledProcess) error { +// enrichEventFromProcfs uses /proc to enrich a ProcessCacheEntry with additional metadata +func (p *EBPFResolver) enrichEventFromProcfs(entry *model.ProcessCacheEntry, proc *process.Process, filledProc *utils.FilledProcess) error { // the provided process is a kernel process if its virtual memory size is null if filledProc.MemInfo.VMS == 0 { return fmt.Errorf("cannot snapshot kernel threads") @@ -366,18 +368,27 @@ func (p *EBPFResolver) enrichEventFromProc(entry *model.ProcessCacheEntry, proc } // Get the file fields of the process binary - info, err := p.retrieveExecFileFields(procExecPath) + info, err := p.RetrieveFileFieldsFromProcfs(procExecPath) if err != nil { if !os.IsNotExist(err) { - seclog.Errorf("snapshot failed for %d: couldn't retrieve inode info: %s", proc.Pid, err) + seclog.Errorf("snapshot failed for %d: couldn't retrieve file info: %s", proc.Pid, err) } - return fmt.Errorf("snapshot failed for %d: couldn't retrieve inode info: %w", proc.Pid, err) + return fmt.Errorf("snapshot failed for %d: couldn't retrieve file info: %w", proc.Pid, err) } - // Retrieve the container ID of the process from /proc - containerID, cgroup, err := p.containerResolver.GetContainerContext(pid) + // Retrieve the container ID of the process from /proc and /sys/fs/cgroup/[cgroup] + containerID, cgroup, cgroupSysFSPath, err := p.containerResolver.GetContainerContext(pid) if err != nil { - return fmt.Errorf("snapshot failed for %d: couldn't parse container and cgroup context: %w", proc.Pid, 
err) + // log error instead of returning it to allow the process to be added to the cache and eBPF maps + seclog.Errorf("snapshot failed for %d: couldn't parse container and cgroup context: %s", proc.Pid, err) + } else if cgroup.CGroupFile.Inode != 0 && cgroup.CGroupFile.MountID == 0 { // the mount id is unavailable through statx + // Get the file fields of the sysfs cgroup file + info, err := p.RetrieveFileFieldsFromProcfs(cgroupSysFSPath) + if err != nil { + seclog.Warnf("snapshot failed for %d: couldn't retrieve file info: %s", proc.Pid, err) + } else { + cgroup.CGroupFile.MountID = info.MountID + } } entry.ContainerID = containerID @@ -392,17 +403,6 @@ func (p *EBPFResolver) enrichEventFromProc(entry *model.ProcessCacheEntry, proc entry.FileEvent.MountOrigin = model.MountOriginProcfs entry.FileEvent.MountSource = model.MountSourceSnapshot - if entry.Process.CGroup.CGroupFile.MountID == 0 { - // Get the file fields of the cgroup file - taskPath := utils.CgroupTaskPath(pid, pid) - info, err := p.retrieveExecFileFields(taskPath) - if err != nil { - seclog.Debugf("snapshot failed for %d: couldn't retrieve inode info: %s", proc.Pid, err) - } else { - entry.Process.CGroup.CGroupFile.MountID = info.MountID - } - } - if entry.FileEvent.IsFileless() { entry.FileEvent.Filesystem = model.TmpFS } else { @@ -498,33 +498,71 @@ func (p *EBPFResolver) enrichEventFromProc(entry *model.ProcessCacheEntry, proc return nil } -// retrieveExecFileFields fetches inode metadata from kernel space -func (p *EBPFResolver) retrieveExecFileFields(procExecPath string) (*model.FileFields, error) { - fi, err := os.Stat(procExecPath) +func (p *EBPFResolver) statFile(filename string) (uint64, []byte, error) { + // first stat to reserve the entry in the map and let the second stat update the entry + fi, err := os.Stat(filename) if err != nil { - return nil, err + return 0, nil, err } stat, ok := fi.Sys().(*syscall.Stat_t) if !ok { - return nil, errors.New("wrong type") + return 0, nil, errors.New("wrong type") } - inode := stat.Ino inodeb := make([]byte, 8) - binary.NativeEndian.PutUint64(inodeb, inode) + binary.NativeEndian.PutUint64(inodeb, stat.Ino) - data, err := p.execFileCacheMap.LookupBytes(inodeb) + // push to allocate the entry + fileFields := model.FileFields{ + PathKey: model.PathKey{ + Inode: stat.Ino, + }, + } + + data := make([]byte, model.FileFieldsSize) + if _, err = fileFields.MarshalBinary(data); err != nil { + return 0, inodeb, err + } + + if err = p.inodeFileMap.Put(inodeb, data); err != nil { + return 0, nil, err + } + + // stat again to let the kernel part update the entry + if _, err = os.Stat(filename); err != nil { + return 0, nil, err + } + + return stat.Ino, inodeb, nil +} + +// RetrieveFileFieldsFromProcfs fetches inode metadata from kernel space. 
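// Aside (not part of the patch): the statFile/RetrieveFileFieldsFromProcfs pair above
// relies on a "stat twice" handshake with the kernel: the first stat reserves a slot in
// an eBPF map keyed by inode, the second stat fires security_inode_getattr so the eBPF
// program can fill that slot, and userspace then reads and frees it. The sketch below
// is a toy model of that handshake under the assumption of an ordinary Go map standing
// in for the eBPF "inode_file" map; inodeFileMap, statInode and retrieveFileFields are
// hypothetical stand-ins, not agent code.
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"os"
	"syscall"
)

// inodeFileMap mimics the eBPF map keyed by the 8-byte native-endian inode number.
var inodeFileMap = map[[8]byte][]byte{}

// statInode stats the file and returns its inode number plus the map key derived from it.
func statInode(filename string) (uint64, [8]byte, error) {
	fi, err := os.Stat(filename)
	if err != nil {
		return 0, [8]byte{}, err
	}
	stat, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, [8]byte{}, errors.New("unexpected Stat_t type")
	}
	var key [8]byte
	binary.NativeEndian.PutUint64(key[:], stat.Ino)
	return stat.Ino, key, nil
}

func retrieveFileFields(filename string) ([]byte, error) {
	// 1. first stat: learn the inode and reserve a placeholder slot in the map
	inode, key, err := statInode(filename)
	if err != nil {
		return nil, err
	}
	inodeFileMap[key] = nil

	// 2. second stat: in the real probe this triggers security_inode_getattr,
	// whose eBPF program overwrites the reserved entry with the file fields
	if _, err := os.Stat(filename); err != nil {
		return nil, err
	}

	// 3. read the entry back and free the slot
	data, ok := inodeFileMap[key]
	delete(inodeFileMap, key)
	if !ok || data == nil {
		return nil, fmt.Errorf("no kernel data for inode %d", inode)
	}
	return data, nil
}

func main() {
	if _, err := retrieveFileFields("/etc/hostname"); err != nil {
		fmt.Println(err) // expected here: no kernel side fills the toy map
	}
}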
+// stat the file which triggers the security_inode_getattr, which fill a map with the needed data +func (p *EBPFResolver) RetrieveFileFieldsFromProcfs(filename string) (*model.FileFields, error) { + inode, inodeb, err := p.statFile(filename) if err != nil { - return nil, fmt.Errorf("unable to get filename for inode `%d`: %v", inode, err) + return nil, err } + data, err := p.inodeFileMap.LookupBytes(inodeb) + // go back to a sane error value + if data == nil && err == nil { + err = lib.ErrKeyNotExist + } + if err != nil { + return nil, fmt.Errorf("unable to get filename for inode `%d`: %w", inode, err) + } + + // free the slot + _ = p.inodeFileMap.Delete(inodeb) + var fileFields model.FileFields if _, err := fileFields.UnmarshalBinary(data); err != nil { - return nil, fmt.Errorf("unable to unmarshal entry for inode `%d`: %v", inode, err) + return nil, fmt.Errorf("unable to unmarshal entry for inode `%d`: %w", inode, err) } if fileFields.Inode == 0 { - return nil, fmt.Errorf("inode `%d` not found: %v", inode, err) + return nil, fmt.Errorf("inode `%d` not found: %w", inode, err) } return &fileFields, nil @@ -621,7 +659,7 @@ func (p *EBPFResolver) deleteEntry(pid uint32, exitTime time.Time) { } if p.cgroupResolver != nil { - p.cgroupResolver.DelPIDWithID(entry.ContainerID, entry.Pid) + p.cgroupResolver.DelPID(entry.Pid) } entry.Exit(exitTime) @@ -887,7 +925,7 @@ func (p *EBPFResolver) resolveFromKernelMaps(pid, tid uint32, inode uint64, newE // the parent is in a container. In other words, we have to fall back to /proc to query the container ID of the // process. if entry.CGroup.CGroupFile.Inode == 0 { - if containerID, cgroup, err := p.containerResolver.GetContainerContext(pid); err == nil { + if containerID, cgroup, _, err := p.containerResolver.GetContainerContext(pid); err == nil { entry.CGroup.Merge(&cgroup) entry.ContainerID = containerID } @@ -1184,7 +1222,7 @@ func (p *EBPFResolver) FetchAWSSecurityCredentials(e *model.Event) []model.AWSSe // Start starts the resolver func (p *EBPFResolver) Start(ctx context.Context) error { var err error - if p.execFileCacheMap, err = managerhelper.Map(p.manager, "exec_file_cache"); err != nil { + if p.inodeFileMap, err = managerhelper.Map(p.manager, "inode_file"); err != nil { return err } @@ -1262,7 +1300,7 @@ func (p *EBPFResolver) newEntryFromProcfsAndSyncKernelMaps(proc *process.Process entry := p.NewProcessCacheEntry(model.PIDContext{Pid: pid, Tid: pid}) // update the cache entry - if err := p.enrichEventFromProc(entry, proc, filledProc); err != nil { + if err := p.enrichEventFromProcfs(entry, proc, filledProc); err != nil { entry.Release() seclog.Trace(err) @@ -1461,6 +1499,26 @@ func (p *EBPFResolver) Walk(callback func(entry *model.ProcessCacheEntry)) { } } +// UpdateProcessCGroupContext updates the cgroup context and container ID of the process matching the provided PID +func (p *EBPFResolver) UpdateProcessCGroupContext(pid uint32, cgroupContext *model.CGroupContext, newEntryCb func(entry *model.ProcessCacheEntry, err error)) bool { + p.Lock() + defer p.Unlock() + + pce := p.resolve(pid, pid, 0, false, newEntryCb) + if pce == nil { + return false + } + + pce.Process.CGroup = *cgroupContext + pce.CGroup = *cgroupContext + if cgroupContext.CGroupFlags.IsContainer() { + containerID, _ := containerutils.FindContainerID(cgroupContext.CGroupID) + pce.ContainerID = containerID + pce.Process.ContainerID = containerID + } + return true +} + // NewEBPFResolver returns a new process resolver func NewEBPFResolver(manager *manager.Manager, config 
*config.Config, statsdClient statsd.ClientInterface, scrubber *procutil.DataScrubber, containerResolver *container.Resolver, mountResolver mount.ResolverInterface, diff --git a/pkg/security/resolvers/process/resolver_test.go b/pkg/security/resolvers/process/resolver_test.go index 16432e67338e1..c5bdb3e326346 100644 --- a/pkg/security/resolvers/process/resolver_test.go +++ b/pkg/security/resolvers/process/resolver_test.go @@ -73,7 +73,7 @@ func newResolver() (*EBPFResolver, error) { return nil, err } - cgroupsResolver, err := cgroup.NewResolver() + cgroupsResolver, err := cgroup.NewResolver(nil) if err != nil { return nil, err } diff --git a/pkg/security/resolvers/process/resolver_windows.go b/pkg/security/resolvers/process/resolver_windows.go index 2694f5fda471e..785ebed02c516 100644 --- a/pkg/security/resolvers/process/resolver_windows.go +++ b/pkg/security/resolvers/process/resolver_windows.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/security/secl/model" - "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/pathutils" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -135,8 +135,8 @@ func (p *Resolver) AddNewEntry(pid uint32, ppid uint32, file string, envs []stri e := p.processCacheEntryPool.Get() e.PIDContext.Pid = pid e.PPid = ppid - e.Process.CmdLine = utils.NormalizePath(commandLine) - e.Process.FileEvent.PathnameStr = utils.NormalizePath(file) + e.Process.CmdLine = pathutils.NormalizePath(commandLine) + e.Process.FileEvent.PathnameStr = pathutils.NormalizePath(file) e.Process.FileEvent.BasenameStr = filepath.Base(e.Process.FileEvent.PathnameStr) e.Process.EnvsEntry = &model.EnvsEntry{ Values: envs, @@ -244,8 +244,8 @@ func (p *Resolver) Snapshot() { e.PIDContext.Pid = Pid(pid) e.PPid = Pid(proc.Ppid) - e.Process.CmdLine = utils.NormalizePath(strings.Join(proc.GetCmdline(), " ")) - e.Process.FileEvent.PathnameStr = utils.NormalizePath(proc.Exe) + e.Process.CmdLine = pathutils.NormalizePath(strings.Join(proc.GetCmdline(), " ")) + e.Process.FileEvent.PathnameStr = pathutils.NormalizePath(proc.Exe) e.Process.FileEvent.BasenameStr = filepath.Base(e.Process.FileEvent.PathnameStr) e.ExecTime = time.Unix(0, proc.Stats.CreateTime*int64(time.Millisecond)) entries = append(entries, e) diff --git a/pkg/security/resolvers/resolvers_ebpf.go b/pkg/security/resolvers/resolvers_ebpf.go index c9adeb82e557b..9b9dce457043e 100644 --- a/pkg/security/resolvers/resolvers_ebpf.go +++ b/pkg/security/resolvers/resolvers_ebpf.go @@ -12,7 +12,6 @@ import ( "context" "fmt" "os" - "path/filepath" "sort" "github.com/DataDog/datadog-go/v5/statsd" @@ -93,7 +92,7 @@ func NewEBPFResolvers(config *config.Config, manager *manager.Manager, statsdCli } } - cgroupsResolver, err := cgroup.NewResolver() + cgroupsResolver, err := cgroup.NewResolver(statsdClient) if err != nil { return nil, err } @@ -133,7 +132,7 @@ func NewEBPFResolvers(config *config.Config, manager *manager.Manager, statsdCli mountResolver = &mount.NoOpResolver{} pathResolver = &path.NoOpResolver{} } - containerResolver := &container.Resolver{} + containerResolver := container.New() processOpts := process.NewResolverOpts() processOpts.WithEnvsValue(config.Probe.EnvsWithValue) @@ -223,20 +222,16 @@ func (r *EBPFResolvers) ResolveCGroupContext(pathKey model.PathKey, cgroupFlags return cgroupContext, nil } - path, err := r.DentryResolver.Resolve(pathKey, true) + 
cgroup, err := r.DentryResolver.Resolve(pathKey, true) if err != nil { return nil, fmt.Errorf("failed to resolve cgroup file %v: %w", pathKey, err) } - cgroup := filepath.Dir(string(path)) - if cgroup == "/" { - cgroup = path - } - cgroupContext := &model.CGroupContext{ - CGroupID: containerutils.CGroupID(cgroup), - CGroupFlags: containerutils.CGroupFlags(cgroupFlags), - CGroupFile: pathKey, + CGroupID: containerutils.CGroupID(cgroup), + CGroupFlags: containerutils.CGroupFlags(cgroupFlags), + CGroupFile: pathKey, + CGroupManager: containerutils.CGroupManager(cgroupFlags & containerutils.CGroupManagerMask).String(), } return cgroupContext, nil diff --git a/pkg/security/resolvers/resolvers_ebpfless.go b/pkg/security/resolvers/resolvers_ebpfless.go index 25c799fbfa06f..2720f5be15a86 100644 --- a/pkg/security/resolvers/resolvers_ebpfless.go +++ b/pkg/security/resolvers/resolvers_ebpfless.go @@ -16,7 +16,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/process/procutil" "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/resolvers/cgroup" - "github.com/DataDog/datadog-agent/pkg/security/resolvers/container" "github.com/DataDog/datadog-agent/pkg/security/resolvers/hash" "github.com/DataDog/datadog-agent/pkg/security/resolvers/process" "github.com/DataDog/datadog-agent/pkg/security/resolvers/tags" @@ -24,15 +23,14 @@ import ( // EBPFLessResolvers holds the list of the event attribute resolvers type EBPFLessResolvers struct { - ContainerResolver *container.Resolver - TagsResolver *tags.LinuxResolver - ProcessResolver *process.EBPFLessResolver - HashResolver *hash.Resolver + TagsResolver *tags.LinuxResolver + ProcessResolver *process.EBPFLessResolver + HashResolver *hash.Resolver } // NewEBPFLessResolvers creates a new instance of EBPFLessResolvers func NewEBPFLessResolvers(config *config.Config, statsdClient statsd.ClientInterface, scrubber *procutil.DataScrubber, opts Opts) (*EBPFLessResolvers, error) { - cgroupsResolver, err := cgroup.NewResolver() + cgroupsResolver, err := cgroup.NewResolver(statsdClient) if err != nil { return nil, err } diff --git a/pkg/security/resolvers/sbom/resolver.go b/pkg/security/resolvers/sbom/resolver.go index e282dbd6f23d9..17405035e3b1f 100644 --- a/pkg/security/resolvers/sbom/resolver.go +++ b/pkg/security/resolvers/sbom/resolver.go @@ -13,7 +13,7 @@ import ( "errors" "fmt" "os" - "strings" + "slices" "sync" "syscall" "time" @@ -26,7 +26,6 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - configUtils "github.com/DataDog/datadog-agent/pkg/config/utils" "github.com/DataDog/datadog-agent/pkg/sbom/collectors" "github.com/DataDog/datadog-agent/pkg/sbom/collectors/host" sbomscanner "github.com/DataDog/datadog-agent/pkg/sbom/scanner" @@ -38,91 +37,101 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/seclog" "github.com/DataDog/datadog-agent/pkg/security/utils" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/trivy" ) -// SBOMSource defines is the default log source for the SBOM events -const SBOMSource = "runtime-security-agent" +const ( + // state of the sboms + pendingState int64 = iota + 1 + computedState + stoppedState -const maxSBOMGenerationRetries = 3 + maxSBOMGenerationRetries = 3 + maxSBOMEntries = 1024 + scanQueueSize = 100 +) var 
errNoProcessForContainerID = errors.New("found no running process matching the given container ID") +// Data use the keep the result of a scan of a same workload across multiple +// container +type Data struct { + files fileQuerier +} + // SBOM defines an SBOM type SBOM struct { sync.RWMutex - report *trivy.Report - files fileQuerier - - Host string - Source string - Service string ContainerID containerutils.ContainerID - workloadKey string - deleted *atomic.Bool - scanSuccessful *atomic.Bool - cgroup *cgroupModel.CacheEntry + data *Data + + workloadKey workloadKey - refresh *debouncer.Debouncer + cgroup *cgroupModel.CacheEntry + state *atomic.Int64 + + refresher *debouncer.Debouncer } -func getWorkloadKey(selector *cgroupModel.WorkloadSelector) string { - return selector.Image + ":" + selector.Tag +type workloadKey string + +func getWorkloadKey(selector *cgroupModel.WorkloadSelector) workloadKey { + return workloadKey(selector.Image + ":" + selector.Tag) } // IsComputed returns true if SBOM was successfully generated func (s *SBOM) IsComputed() bool { - return s.scanSuccessful.Load() + return s.state.Load() == computedState } // SetReport sets the SBOM report -func (s *SBOM) SetReport(report *trivy.Report) { +func (s *SBOM) setReport(report *trivy.Report) { // build file cache - s.files = newFileQuerier(report) + s.data.files = newFileQuerier(report) } -// reset (thread unsafe) cleans up internal fields before a SBOM is inserted in cache, the goal is to save space and delete references -// to structs that will be GCed -func (s *SBOM) reset() { - s.Host = "" - s.Source = "" - s.Service = "" - s.ContainerID = "" - s.cgroup = nil - s.deleted.Store(true) - if s.refresh != nil { - s.refresh.Stop() - s.refresh = nil +func (s *SBOM) stop() { + if s.refresher != nil { + s.refresher.Stop() + + // don't forget to set the refresher to nil otherwise it generates a memleak + s.refresher = nil } + + // change the state so that already queued sbom won't be handled + s.state.Store(stoppedState) } // NewSBOM returns a new empty instance of SBOM -func NewSBOM(host string, source string, id containerutils.ContainerID, cgroup *cgroupModel.CacheEntry, workloadKey string) (*SBOM, error) { - sbom := &SBOM{ - files: fileQuerier{}, - Host: host, - Source: source, - ContainerID: id, - workloadKey: workloadKey, - deleted: atomic.NewBool(false), - scanSuccessful: atomic.NewBool(false), - cgroup: cgroup, - } - - return sbom, nil +func NewSBOM(id containerutils.ContainerID, cgroup *cgroupModel.CacheEntry, workloadKey workloadKey) *SBOM { + return &SBOM{ + ContainerID: id, + workloadKey: workloadKey, + state: atomic.NewInt64(pendingState), + cgroup: cgroup, + data: &Data{}, + } } // Resolver is the Software Bill-Of-material resolver type Resolver struct { - cfg *config.RuntimeSecurityConfig - sbomsLock sync.RWMutex - sboms map[containerutils.ContainerID]*SBOM - sbomsCacheLock sync.RWMutex - sbomsCache *simplelru.LRU[string, *SBOM] - scannerChan chan *SBOM + cfg *config.RuntimeSecurityConfig + + sbomsLock sync.RWMutex + sboms *simplelru.LRU[containerutils.ContainerID, *SBOM] + + // cache + dataCacheLock sync.RWMutex + dataCache *simplelru.LRU[workloadKey, *Data] // cache per workload key + + // queue + scanChan chan *SBOM + pendingScanLock sync.Mutex + pendingScan []containerutils.ContainerID + statsdClient statsd.ClientInterface sbomScanner *sbomscanner.Scanner hostRootDevice uint64 @@ -132,16 +141,11 @@ type Resolver struct { failedSBOMGenerations *atomic.Uint64 sbomsCacheHit *atomic.Uint64 sbomsCacheMiss 
*atomic.Uint64 - - // context tags and attributes - hostname string - source string - contextTags []string } // NewSBOMResolver returns a new instance of Resolver func NewSBOMResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.ClientInterface) (*Resolver, error) { - sbomScanner, err := sbomscanner.CreateGlobalScanner(pkgconfigsetup.SystemProbe(), optional.NewNoneOption[workloadmeta.Component]()) + sbomScanner, err := sbomscanner.CreateGlobalScanner(pkgconfigsetup.SystemProbe(), option.None[workloadmeta.Component]()) if err != nil { return nil, err } @@ -149,7 +153,7 @@ func NewSBOMResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.Client return nil, errors.New("sbom is disabled") } - sbomsCache, err := simplelru.NewLRU[string, *SBOM](c.SBOMResolverWorkloadsCacheSize, nil) + dataCache, err := simplelru.NewLRU[workloadKey, *Data](c.SBOMResolverWorkloadsCacheSize, nil) if err != nil { return nil, fmt.Errorf("couldn't create new SBOMResolver: %w", err) } @@ -167,9 +171,8 @@ func NewSBOMResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.Client resolver := &Resolver{ cfg: c, statsdClient: statsdClient, - sboms: make(map[containerutils.ContainerID]*SBOM), - sbomsCache: sbomsCache, - scannerChan: make(chan *SBOM, 100), + dataCache: dataCache, + scanChan: make(chan *SBOM, 100), sbomScanner: sbomScanner, hostRootDevice: stat.Dev, sbomGenerations: atomic.NewUint64(0), @@ -178,39 +181,23 @@ func NewSBOMResolver(c *config.RuntimeSecurityConfig, statsdClient statsd.Client failedSBOMGenerations: atomic.NewUint64(0), } + sboms, err := simplelru.NewLRU[containerutils.ContainerID, *SBOM](maxSBOMEntries, func(_ containerutils.ContainerID, sbom *SBOM) { + // should be trigger from a function already locking the sbom, see Add, Delete + sbom.stop() + resolver.removePendingScan(sbom.ContainerID) + }) + if err != nil { + return nil, fmt.Errorf("couldn't create new SBOM resolver: %w", err) + } + resolver.sboms = sboms + if !c.SBOMResolverEnabled { return resolver, nil } - resolver.prepareContextTags() return resolver, nil } -func (r *Resolver) prepareContextTags() { - // add hostname tag - hostname, err := utils.GetHostname() - if err != nil || hostname == "" { - hostname = "unknown" - } - r.hostname = hostname - r.contextTags = append(r.contextTags, fmt.Sprintf("host:%s", r.hostname)) - - // merge tags from config - for _, tag := range configUtils.GetConfiguredTags(pkgconfigsetup.Datadog(), true) { - if strings.HasPrefix(tag, "host") { - continue - } - r.contextTags = append(r.contextTags, tag) - } - - // add source tag - r.source = utils.GetTagValue("source", r.contextTags) - if len(r.source) == 0 { - r.source = SBOMSource - r.contextTags = append(r.contextTags, fmt.Sprintf("source:%s", SBOMSource)) - } -} - // Start starts the goroutine of the SBOM resolver func (r *Resolver) Start(ctx context.Context) error { r.sbomScanner.Start(ctx) @@ -221,17 +208,14 @@ func (r *Resolver) Start(ctx context.Context) error { hostRoot = "/" } - hostSBOM, err := NewSBOM(r.hostname, r.source, "", nil, "") - if err != nil { - return err - } - r.hostSBOM = hostSBOM + r.hostSBOM = NewSBOM("", nil, "") report, err := r.generateSBOM(hostRoot) if err != nil { return err } - r.hostSBOM.SetReport(report) + r.hostSBOM.setReport(report) + r.hostSBOM.state.Store(computedState) } go func() { @@ -242,7 +226,7 @@ func (r *Resolver) Start(ctx context.Context) error { select { case <-ctx.Done(): return - case sbom := <-r.scannerChan: + case sbom := <-r.scanChan: if err := retry.Do(func() error { return 
r.analyzeWorkload(sbom) }, retry.Attempts(maxSBOMGenerationRetries), retry.Delay(200*time.Millisecond)); err != nil { @@ -263,7 +247,30 @@ func (r *Resolver) Start(ctx context.Context) error { func (r *Resolver) RefreshSBOM(containerID containerutils.ContainerID) error { if sbom := r.getSBOM(containerID); sbom != nil { seclog.Debugf("Refreshing SBOM for container %s", containerID) - sbom.refresh.Call() + + var refresher *debouncer.Debouncer + + // create a refresher debouncer on demand + sbom.Lock() + refresher = sbom.refresher + if refresher == nil { + refresher = debouncer.New( + 3*time.Second, func() { + // invalid cache data + r.removeSBOMData(sbom.workloadKey) + + sbom.Lock() + r.triggerScan(sbom) + sbom.Unlock() + }, + ) + refresher.Start() + sbom.refresher = refresher + } + sbom.Unlock() + + refresher.Call() + return nil } return fmt.Errorf("container %s not found", containerID) @@ -274,7 +281,7 @@ func (r *Resolver) generateSBOM(root string) (report *trivy.Report, err error) { seclog.Infof("Generating SBOM for %s", root) r.sbomGenerations.Inc() - scanRequest := host.NewScanRequest(root, os.DirFS("/")) + scanRequest := host.NewScanRequest(root) ch := collectors.GetHostScanner().Channel() if ch == nil { return nil, fmt.Errorf("couldn't retrieve global host scanner result channel") @@ -317,11 +324,9 @@ func (r *Resolver) doScan(sbom *SBOM) (*trivy.Report, error) { // the container ID reduces drastically the likelihood of this race) computedID, err := utils.GetProcContainerID(rootCandidatePID, rootCandidatePID) if err != nil { - sbom.cgroup.RemovePID(rootCandidatePID) continue } if computedID != sbom.ContainerID { - sbom.cgroup.RemovePID(rootCandidatePID) continue } @@ -341,7 +346,7 @@ func (r *Resolver) doScan(sbom *SBOM) (*trivy.Report, error) { } if report, lastErr = r.generateSBOM(containerProcRootPath); lastErr == nil { - sbom.SetReport(report) + sbom.setReport(report) scanned = true break } @@ -357,51 +362,85 @@ func (r *Resolver) doScan(sbom *SBOM) (*trivy.Report, error) { return report, nil } -func (r *Resolver) invalidateWorkflow(sbom *SBOM) { - r.sbomsCacheLock.Lock() - r.sbomsCache.Remove(sbom.workloadKey) - r.sbomsCacheLock.Unlock() +func (r *Resolver) removeSBOMData(key workloadKey) { + r.dataCacheLock.Lock() + r.dataCache.Remove(key) + r.dataCacheLock.Unlock() +} + +func (r *Resolver) addPendingScan(containerID containerutils.ContainerID) bool { + r.pendingScanLock.Lock() + defer r.pendingScanLock.Unlock() + + if len(r.pendingScan) >= scanQueueSize { + return false + } + + if slices.Contains(r.pendingScan, containerID) { + return false + } + r.pendingScan = append(r.pendingScan, containerID) + + return true +} + +func (r *Resolver) removePendingScan(containerID containerutils.ContainerID) { + r.pendingScanLock.Lock() + defer r.pendingScanLock.Unlock() + + r.pendingScan = slices.DeleteFunc(r.pendingScan, func(v containerutils.ContainerID) bool { + return v == containerID + }) } // analyzeWorkload generates the SBOM of the provided sbom and send it to the security agent func (r *Resolver) analyzeWorkload(sbom *SBOM) error { - seclog.Infof("analyzing sbom '%s'", sbom.ContainerID) sbom.Lock() defer sbom.Unlock() - if sbom.deleted.Load() { - // this sbom has been deleted, ignore + seclog.Infof("analyzing sbom '%s'", sbom.ContainerID) + + if sbom.state.Load() != pendingState { + r.removePendingScan(sbom.ContainerID) + + // should not append, ignore + seclog.Warnf("trying to analyze a sbom not in pending state for '%s': %d", sbom.ContainerID, sbom.state.Load()) return nil } // 
bail out if the workload has been analyzed while queued up - r.sbomsCacheLock.RLock() - if r.sbomsCache.Contains(sbom.workloadKey) { - r.sbomsCacheLock.RUnlock() + r.dataCacheLock.RLock() + if data, exists := r.dataCache.Get(sbom.workloadKey); exists { + r.dataCacheLock.RUnlock() + sbom.data = data + + r.removePendingScan(sbom.ContainerID) + return nil } - r.sbomsCacheLock.RUnlock() + r.dataCacheLock.RUnlock() report, err := r.doScan(sbom) if err != nil { return err } - // build file cache - sbom.files = newFileQuerier(report) - - // we can get rid of the report now that we've generate the file mapping - sbom.report = nil + data := &Data{ + files: newFileQuerier(report), + } + sbom.data = data - // mark the SBOM ass successful - sbom.scanSuccessful.Store(true) + // mark the SBOM as successful + sbom.state.Store(computedState) // add to cache - r.sbomsCacheLock.Lock() - r.sbomsCache.Add(sbom.workloadKey, sbom) - r.sbomsCacheLock.Unlock() + r.dataCacheLock.Lock() + r.dataCache.Add(sbom.workloadKey, data) + r.dataCacheLock.Unlock() - seclog.Infof("new sbom generated for '%s': %d files added", sbom.ContainerID, sbom.files.len()) + r.removePendingScan(sbom.ContainerID) + + seclog.Infof("new sbom generated for '%s': %d files added", sbom.ContainerID, data.files.len()) return nil } @@ -411,7 +450,7 @@ func (r *Resolver) getSBOM(containerID containerutils.ContainerID) *SBOM { sbom := r.hostSBOM if containerID != "" { - sbom = r.sboms[containerID] + sbom, _ = r.sboms.Get(containerID) } return sbom } @@ -427,49 +466,37 @@ func (r *Resolver) ResolvePackage(containerID containerutils.ContainerID, file * sbom.Lock() defer sbom.Unlock() - return sbom.files.queryFile(file.PathnameStr) + return sbom.data.files.queryFile(file.PathnameStr) } -// newWorkloadEntry (thread unsafe) creates a new SBOM entry for the sbom designated by the provided process cache +// newSBOM (thread unsafe) creates a new SBOM entry for the sbom designated by the provided process cache // entry -func (r *Resolver) newWorkloadEntry(id containerutils.ContainerID, cgroup *cgroupModel.CacheEntry, workloadKey string) (*SBOM, error) { - sbom, err := NewSBOM(r.hostname, r.source, id, cgroup, workloadKey) - if err != nil { - return nil, err - } - - sbom.refresh = debouncer.New( - 3*time.Second, func() { - r.invalidateWorkflow(sbom) - r.triggerScan(sbom) - }, - ) - r.sboms[id] = sbom - sbom.refresh.Start() - - return sbom, nil +func (r *Resolver) newSBOM(id containerutils.ContainerID, cgroup *cgroupModel.CacheEntry, workloadKey workloadKey) *SBOM { + sbom := NewSBOM(id, cgroup, workloadKey) + r.sboms.Add(id, sbom) + return sbom } -// queueWorkload inserts the provided sbom in a SBOM resolver chan, it will be inserted in the scannerChan or the +// queueWorkload inserts the provided sbom in a SBOM resolver chan, it will be inserted in the scanChan or the // delayerChan depending on the tags that have been resolved func (r *Resolver) queueWorkload(sbom *SBOM) { sbom.Lock() defer sbom.Unlock() - if sbom.deleted.Load() { + if sbom.state.Load() != pendingState { // this sbom was deleted before we could scan it, ignore it return } // check if this sbom has been scanned before - r.sbomsCacheLock.Lock() - defer r.sbomsCacheLock.Unlock() - - cachedSBOM, ok := r.sbomsCache.Get(sbom.workloadKey) - if ok { - // copy report and file cache (keeping a reference is fine, we won't be modifying the content) - sbom.files = cachedSBOM.files - sbom.report = cachedSBOM.report + r.dataCacheLock.Lock() + defer r.dataCacheLock.Unlock() + + if data, ok := 
r.dataCache.Get(sbom.workloadKey); ok { + sbom.data = data + + sbom.state.Store(computedState) + r.sbomsCacheHit.Inc() return } @@ -479,10 +506,17 @@ func (r *Resolver) queueWorkload(sbom *SBOM) { } func (r *Resolver) triggerScan(sbom *SBOM) { + if !r.addPendingScan(sbom.ContainerID) { + r.deleteSBOM(sbom) + return + } + // push sbom to the scanner chan select { - case r.scannerChan <- sbom: + case r.scanChan <- sbom: default: + r.removePendingScan(sbom.ContainerID) + r.deleteSBOM(sbom) } } @@ -501,13 +535,10 @@ func (r *Resolver) OnWorkloadSelectorResolvedEvent(workload *tags.Workload) { return } - _, ok := r.sboms[id] + _, ok := r.sboms.Get(id) if !ok { workloadKey := getWorkloadKey(workload.Selector.Copy()) - sbom, err := r.newWorkloadEntry(id, workload.CacheEntry, workloadKey) - if err != nil { - seclog.Errorf("couldn't create new SBOM entry for sbom '%s': %v", id, err) - } + sbom := r.newSBOM(id, workload.CacheEntry, workloadKey) r.queueWorkload(sbom) } } @@ -521,12 +552,15 @@ func (r *Resolver) GetWorkload(id containerutils.ContainerID) *SBOM { return r.hostSBOM } - return r.sboms[id] + sbom, _ := r.sboms.Get(id) + return sbom } // OnCGroupDeletedEvent is used to handle a CGroupDeleted event func (r *Resolver) OnCGroupDeletedEvent(cgroup *cgroupModel.CacheEntry) { - r.Delete(cgroup.ContainerID) + if cgroup.ContainerID != "" { + r.Delete(cgroup.ContainerID) + } } // Delete removes the SBOM of the provided cgroup id @@ -548,32 +582,16 @@ func (r *Resolver) deleteSBOM(sbom *SBOM) { defer r.sbomsLock.Unlock() seclog.Infof("deleting SBOM entry for '%s'", sbom.ContainerID) - // remove SBOM entry - delete(r.sboms, sbom.ContainerID) - - // check if the scan was successful - if !sbom.scanSuccessful.Load() { - // exit now, we don't want to cache a failed scan - return - } - - // save the sbom key before reset - sbomKey := sbom.workloadKey - - // cleanup and insert SBOM in cache - sbom.reset() - // push the sbom to the cache - r.sbomsCacheLock.Lock() - defer r.sbomsCacheLock.Unlock() - r.sbomsCache.Add(sbomKey, sbom) + // should be called under sbom.Lock + r.sboms.Remove(sbom.ContainerID) } // SendStats sends stats func (r *Resolver) SendStats() error { r.sbomsLock.RLock() defer r.sbomsLock.RUnlock() - if val := float64(len(r.sboms)); val > 0 { + if val := float64(r.sboms.Len()); val > 0 { if err := r.statsdClient.Gauge(metrics.MetricSBOMResolverActiveSBOMs, val, []string{}, 1.0); err != nil { return fmt.Errorf("couldn't send MetricSBOMResolverActiveSBOMs: %w", err) } @@ -585,9 +603,9 @@ func (r *Resolver) SendStats() error { } } - r.sbomsCacheLock.Lock() - defer r.sbomsCacheLock.Unlock() - if val := float64(r.sbomsCache.Len()); val > 0 { + r.dataCacheLock.Lock() + defer r.dataCacheLock.Unlock() + if val := float64(r.dataCache.Len()); val > 0 { if err := r.statsdClient.Gauge(metrics.MetricSBOMResolverSBOMCacheLen, val, []string{}, 1.0); err != nil { return fmt.Errorf("couldn't send MetricSBOMResolverSBOMCacheLen: %w", err) } diff --git a/pkg/security/resolvers/tags/resolver_linux.go b/pkg/security/resolvers/tags/resolver_linux.go index e029f003696b6..00ddf7dcccdd1 100644 --- a/pkg/security/resolvers/tags/resolver_linux.go +++ b/pkg/security/resolvers/tags/resolver_linux.go @@ -50,7 +50,10 @@ func (t *LinuxResolver) Start(ctx context.Context) error { } if err := t.cgroupResolver.RegisterListener(cgroup.CGroupDeleted, func(cgce *cgroupModel.CacheEntry) { - delete(t.workloads, cgce.CGroupID) + if workload, ok := t.workloads[cgce.CGroupID]; ok { + t.NotifyListeners(WorkloadSelectorDeleted, 
workload) + delete(t.workloads, cgce.CGroupID) + } }); err != nil { return err } diff --git a/pkg/security/rules/engine.go b/pkg/security/rules/engine.go index ffb73244b6c2d..ee50f77a593ce 100644 --- a/pkg/security/rules/engine.go +++ b/pkg/security/rules/engine.go @@ -65,6 +65,7 @@ type RuleEngine struct { rulesetListeners []rules.RuleSetListener AutoSuppression autosuppression.AutoSuppression pid uint32 + wg sync.WaitGroup } // APIServer defines the API server @@ -109,7 +110,7 @@ func NewRuleEngine(evm *eventmonitor.EventMonitor, config *config.RuntimeSecurit } // Start the rule engine -func (e *RuleEngine) Start(ctx context.Context, reloadChan <-chan struct{}, wg *sync.WaitGroup) error { +func (e *RuleEngine) Start(ctx context.Context, reloadChan <-chan struct{}) error { // monitor policies if e.config.PolicyMonitorEnabled { e.policyMonitor.Start(ctx) @@ -132,7 +133,11 @@ func (e *RuleEngine) Start(ctx context.Context, reloadChan <-chan struct{}, wg * ruleFilters = append(ruleFilters, agentVersionFilter) } - ruleFilterModel, err := filtermodel.NewRuleFilterModel(e.probe.Config, e.probe.Origin()) + rfmCfg := filtermodel.RuleFilterEventConfig{ + COREEnabled: e.probe.Config.Probe.EnableCORE, + Origin: e.probe.Origin(), + } + ruleFilterModel, err := filtermodel.NewRuleFilterModel(rfmCfg) if err != nil { return fmt.Errorf("failed to create rule filter: %w", err) } @@ -150,9 +155,9 @@ func (e *RuleEngine) Start(ctx context.Context, reloadChan <-chan struct{}, wg * return fmt.Errorf("failed to load policies: %w", err) } - wg.Add(1) + e.wg.Add(1) go func() { - defer wg.Done() + defer e.wg.Done() for range reloadChan { if err := e.ReloadPolicies(); err != nil { @@ -161,9 +166,9 @@ func (e *RuleEngine) Start(ctx context.Context, reloadChan <-chan struct{}, wg * } }() - wg.Add(1) + e.wg.Add(1) go func() { - defer wg.Done() + defer e.wg.Done() for range e.policyLoader.NewPolicyReady() { if err := e.ReloadPolicies(); err != nil { @@ -176,9 +181,52 @@ func (e *RuleEngine) Start(ctx context.Context, reloadChan <-chan struct{}, wg * provider.Start() } - wg.Add(1) + e.startSendHeartbeatEvents(ctx) + + return nil +} + +func (e *RuleEngine) startSendHeartbeatEvents(ctx context.Context) { + // Sending an heartbeat event every minute + e.wg.Add(1) + go func() { + defer e.wg.Done() + + // 5 heartbeats with a period of 1 min, after that we move the period to 10 min + // if the policies change we go back to 5 beats every 1 min + + heartbeatTicker := time.NewTicker(1 * time.Minute) + defer heartbeatTicker.Stop() + + heartBeatCounter := 5 + + for { + select { + case <-ctx.Done(): + return + case <-e.policyLoader.NewPolicyReady(): + heartBeatCounter = 5 + heartbeatTicker.Reset(1 * time.Minute) + // we report a heartbeat anyway + e.policyMonitor.ReportHeartbeatEvent(e.probe.GetAgentContainerContext(), e.eventSender) + case <-heartbeatTicker.C: + e.policyMonitor.ReportHeartbeatEvent(e.probe.GetAgentContainerContext(), e.eventSender) + if heartBeatCounter > 0 { + heartBeatCounter-- + if heartBeatCounter == 0 { + heartbeatTicker.Reset(10 * time.Minute) + } + } + } + } + }() +} + +// StartRunningMetrics starts sending the running metrics +func (e *RuleEngine) StartRunningMetrics(ctx context.Context) { + e.wg.Add(1) go func() { - defer wg.Done() + defer e.wg.Done() heartbeatTicker := time.NewTicker(15 * time.Second) defer heartbeatTicker.Stop() @@ -231,41 +279,6 @@ func (e *RuleEngine) Start(ctx context.Context, reloadChan <-chan struct{}, wg * } } }() - - // Sending an heartbeat event every minute - wg.Add(1) - go 
func() { - defer wg.Done() - - // 5 heartbeats with a period of 1 min, after that we move the period to 10 min - // if the policies change we go back to 5 beats every 1 min - - heartbeatTicker := time.NewTicker(1 * time.Minute) - defer heartbeatTicker.Stop() - - heartBeatCounter := 5 - - for { - select { - case <-ctx.Done(): - return - case <-e.policyLoader.NewPolicyReady(): - heartBeatCounter = 5 - heartbeatTicker.Reset(1 * time.Minute) - // we report a heartbeat anyway - e.policyMonitor.ReportHeartbeatEvent(e.probe.GetAgentContainerContext(), e.eventSender) - case <-heartbeatTicker.C: - e.policyMonitor.ReportHeartbeatEvent(e.probe.GetAgentContainerContext(), e.eventSender) - if heartBeatCounter > 0 { - heartBeatCounter-- - if heartBeatCounter == 0 { - heartbeatTicker.Reset(10 * time.Minute) - } - } - } - } - }() - return nil } // ReloadPolicies reloads the policies @@ -466,6 +479,8 @@ func (e *RuleEngine) Stop() { if e.policyLoader != nil { e.policyLoader.Close() } + + e.wg.Wait() } func (e *RuleEngine) getEventTypeEnabled() map[eval.EventType]bool { @@ -487,6 +502,8 @@ func (e *RuleEngine) getEventTypeEnabled() map[eval.EventType]bool { switch eventType { case model.RawPacketEventType.String(): enabled[eventType] = e.probe.IsNetworkRawPacketEnabled() + case model.NetworkFlowMonitorEventType.String(): + enabled[eventType] = e.probe.IsNetworkFlowMonitorEnabled() default: enabled[eventType] = true } diff --git a/pkg/security/rules/filtermodel/os_only_filter.go b/pkg/security/rules/filtermodel/os_only_filter.go index 515203c44b6f9..254074f7283b9 100644 --- a/pkg/security/rules/filtermodel/os_only_filter.go +++ b/pkg/security/rules/filtermodel/os_only_filter.go @@ -62,24 +62,19 @@ func (e *OSOnlyFilterEvent) GetFieldValue(field eval.Field) (interface{}, error) // Init inits the rule filter event func (e *OSOnlyFilterEvent) Init() {} -// GetFieldEventType returns the event type for the given field -func (e *OSOnlyFilterEvent) GetFieldEventType(_ eval.Field) (string, error) { - return "*", nil -} - // SetFieldValue sets the value for the given field func (e *OSOnlyFilterEvent) SetFieldValue(field eval.Field, _ interface{}) error { return &eval.ErrFieldNotFound{Field: field} } -// GetFieldType get the type of the field -func (e *OSOnlyFilterEvent) GetFieldType(field eval.Field) (reflect.Kind, error) { +// GetFieldMetadata get the type of the field +func (e *OSOnlyFilterEvent) GetFieldMetadata(field eval.Field) (eval.EventType, reflect.Kind, error) { switch field { case "os": - return reflect.String, nil + return "*", reflect.String, nil } - return reflect.Invalid, &eval.ErrFieldNotFound{Field: field} + return "", reflect.Invalid, &eval.ErrFieldNotFound{Field: field} } // GetType returns the type for this event diff --git a/pkg/security/rules/filtermodel/rule_filters_model.go b/pkg/security/rules/filtermodel/rule_filters_model.go index ba8aefed13bba..b932313c7f927 100644 --- a/pkg/security/rules/filtermodel/rule_filters_model.go +++ b/pkg/security/rules/filtermodel/rule_filters_model.go @@ -10,36 +10,37 @@ import ( "reflect" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" - "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" ) +// RuleFilterEventConfig holds the config used by the rule filter event +type RuleFilterEventConfig struct { + COREEnabled bool + Origin string +} + // Init inits the rule filter event func (e *RuleFilterEvent) Init() {} -// GetFieldEventType returns the event type for the given 
field -func (e *RuleFilterEvent) GetFieldEventType(_ eval.Field) (string, error) { - return "*", nil -} - // SetFieldValue sets the value for the given field func (e *RuleFilterEvent) SetFieldValue(field eval.Field, _ interface{}) error { return &eval.ErrFieldNotFound{Field: field} } -// GetFieldType get the type of the field -func (e *RuleFilterEvent) GetFieldType(field eval.Field) (reflect.Kind, error) { +// GetFieldMetadata get the type of the field +func (e *RuleFilterEvent) GetFieldMetadata(field eval.Field) (eval.Field, reflect.Kind, error) { switch field { case "kernel.version.major", "kernel.version.minor", "kernel.version.patch", "kernel.version.abi": - return reflect.Int, nil + return "*", reflect.Int, nil case "kernel.version.flavor", "os", "os.id", "os.platform_id", "os.version_id", "envs", "origin", "hostname": - return reflect.String, nil + return "*", reflect.String, nil case "os.is_amazon_linux", "os.is_cos", "os.is_debian", "os.is_oracle", "os.is_rhel", "os.is_rhel7", "os.is_rhel8", "os.is_sles", "os.is_sles12", "os.is_sles15", "kernel.core.enabled": - return reflect.Bool, nil + return "*", reflect.Bool, nil } - return reflect.Invalid, &eval.ErrFieldNotFound{Field: field} + return "", reflect.Invalid, &eval.ErrFieldNotFound{Field: field} } // GetType returns the type for this event @@ -63,7 +64,7 @@ func (m *RuleFilterModel) GetFieldRestrictions(_ eval.Field) []eval.EventType { } func getHostname() string { - hostname, err := utils.GetHostname() + hostname, err := hostnameutils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" } diff --git a/pkg/security/rules/filtermodel/rule_filters_model_linux.go b/pkg/security/rules/filtermodel/rule_filters_model_linux.go index d35678f1253c5..465a3b7254c9d 100644 --- a/pkg/security/rules/filtermodel/rule_filters_model_linux.go +++ b/pkg/security/rules/filtermodel/rule_filters_model_linux.go @@ -12,7 +12,6 @@ import ( "os" "runtime" - "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" ) @@ -20,26 +19,23 @@ import ( // RuleFilterEvent defines a rule filter event type RuleFilterEvent struct { *kernel.Version - origin string - cfg *config.Config + cfg RuleFilterEventConfig } // RuleFilterModel defines a filter model type RuleFilterModel struct { *kernel.Version - origin string - cfg *config.Config + cfg RuleFilterEventConfig } // NewRuleFilterModel returns a new rule filter model -func NewRuleFilterModel(cfg *config.Config, origin string) (*RuleFilterModel, error) { +func NewRuleFilterModel(cfg RuleFilterEventConfig) (*RuleFilterModel, error) { kv, err := kernel.NewKernelVersion() if err != nil { return nil, err } return &RuleFilterModel{ Version: kv, - origin: origin, cfg: cfg, }, nil } @@ -48,7 +44,6 @@ func NewRuleFilterModel(cfg *config.Config, origin string) (*RuleFilterModel, er func (m *RuleFilterModel) NewEvent() eval.Event { return &RuleFilterEvent{ Version: m.Version, - origin: m.origin, cfg: m.cfg, } } @@ -191,7 +186,7 @@ func (m *RuleFilterModel) GetEvaluator(field eval.Field, _ eval.RegisterID) (eva }, nil case "origin": return &eval.StringEvaluator{ - Value: m.origin, + Value: m.cfg.Origin, Field: field, }, nil case "hostname": @@ -203,7 +198,7 @@ func (m *RuleFilterModel) GetEvaluator(field eval.Field, _ eval.RegisterID) (eva return &eval.BoolEvaluator{ EvalFnc: func(ctx *eval.Context) bool { revt := ctx.Event.(*RuleFilterEvent) - return revt.cfg != nil && 
revt.cfg.Probe.EnableCORE && revt.SupportCORE() + return revt.cfg.COREEnabled && revt.SupportCORE() }, Field: field, }, nil @@ -273,11 +268,11 @@ func (e *RuleFilterEvent) GetFieldValue(field eval.Field) (interface{}, error) { case "envs": return os.Environ(), nil case "origin": - return e.origin, nil + return e.cfg.Origin, nil case "hostname": return getHostname(), nil case "kernel.core.enabled": - return e.cfg != nil && e.cfg.Probe.EnableCORE && e.SupportCORE(), nil + return e.cfg.COREEnabled && e.SupportCORE(), nil } return nil, &eval.ErrFieldNotFound{Field: field} diff --git a/pkg/security/rules/filtermodel/rule_filters_model_other.go b/pkg/security/rules/filtermodel/rule_filters_model_other.go index be851798782f7..533a580e1bf9f 100644 --- a/pkg/security/rules/filtermodel/rule_filters_model_other.go +++ b/pkg/security/rules/filtermodel/rule_filters_model_other.go @@ -12,31 +12,30 @@ import ( "os" "runtime" - "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" ) // RuleFilterEvent represents a rule filtering event type RuleFilterEvent struct { - origin string + cfg RuleFilterEventConfig } // RuleFilterModel represents a rule fitlering model type RuleFilterModel struct { - origin string + cfg RuleFilterEventConfig } // NewRuleFilterModel returns a new rule filtering model -func NewRuleFilterModel(_ *config.Config, origin string) (*RuleFilterModel, error) { +func NewRuleFilterModel(cfg RuleFilterEventConfig) (*RuleFilterModel, error) { return &RuleFilterModel{ - origin: origin, + cfg: cfg, }, nil } // NewEvent returns a new rule filtering event func (m *RuleFilterModel) NewEvent() eval.Event { return &RuleFilterEvent{ - origin: m.origin, + cfg: m.cfg, } } @@ -74,7 +73,7 @@ func (m *RuleFilterModel) GetEvaluator(field eval.Field, _ eval.RegisterID) (eva }, nil case "origin": return &eval.StringEvaluator{ - Value: m.origin, + Value: m.cfg.Origin, Field: field, }, nil case "hostname": @@ -106,7 +105,7 @@ func (e *RuleFilterEvent) GetFieldValue(field eval.Field) (interface{}, error) { case "envs": return os.Environ(), nil case "origin": - return e.origin, nil + return e.cfg.Origin, nil case "hostname": return getHostname(), nil } diff --git a/pkg/security/secl/compiler/eval/context.go b/pkg/security/secl/compiler/eval/context.go index 038a24634b353..473c4480e6a1c 100644 --- a/pkg/security/secl/compiler/eval/context.go +++ b/pkg/security/secl/compiler/eval/context.go @@ -7,6 +7,7 @@ package eval import ( + "net" "sync" "time" ) @@ -23,6 +24,7 @@ type Context struct { // cache available across all the evaluations StringCache map[string][]string + IPNetCache map[string][]net.IPNet IntCache map[string][]int BoolCache map[string][]bool @@ -34,9 +36,11 @@ type Context struct { now time.Time - CachedAncestorsCount int + AncestorsCounters map[string]int resolvedFields []string + + Error error } // Now return and cache the `now` timestamp @@ -56,14 +60,16 @@ func (c *Context) SetEvent(evt Event) { func (c *Context) Reset() { c.Event = nil c.now = time.Time{} + c.Error = nil clear(c.StringCache) + clear(c.IPNetCache) clear(c.IntCache) clear(c.BoolCache) clear(c.Registers) clear(c.RegisterCache) - c.CachedAncestorsCount = 0 - clear(c.resolvedFields) + clear(c.AncestorsCounters) + c.resolvedFields = nil } // GetResolvedFields returns the resolved fields, always empty outside of functional tests @@ -74,12 +80,14 @@ func (c *Context) GetResolvedFields() []string { // NewContext return a new Context func NewContext(evt Event) *Context { 
return &Context{ - Event: evt, - StringCache: make(map[string][]string), - IntCache: make(map[string][]int), - BoolCache: make(map[string][]bool), - Registers: make(map[RegisterID]int), - RegisterCache: make(map[RegisterID]*RegisterCacheEntry), + Event: evt, + StringCache: make(map[string][]string), + IPNetCache: make(map[string][]net.IPNet), + IntCache: make(map[string][]int), + BoolCache: make(map[string][]bool), + Registers: make(map[RegisterID]int), + RegisterCache: make(map[RegisterID]*RegisterCacheEntry), + AncestorsCounters: make(map[string]int), } } diff --git a/pkg/security/secl/compiler/eval/eval.go b/pkg/security/secl/compiler/eval/eval.go index f417cbd82ee05..edc8cb94b3999 100644 --- a/pkg/security/secl/compiler/eval/eval.go +++ b/pkg/security/secl/compiler/eval/eval.go @@ -71,7 +71,7 @@ func identToEvaluator(obj *ident, opts *Opts, state *State) (interface{}, lexer. } if state.macros != nil { - if macro, ok := state.macros[*obj.Ident]; ok { + if macro, ok := state.macros.GetMacroEvaluator(*obj.Ident); ok { return macro.Value, obj.Pos, nil } } @@ -129,7 +129,7 @@ func arrayToEvaluator(array *ast.Array, opts *Opts, state *State) (interface{}, return &evaluator, array.Pos, nil } else if array.Ident != nil { if state.macros != nil { - if macro, ok := state.macros[*array.Ident]; ok { + if macro, ok := state.macros.GetMacroEvaluator(*array.Ident); ok { return macro.Value, array.Pos, nil } } diff --git a/pkg/security/secl/compiler/eval/eval_test.go b/pkg/security/secl/compiler/eval/eval_test.go index 6a217e8877444..02736a64623f7 100644 --- a/pkg/security/secl/compiler/eval/eval_test.go +++ b/pkg/security/secl/compiler/eval/eval_test.go @@ -39,15 +39,13 @@ func newOptsWithParams(constants map[string]interface{}, legacyFields map[Field] } func parseRule(expr string, model Model, opts *Opts) (*Rule, error) { - rule := NewRule("id1", expr, opts) - pc := ast.NewParsingContext(false) - - if err := rule.Parse(pc); err != nil { + rule, err := NewRule("id1", expr, pc, opts) + if err != nil { return nil, fmt.Errorf("parsing error: %v", err) } - if err := rule.GenEvaluator(model, pc); err != nil { + if err := rule.GenEvaluator(model); err != nil { return rule, fmt.Errorf("compilation error: %v", err) } @@ -1561,8 +1559,7 @@ func BenchmarkPartial(b *testing.B) { b.Fatal(err) } - pc := ast.NewParsingContext(false) - if err := rule.GenEvaluator(model, pc); err != nil { + if err := rule.GenEvaluator(model); err != nil { b.Fatal(err) } diff --git a/pkg/security/secl/compiler/eval/event.go b/pkg/security/secl/compiler/eval/event.go index d180df6bd4650..1fb7c534b6f61 100644 --- a/pkg/security/secl/compiler/eval/event.go +++ b/pkg/security/secl/compiler/eval/event.go @@ -19,14 +19,12 @@ type Event interface { Init() // GetType returns the Type of the Event GetType() EventType - // GetFieldEventType returns the Event Type for the given Field - GetFieldEventType(field Field) (EventType, error) + // GetFieldEventType returns the Event Field Metadata for the given Field + GetFieldMetadata(field Field) (EventType, reflect.Kind, error) // SetFieldValue sets the value of the given Field SetFieldValue(field Field, value interface{}) error // GetFieldValue returns the value of the given Field GetFieldValue(field Field) (interface{}, error) - // GetFieldType returns the Type of the Field - GetFieldType(field Field) (reflect.Kind, error) // GetTags returns a list of tags GetTags() []string } @@ -34,8 +32,9 @@ type Event interface { func eventTypeFromFields(model Model, state *State) (EventType, error) { var 
eventType EventType + ev := model.NewEvent() for field := range state.fieldValues { - evt, err := model.NewEvent().GetFieldEventType(field) + evt, _, err := ev.GetFieldMetadata(field) if err != nil { return "", err } diff --git a/pkg/security/secl/compiler/eval/macro.go b/pkg/security/secl/compiler/eval/macro.go index 8f465f697440d..2d4b06978c0cc 100644 --- a/pkg/security/secl/compiler/eval/macro.go +++ b/pkg/security/secl/compiler/eval/macro.go @@ -95,11 +95,7 @@ func (m *Macro) Parse(parsingContext *ast.ParsingContext, expression string) err } func macroToEvaluator(macro *ast.Macro, model Model, opts *Opts, field Field) (*MacroEvaluator, error) { - macros := make(map[MacroID]*MacroEvaluator) - for _, macro := range opts.MacroStore.List() { - macros[macro.ID] = macro.evaluator - } - state := NewState(model, field, macros) + state := NewState(model, field, opts.MacroStore) var eval interface{} var err error diff --git a/pkg/security/secl/compiler/eval/model_test.go b/pkg/security/secl/compiler/eval/model_test.go index 6b8e0ed0f1ba8..b3fbe050ba24b 100644 --- a/pkg/security/secl/compiler/eval/model_test.go +++ b/pkg/security/secl/compiler/eval/model_test.go @@ -650,116 +650,116 @@ func (e *testEvent) GetFieldValue(field Field) (interface{}, error) { return nil, &ErrFieldNotFound{Field: field} } -func (e *testEvent) GetFieldEventType(field Field) (string, error) { +func (e *testEvent) GetFieldMetadata(field Field) (string, reflect.Kind, error) { switch field { case "network.ip": - return "network", nil + return "network", reflect.Struct, nil case "network.ips": - return "network", nil + return "network", reflect.Array, nil case "network.cidr": - return "network", nil + return "network", reflect.Struct, nil case "network.cidrs": - return "network", nil + return "network", reflect.Array, nil case "process.name": - return "", nil + return "", reflect.String, nil case "process.argv0": - return "", nil + return "", reflect.String, nil case "process.uid": - return "", nil + return "", reflect.Int, nil case "process.gid": - return "", nil + return "", reflect.Int, nil case "process.pid": - return "", nil + return "", reflect.Int, nil case "process.is_root": - return "", nil + return "", reflect.Bool, nil case "process.list.key": - return "", nil + return "", reflect.Int, nil case "process.list.value": - return "", nil + return "", reflect.String, nil case "process.list.flag": - return "", nil + return "", reflect.Bool, nil case "process.array.key": - return "", nil + return "", reflect.Int, nil case "process.array.value": - return "", nil + return "", reflect.String, nil case "process.array.flag": - return "", nil + return "", reflect.Bool, nil case "process.created_at": - return "", nil + return "", reflect.Int, nil case "process.or_name": - return "", nil + return "", reflect.String, nil case "process.or_array.value": - return "", nil + return "", reflect.String, nil case "open.filename": - return "open", nil + return "open", reflect.String, nil case "retval": - return "", nil + return "", reflect.Int, nil case "open.flags": - return "open", nil + return "open", reflect.Int, nil case "open.mode": - return "open", nil + return "open", reflect.Int, nil case "open.opened_at": - return "open", nil + return "open", reflect.Int, nil case "mkdir.filename": - return "mkdir", nil + return "mkdir", reflect.String, nil case "mkdir.mode": - return "mkdir", nil + return "mkdir", reflect.Int, nil } - return "", &ErrFieldNotFound{Field: field} + return "", reflect.Invalid, &ErrFieldNotFound{Field: field} } func 
(e *testEvent) SetFieldValue(field Field, value interface{}) error { @@ -859,96 +859,6 @@ func (e *testEvent) SetFieldValue(field Field, value interface{}) error { return &ErrFieldNotFound{Field: field} } -func (e *testEvent) GetFieldType(field Field) (reflect.Kind, error) { - switch field { - - case "network.ip": - - return reflect.Struct, nil - - case "network.ips": - - return reflect.Array, nil - - case "network.cidr": - - return reflect.Struct, nil - - case "network.cidrs": - - return reflect.Array, nil - - case "process.name": - - return reflect.String, nil - - case "process.argv0": - - return reflect.String, nil - - case "process.uid": - - return reflect.Int, nil - - case "process.gid": - - return reflect.Int, nil - - case "process.pid": - - return reflect.Int, nil - - case "process.is_root": - - return reflect.Bool, nil - - case "process.list.key": - return reflect.Int, nil - - case "process.list.value": - return reflect.Int, nil - - case "process.list.flag": - return reflect.Bool, nil - - case "process.array.key": - return reflect.Int, nil - - case "process.array.value": - return reflect.String, nil - - case "process.array.flag": - return reflect.Bool, nil - - case "open.filename": - - return reflect.String, nil - - case "retval": - - return reflect.Int, nil - - case "open.flags": - - return reflect.Int, nil - - case "open.mode": - - return reflect.Int, nil - - case "mkdir.filename": - - return reflect.String, nil - - case "mkdir.mode": - - return reflect.Int, nil - - } - - return reflect.Invalid, &ErrFieldNotFound{Field: field} -} - var testConstants = map[string]interface{}{ // boolean "true": &BoolEvaluator{Value: true}, diff --git a/pkg/security/secl/compiler/eval/opts.go b/pkg/security/secl/compiler/eval/opts.go index ac7f499f84299..4552c49850266 100644 --- a/pkg/security/secl/compiler/eval/opts.go +++ b/pkg/security/secl/compiler/eval/opts.go @@ -27,7 +27,7 @@ func (s *MacroStore) List() []*Macro { } // Get returns the marcro -func (s *MacroStore) Get(id string) *Macro { +func (s *MacroStore) Get(id MacroID) *Macro { if s == nil { return nil } @@ -40,6 +40,15 @@ func (s *MacroStore) Get(id string) *Macro { return nil } +// GetMacroEvaluator returns the macro evaluator associated with the macro ID +func (s *MacroStore) GetMacroEvaluator(id MacroID) (*MacroEvaluator, bool) { + macro := s.Get(id) + if macro == nil { + return nil, false + } + return macro.evaluator, true +} + // Contains returns returns true is there is already a macro with this ID in the store func (s *MacroStore) Contains(id string) bool { return s.Get(id) != nil diff --git a/pkg/security/secl/compiler/eval/rule.go b/pkg/security/secl/compiler/eval/rule.go index cb130f767676b..096eae4c9e021 100644 --- a/pkg/security/secl/compiler/eval/rule.go +++ b/pkg/security/secl/compiler/eval/rule.go @@ -53,7 +53,7 @@ type RuleEvaluator struct { } // NewRule returns a new rule -func NewRule(id string, expression string, opts *Opts, tags ...string) *Rule { +func NewRule(id string, expression string, parsingContext *ast.ParsingContext, opts *Opts, tags ...string) (*Rule, error) { if opts.MacroStore == nil { opts.WithMacroStore(&MacroStore{}) } @@ -66,13 +66,19 @@ func NewRule(id string, expression string, opts *Opts, tags ...string) *Rule { panic(err) } + astRule, err := parsingContext.ParseRule(expression) + if err != nil { + return nil, err + } + return &Rule{ ID: id, Expression: expression, Opts: opts, Tags: tags, pprofLabels: labelSet, - } + ast: astRule, + }, nil } // IsPartialAvailable checks if partial have been 
generated for the given Field @@ -196,11 +202,7 @@ func (r *Rule) Parse(parsingContext *ast.ParsingContext) error { // NewRuleEvaluator returns a new evaluator for a rule func NewRuleEvaluator(rule *ast.Rule, model Model, opts *Opts) (*RuleEvaluator, error) { - macros := make(map[MacroID]*MacroEvaluator) - for _, macro := range opts.MacroStore.List() { - macros[macro.ID] = macro.evaluator - } - state := NewState(model, "", macros) + state := NewState(model, "", opts.MacroStore) eval, _, err := nodeToEvaluator(rule.BooleanExpression, opts, state) if err != nil { @@ -274,15 +276,9 @@ func NewRuleEvaluator(rule *ast.Rule, model Model, opts *Opts) (*RuleEvaluator, } // GenEvaluator - Compile and generates the RuleEvaluator -func (r *Rule) GenEvaluator(model Model, parsingCtx *ast.ParsingContext) error { +func (r *Rule) GenEvaluator(model Model) error { r.Model = model - if r.ast == nil { - if err := r.Parse(parsingCtx); err != nil { - return err - } - } - evaluator, err := NewRuleEvaluator(r.ast, model, r.Opts) if err != nil { if err, ok := err.(*ErrAstToEval); ok { @@ -331,7 +327,7 @@ func (r *Rule) genPartials(field Field) error { return err } - state := NewState(r.Model, field, macroPartial) + state := NewState(r.Model, field, partialMacroEvaluatorGetter(macroPartial)) pEval, _, err := nodeToEvaluator(r.ast.BooleanExpression, r.Opts, state) if err != nil { return fmt.Errorf("couldn't generate partial for field %s and rule %s: %w", field, r.ID, err) @@ -352,3 +348,10 @@ func (r *Rule) genPartials(field Field) error { return nil } + +type partialMacroEvaluatorGetter map[MacroID]*MacroEvaluator + +func (p partialMacroEvaluatorGetter) GetMacroEvaluator(macroID string) (*MacroEvaluator, bool) { + v, ok := p[macroID] + return v, ok +} diff --git a/pkg/security/secl/compiler/eval/state.go b/pkg/security/secl/compiler/eval/state.go index b50fb42d7fe7c..cd1b6a4eb3440 100644 --- a/pkg/security/secl/compiler/eval/state.go +++ b/pkg/security/secl/compiler/eval/state.go @@ -21,7 +21,7 @@ type State struct { model Model field Field fieldValues map[Field][]FieldValue - macros map[MacroID]*MacroEvaluator + macros MacroEvaluatorGetter regexpCache StateRegexpCache registers []Register } @@ -52,10 +52,7 @@ func (s *State) UpdateFieldValues(field Field, value FieldValue) error { } // NewState returns a new State -func NewState(model Model, field Field, macros map[MacroID]*MacroEvaluator) *State { - if macros == nil { - macros = make(map[MacroID]*MacroEvaluator) - } +func NewState(model Model, field Field, macros MacroEvaluatorGetter) *State { return &State{ field: field, macros: macros, @@ -63,3 +60,8 @@ func NewState(model Model, field Field, macros map[MacroID]*MacroEvaluator) *Sta fieldValues: make(map[Field][]FieldValue), } } + +// MacroEvaluatorGetter is an interface to get a MacroEvaluator +type MacroEvaluatorGetter interface { + GetMacroEvaluator(macroID string) (*MacroEvaluator, bool) +} diff --git a/pkg/security/secl/compiler/eval/variables.go b/pkg/security/secl/compiler/eval/variables.go index 3d0d865054276..b20fe1b996f47 100644 --- a/pkg/security/secl/compiler/eval/variables.go +++ b/pkg/security/secl/compiler/eval/variables.go @@ -505,6 +505,13 @@ func (v *Variables) Set(name string, value interface{}) bool { return !existed } +// Stop the underlying ttl lru +func (v *Variables) Stop() { + if v.lru != nil { + v.lru.Stop() + } +} + // ScopedVariables holds a set of scoped variables type ScopedVariables struct { scoper Scoper @@ -516,13 +523,13 @@ func (v *ScopedVariables) Len() int { return 
len(v.vars) } +func (v *ScopedVariables) getVariables(ctx *Context) *Variables { + key := v.scoper(ctx) + return v.vars[key] +} + // GetVariable returns new variable of the type of the specified value func (v *ScopedVariables) GetVariable(name string, value interface{}, _ VariableOpts) (VariableValue, error) { - getVariables := func(ctx *Context) *Variables { - v := v.vars[v.scoper(ctx)] - return v - } - setVariable := func(ctx *Context, value interface{}) error { key := v.scoper(ctx) if key == nil { @@ -543,35 +550,35 @@ func (v *ScopedVariables) GetVariable(name string, value interface{}, _ Variable switch value.(type) { case int: return NewIntVariable(func(ctx *Context) int { - if vars := getVariables(ctx); vars != nil { + if vars := v.getVariables(ctx); vars != nil { return vars.GetInt(name) } return 0 }, setVariable), nil case bool: return NewBoolVariable(func(ctx *Context) bool { - if vars := getVariables(ctx); vars != nil { + if vars := v.getVariables(ctx); vars != nil { return vars.GetBool(name) } return false }, setVariable), nil case string: return NewStringVariable(func(ctx *Context) string { - if vars := getVariables(ctx); vars != nil { + if vars := v.getVariables(ctx); vars != nil { return vars.GetString(name) } return "" }, setVariable), nil case []string: return NewStringArrayVariable(func(ctx *Context) []string { - if vars := getVariables(ctx); vars != nil { + if vars := v.getVariables(ctx); vars != nil { return vars.GetStringArray(name) } return nil }, setVariable), nil case []int: return NewIntArrayVariable(func(ctx *Context) []int { - if vars := getVariables(ctx); vars != nil { + if vars := v.getVariables(ctx); vars != nil { return vars.GetIntArray(name) } return nil @@ -584,7 +591,10 @@ func (v *ScopedVariables) GetVariable(name string, value interface{}, _ Variable // ReleaseVariable releases a scoped variable func (v *ScopedVariables) ReleaseVariable(key ScopedVariable) { - delete(v.vars, key) + if variables, ok := v.vars[key]; ok { + variables.Stop() + delete(v.vars, key) + } } // NewScopedVariables returns a new set of scope variables diff --git a/pkg/security/secl/go.mod b/pkg/security/secl/go.mod index da9770680ce9c..9032535921ef4 100644 --- a/pkg/security/secl/go.mod +++ b/pkg/security/secl/go.mod @@ -4,44 +4,29 @@ go 1.23.0 require ( github.com/Masterminds/semver/v3 v3.3.1 - github.com/Masterminds/sprig/v3 v3.3.0 github.com/alecthomas/participle v0.7.1 - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/fatih/structtag v1.2.0 github.com/google/go-cmp v0.6.0 github.com/google/gopacket v1.1.19 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/jellydator/ttlcache/v3 v3.3.0 - github.com/skydive-project/go-debouncer v1.0.0 - github.com/spf13/cast v1.7.0 + github.com/skydive-project/go-debouncer v1.0.1 + github.com/spf13/cast v1.7.1 github.com/stretchr/testify v1.10.0 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/sys v0.28.0 - golang.org/x/text v0.21.0 - golang.org/x/tools v0.28.0 + golang.org/x/sys v0.29.0 gopkg.in/yaml.v3 v3.0.1 - modernc.org/mathutil v1.6.0 sigs.k8s.io/yaml v1.4.0 ) require ( - dario.cat/mergo v1.0.1 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/google/uuid v1.6.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/hashicorp/errwrap v1.1.0 // indirect - github.com/huandu/xstrings v1.5.0 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk 
v1.0.2 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rogpeppe/go-internal v1.13.1 // indirect - github.com/shopspring/decimal v1.4.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - golang.org/x/crypto v0.31.0 // indirect - golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.33.0 // indirect + golang.org/x/net v0.34.0 // indirect golang.org/x/sync v0.10.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect ) diff --git a/pkg/security/secl/go.sum b/pkg/security/secl/go.sum index b3daad8d9516f..6ffb67e782021 100644 --- a/pkg/security/secl/go.sum +++ b/pkg/security/secl/go.sum @@ -1,11 +1,5 @@ -dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= -dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= -github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= -github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0= github.com/alecthomas/participle v0.7.1 h1:2bN7reTw//5f0cugJcTOnY/NYZcWQOaajW+BwZB5xWs= github.com/alecthomas/participle v0.7.1/go.mod h1:HfdmEuwvr12HXQN44HPWXR0lHmVolVYe4dyL6lQ3duY= github.com/alecthomas/repr v0.0.0-20181024024818-d37bc2a10ba1/go.mod h1:xTS7Pm1pD1mvyM075QCDSRqH6qRLXylzS24ZTpRiSzQ= @@ -13,8 +7,6 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -22,8 +14,6 @@ github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -31,8 +21,6 
@@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= -github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/jellydator/ttlcache/v3 v3.3.0 h1:BdoC9cE81qXfrxeb9eoJi9dWrdhSuwXMAnHTbnBm4Wc= github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMjQcy41i7th0GVGw= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -42,23 +30,15 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= -github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/skydive-project/go-debouncer v1.0.0 h1:cqU19PyN7WXsnSlMTANvnHws6lGcbVOH2aDQzwe6qbk= -github.com/skydive-project/go-debouncer v1.0.0/go.mod h1:7pK+5HBlYCD8W2cXhvMRsMsdWelDEPfpbE6PwSlDX68= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/skydive-project/go-debouncer v1.0.1 h1:N75Mdusd65Jjbc7k5t2oo+7qLIdMtSNJKssmpEYuSgo= +github.com/skydive-project/go-debouncer v1.0.1/go.mod h1:7pK+5HBlYCD8W2cXhvMRsMsdWelDEPfpbE6PwSlDX68= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -75,29 +55,21 @@ go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak 
v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -105,7 +77,5 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= -modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod 
h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/security/secl/model/accessors_unix.go b/pkg/security/secl/model/accessors_unix.go index 7b0b033160229..8970d8871b2fd 100644 --- a/pkg/security/secl/model/accessors_unix.go +++ b/pkg/security/secl/model/accessors_unix.go @@ -18,9 +18,11 @@ import ( // to always require the math package var _ = math.MaxUint16 +var _ = net.IP{} -func (m *Model) GetEventTypes() []eval.EventType { +func (_ *Model) GetEventTypes() []eval.EventType { return []eval.EventType{ + eval.EventType("accept"), eval.EventType("bind"), eval.EventType("bpf"), eval.EventType("capset"), @@ -38,6 +40,7 @@ func (m *Model) GetEventTypes() []eval.EventType { eval.EventType("mmap"), eval.EventType("mount"), eval.EventType("mprotect"), + eval.EventType("network_flow_monitor"), eval.EventType("ondemand"), eval.EventType("open"), eval.EventType("packet"), @@ -56,7 +59,7 @@ func (m *Model) GetEventTypes() []eval.EventType { eval.EventType("utimes"), } } -func (m *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { +func (_ *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { switch field { case "network.destination.ip": return []eval.EventType{"dns", "imds"} @@ -70,6 +73,8 @@ func (m *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { return []eval.EventType{"dns", "imds"} case "network.l4_protocol": return []eval.EventType{"dns", "imds"} + case "network.network_direction": + return []eval.EventType{"dns", "imds"} case "network.size": return []eval.EventType{"dns", "imds"} case "network.source.ip": @@ -81,8 +86,58 @@ func (m *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { } return nil } -func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Evaluator, error) { +func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Evaluator, error) { switch field { + case "accept.addr.family": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return int(ev.Accept.AddrFamily) + }, + Field: field, + Weight: eval.FunctionWeight, + }, nil + case "accept.addr.ip": + return &eval.CIDREvaluator{ + EvalFnc: func(ctx *eval.Context) net.IPNet { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return ev.Accept.Addr.IPNet + }, + Field: field, + Weight: eval.FunctionWeight, + }, nil + case "accept.addr.is_public": + return &eval.BoolEvaluator{ + EvalFnc: func(ctx *eval.Context) bool { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Accept.Addr) + }, + Field: field, + Weight: eval.HandlerWeight, + }, nil + case "accept.addr.port": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return int(ev.Accept.Addr.Port) + }, + Field: field, + Weight: eval.FunctionWeight, + }, nil + case "accept.retval": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return int(ev.Accept.SyscallEvent.Retval) + }, + Field: field, + Weight: eval.FunctionWeight, + }, nil case "bind.addr.family": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -1547,6 +1602,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 
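(Editor's note.) The regenerated accessors_unix.go hunks that begin above add the accept and network_flow_monitor event types, restrict network.network_direction to dns/imds like the other network.* fields, and change how unresolvable fields behave: when the process is a kworker (and, further below, when it has no interpreter), the evaluator now records eval.ErrNotSupported on the context instead of silently returning the zero value. Purely as an illustration of the new accept.* fields, a rule expression could look like the constant below; the expression is hypothetical and has not been validated against the compiler:

// Hypothetical SECL expression built only from the accept.* fields added in this diff.
const exampleAcceptExpr = `accept.retval >= 0 && accept.addr.port == 8080 && accept.addr.is_public == true`
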
} return int(ev.Exec.Process.FileEvent.FileFields.CTime) @@ -1560,6 +1616,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exec.Process.FileEvent) @@ -1573,6 +1630,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.GID) @@ -1586,6 +1644,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exec.Process.FileEvent.FileFields) @@ -1599,6 +1658,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exec.Process.FileEvent) @@ -1612,6 +1672,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exec.Process.FileEvent.FileFields) @@ -1625,6 +1686,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.PathKey.Inode) @@ -1638,6 +1700,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.Mode) @@ -1651,6 +1714,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.MTime) @@ -1664,6 +1728,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.PathKey.MountID) @@ -1678,6 +1743,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent) @@ -1702,6 +1768,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + 
ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exec.Process.FileEvent) @@ -1715,6 +1782,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exec.Process.FileEvent) @@ -1728,6 +1796,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exec.Process.FileEvent) @@ -1742,6 +1811,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent) @@ -1766,6 +1836,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exec.Process.FileEvent.FileFields)) @@ -1779,6 +1850,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.FileEvent.FileFields.UID) @@ -1792,6 +1864,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exec.Process.FileEvent.FileFields) @@ -1865,6 +1938,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -1878,6 +1952,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -1891,6 +1966,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -1904,6 +1980,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) @@ -1917,6 +1994,7 @@ func (m *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -1930,6 +2008,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) @@ -1943,6 +2022,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -1956,6 +2036,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -1969,6 +2050,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -1982,6 +2064,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -1996,6 +2079,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2020,6 +2104,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2033,6 +2118,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2046,6 +2132,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2060,6 +2147,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) @@ -2084,6 +2172,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields)) @@ -2097,6 +2186,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -2110,6 +2200,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exec.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) @@ -2513,6 +2604,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.CTime) @@ -2526,6 +2618,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exit.Process.FileEvent) @@ -2539,6 +2632,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.GID) @@ -2552,6 +2646,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exit.Process.FileEvent.FileFields) @@ -2565,6 +2660,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exit.Process.FileEvent) @@ -2578,6 +2674,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exit.Process.FileEvent.FileFields) @@ -2591,6 +2688,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return 
int(ev.Exit.Process.FileEvent.FileFields.PathKey.Inode) @@ -2604,6 +2702,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.Mode) @@ -2617,6 +2716,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.MTime) @@ -2630,6 +2730,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.PathKey.MountID) @@ -2644,6 +2745,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent) @@ -2668,6 +2770,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exit.Process.FileEvent) @@ -2681,6 +2784,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exit.Process.FileEvent) @@ -2694,6 +2798,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exit.Process.FileEvent) @@ -2708,6 +2813,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent) @@ -2732,6 +2838,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exit.Process.FileEvent.FileFields)) @@ -2745,6 +2852,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.FileEvent.FileFields.UID) @@ -2758,6 +2866,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.IsNotKworker() { + ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exit.Process.FileEvent.FileFields) @@ -2831,6 +2940,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -2844,6 +2954,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -2857,6 +2968,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -2870,6 +2982,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) @@ -2883,6 +2996,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -2896,6 +3010,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) @@ -2909,6 +3024,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -2922,6 +3038,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -2935,6 +3052,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -2948,6 +3066,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ 
-2962,6 +3081,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -2986,6 +3106,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -2999,6 +3120,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -3012,6 +3134,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -3026,6 +3149,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) @@ -3050,6 +3174,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields)) @@ -3063,6 +3188,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -3076,6 +3202,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Exit.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) @@ -4219,6 +4346,26 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "mkdir.syscall.mode": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return int(ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Mkdir.SyscallContext)) + }, + Field: field, + Weight: 900 * eval.HandlerWeight, + }, nil + case "mkdir.syscall.path": + return &eval.StringEvaluator{ + EvalFnc: func(ctx *eval.Context) string { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Mkdir.SyscallContext) + }, + Field: field, + Weight: 900 * eval.HandlerWeight, 
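(Editor's note.) The hunk above also introduces mkdir.syscall.mode and mkdir.syscall.path, resolved from the saved syscall context through ResolveSyscallCtxArgsInt2/ResolveSyscallCtxArgsStr1 and weighted as expensive handler calls (900 * eval.HandlerWeight). A hypothetical expression using the new path field, shown only as an illustration and not validated against the compiler:

// Hypothetical SECL expression using the syscall-argument path added above.
const exampleMkdirExpr = `mkdir.syscall.path =~ "/etc/**"`
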
+ }, nil case "mmap.file.change_time": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -4623,6 +4770,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "network.network_direction": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return int(ev.NetworkContext.NetworkDirection) + }, + Field: field, + Weight: eval.FunctionWeight, + }, nil case "network.size": return &eval.IntEvaluator{ EvalFnc: func(ctx *eval.Context) int { @@ -4663,6 +4820,328 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "network_flow_monitor.device.ifname": + return &eval.StringEvaluator{ + EvalFnc: func(ctx *eval.Context) string { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.NetworkFlowMonitor.Device) + }, + Field: field, + Weight: eval.HandlerWeight, + }, nil + case "network_flow_monitor.flows.destination.ip": + return &eval.CIDRArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []net.IPNet { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := element.Destination.IPNet + return []net.IPNet{result} + } + if result, ok := ctx.IPNetCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) net.IPNet { + return current.Destination.IPNet + }) + ctx.IPNetCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.destination.is_public": + return &eval.BoolArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []bool { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := ev.FieldHandlers.ResolveIsIPPublic(ev, &element.Destination) + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *Flow) bool { + return ev.FieldHandlers.ResolveIsIPPublic(ev, ¤t.Destination) + }) + ctx.BoolCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.destination.port": + return &eval.IntArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []int { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := int(element.Destination.Port) + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + return int(current.Destination.Port) + }) + ctx.IntCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.egress.data_size": + return &eval.IntArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []int { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID 
!= "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := int(element.Egress.DataSize) + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + return int(current.Egress.DataSize) + }) + ctx.IntCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.egress.packet_count": + return &eval.IntArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []int { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := int(element.Egress.PacketCount) + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + return int(current.Egress.PacketCount) + }) + ctx.IntCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.ingress.data_size": + return &eval.IntArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []int { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := int(element.Ingress.DataSize) + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + return int(current.Ingress.DataSize) + }) + ctx.IntCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.ingress.packet_count": + return &eval.IntArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []int { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := int(element.Ingress.PacketCount) + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + return int(current.Ingress.PacketCount) + }) + ctx.IntCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.l3_protocol": + return &eval.IntArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []int { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := int(element.L3Protocol) + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + return int(current.L3Protocol) + }) + ctx.IntCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.l4_protocol": + return &eval.IntArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []int { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID 
!= "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := int(element.L4Protocol) + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + return int(current.L4Protocol) + }) + ctx.IntCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.length": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + return iterator.Len(ctx) + }, + Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.source.ip": + return &eval.CIDRArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []net.IPNet { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := element.Source.IPNet + return []net.IPNet{result} + } + if result, ok := ctx.IPNetCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) net.IPNet { + return current.Source.IPNet + }) + ctx.IPNetCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.source.is_public": + return &eval.BoolArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []bool { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := ev.FieldHandlers.ResolveIsIPPublic(ev, &element.Source) + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *Flow) bool { + return ev.FieldHandlers.ResolveIsIPPublic(ev, ¤t.Source) + }) + ctx.BoolCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil + case "network_flow_monitor.flows.source.port": + return &eval.IntArrayEvaluator{ + EvalFnc: func(ctx *eval.Context) []int { + ctx.AppendResolvedField(field) + iterator := &FlowsIterator{} + if regID != "" { + value := iterator.At(ctx, regID, ctx.Registers[regID]) + if value == nil { + return nil + } + element := *value + result := int(element.Source.Port) + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *Flow) int { + return int(current.Source.Port) + }) + ctx.IntCache[field] = results + return results + }, Field: field, + Weight: eval.IteratorWeight, + }, nil case "ondemand.arg1.str": return &eval.StringEvaluator{ EvalFnc: func(ctx *eval.Context) string { @@ -5088,6 +5567,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "packet.network_direction": + return &eval.IntEvaluator{ + EvalFnc: func(ctx *eval.Context) int { + ctx.AppendResolvedField(field) + ev := ctx.Event.(*Event) + return int(ev.RawPacket.NetworkContext.NetworkDirection) + }, + Field: field, + Weight: eval.FunctionWeight, + }, nil case "packet.size": return &eval.IntEvaluator{ EvalFnc: 
func(ctx *eval.Context) int { @@ -5143,23 +5632,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessArgs(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessArgs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -5171,23 +5657,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &pce.ProcessContext.Process) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -5199,23 +5682,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -5227,23 +5707,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results return results @@ -5255,23 +5732,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessArgv(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessArgv(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -5283,23 +5757,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessArgv0(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessArgv0(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -5310,23 +5781,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.AUID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.AUID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.AUID) }) ctx.IntCache[field] = results return results @@ -5337,23 +5805,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.CapEffective) - results = append(results, 
result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.CapEffective) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.CapEffective) }) ctx.IntCache[field] = results return results @@ -5364,23 +5829,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.CapPermitted) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.CapPermitted) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.CapPermitted) }) ctx.IntCache[field] = results return results @@ -5391,23 +5853,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.CGroup.CGroupFile.Inode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.CGroup.CGroupFile.Inode) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.CGroup.CGroupFile.Inode) }) ctx.IntCache[field] = results return results @@ -5418,23 +5877,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.CGroup.CGroupFile.MountID) - results = append(results, result) - return results + return []int{result} + } 
+ if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.CGroup.CGroupFile.MountID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.CGroup.CGroupFile.MountID) }) ctx.IntCache[field] = results return results @@ -5446,23 +5902,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveCGroupID(ev, &pce.ProcessContext.Process.CGroup) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveCGroupID(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results return results @@ -5474,23 +5927,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveCGroupManager(ev, &pce.ProcessContext.Process.CGroup) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveCGroupManager(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results return results @@ -5502,23 +5952,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(ev.FieldHandlers.ResolveCGroupVersion(ev, 
&element.ProcessContext.Process.CGroup)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return int(ev.FieldHandlers.ResolveCGroupVersion(ev, &pce.ProcessContext.Process.CGroup)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return int(ev.FieldHandlers.ResolveCGroupVersion(ev, ¤t.ProcessContext.Process.CGroup)) }) ctx.IntCache[field] = results return results @@ -5529,23 +5976,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Comm - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Comm + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Comm }) ctx.StringCache[field] = results return results @@ -5557,23 +6001,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessContainerID(ev, &pce.ProcessContext.Process) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessContainerID(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -5585,23 +6026,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := 
int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results return results @@ -5612,23 +6050,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.EGID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.EGID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.EGID) }) ctx.IntCache[field] = results return results @@ -5639,23 +6074,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.EGroup - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.EGroup + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.EGroup }) ctx.StringCache[field] = results return results @@ -5667,23 +6099,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := 
ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvp(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -5695,23 +6124,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvs(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -5723,23 +6149,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []bool{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &pce.ProcessContext.Process) + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results return results @@ -5750,23 +6173,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := 
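
Two helper flavours appear throughout these hunks: `newAncestorsIterator` for fields that yield one value per ancestor (comm, euid, ...) and `newAncestorsIteratorArray` for resolvers that already return a slice per ancestor (envp, envs, argv flags and options), whose per-ancestor slices are concatenated. The generic functions below only illustrate that scalar-versus-slice split; the real helpers take the iterator, the field name, the context, and the event, and are not necessarily implemented with type parameters.

```go
// Hedged sketch of the scalar vs. array collection split; `entry` and both
// collect functions are illustrative stand-ins, not the generated helpers.
package main

import "fmt"

type entry struct {
	comm string
	envp []string
}

// collectScalar mirrors newAncestorsIterator: one result per element.
func collectScalar[T any](elems []entry, fn func(*entry) T) []T {
	out := make([]T, 0, len(elems))
	for i := range elems {
		out = append(out, fn(&elems[i]))
	}
	return out
}

// collectArray mirrors newAncestorsIteratorArray: each element yields a slice,
// and the slices are concatenated into one flat result.
func collectArray[T any](elems []entry, fn func(*entry) []T) []T {
	var out []T
	for i := range elems {
		out = append(out, fn(&elems[i])...)
	}
	return out
}

func main() {
	ancestors := []entry{
		{comm: "bash", envp: []string{"PATH=/usr/bin", "HOME=/root"}},
		{comm: "sshd", envp: []string{"LANG=C"}},
	}
	fmt.Println(collectScalar(ancestors, func(e *entry) string { return e.comm })) // [bash sshd]
	fmt.Println(collectArray(ancestors, func(e *entry) []string { return e.envp })) // [PATH=/usr/bin HOME=/root LANG=C]
}
```

This matches the register fast paths above: the scalar cases wrap the single value (`return []string{result}`), while the array cases hand back the resolver's slice unchanged (`return result`).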
iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.EUID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.EUID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.EUID) }) ctx.IntCache[field] = results return results @@ -5777,23 +6197,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.EUser - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.EUser + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.EUser }) ctx.StringCache[field] = results return results @@ -5804,29 +6221,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.CTime) + return int(current.ProcessContext.Process.FileEvent.FileFields.CTime) }) ctx.IntCache[field] = results return results @@ -5838,29 +6254,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok 
:= ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveFileFilesystem(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -5871,29 +6286,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.GID) + return int(current.ProcessContext.Process.FileEvent.FileFields.GID) }) ctx.IntCache[field] = results return results @@ -5905,29 +6319,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } 
result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsGroup(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -5939,29 +6352,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result...) - return results + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return nil } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveHashesFromEvent(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -5973,29 +6385,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, false) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - if 
!pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) }) ctx.BoolCache[field] = results return results @@ -6006,29 +6417,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) + return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) }) ctx.IntCache[field] = results return results @@ -6039,29 +6449,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.Mode) + return int(current.ProcessContext.Process.FileEvent.FileFields.Mode) }) ctx.IntCache[field] = results return results @@ -6072,29 
+6481,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.MTime) + return int(current.ProcessContext.Process.FileEvent.FileFields.MTime) }) ctx.IntCache[field] = results return results @@ -6105,29 +6513,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) + return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) }) ctx.IntCache[field] = results return results @@ -6140,29 +6547,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := 
value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -6175,23 +6581,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results return results @@ -6203,29 +6606,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolvePackageName(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -6237,29 +6639,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolvePackageSourceVersion(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -6271,29 +6672,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolvePackageVersion(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -6306,29 +6706,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: 
func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -6341,23 +6740,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results return results @@ -6369,29 +6765,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) - results = append(results, result) - return results + return []int{result} + } + 
if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.FileEvent.FileFields)) + return int(ev.FieldHandlers.ResolveRights(ev, ¤t.ProcessContext.Process.FileEvent.FileFields)) }) ctx.IntCache[field] = results return results @@ -6402,29 +6797,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.UID) + return int(current.ProcessContext.Process.FileEvent.FileFields.UID) }) ctx.IntCache[field] = results return results @@ -6436,29 +6830,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return 
ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsUser(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -6469,23 +6862,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.FSGID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.FSGID) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.FSGID) }) ctx.IntCache[field] = results return results @@ -6496,23 +6886,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.FSGroup - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.FSGroup + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.FSGroup }) ctx.StringCache[field] = results return results @@ -6523,23 +6910,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.FSUID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.FSUID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev 
*Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.FSUID) }) ctx.IntCache[field] = results return results @@ -6550,23 +6934,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.FSUser - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.FSUser + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.FSUser }) ctx.StringCache[field] = results return results @@ -6577,23 +6958,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.GID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.GID) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.GID) }) ctx.IntCache[field] = results return results @@ -6604,23 +6982,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.Group - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.Group + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return 
current.ProcessContext.Process.Credentials.Group }) ctx.StringCache[field] = results return results @@ -6631,29 +7006,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) }) ctx.IntCache[field] = results return results @@ -6665,29 +7039,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFileFilesystem(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -6698,29 +7071,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator :=
&ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) }) ctx.IntCache[field] = results return results @@ -6732,29 +7104,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -6766,29 +7137,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error =
&eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return nil } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveHashesFromEvent(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -6800,29 +7170,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, false) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - results = append(results, result) - return results + return []bool{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) }) ctx.BoolCache[field] = results return results @@ -6833,29 +7202,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field];
ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) }) ctx.IntCache[field] = results return results @@ -6866,29 +7234,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) }) ctx.IntCache[field] = results return results @@ -6899,29 +7266,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return 
int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) }) ctx.IntCache[field] = results return results @@ -6932,29 +7298,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) }) ctx.IntCache[field] = results return results @@ -6967,29 +7332,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -7002,23 +7366,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev
:= ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results return results @@ -7030,29 +7391,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolvePackageName(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -7064,29 +7424,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results =
append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -7098,29 +7457,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolvePackageVersion(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -7133,29 +7491,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results :=
newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -7168,23 +7525,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results return results @@ -7196,29 +7550,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) + return int(ev.FieldHandlers.ResolveRights(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) }) ctx.IntCache[field] = results return results @@ -7229,29 +7582,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result,
ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) }) ctx.IntCache[field] = results return results @@ -7263,29 +7615,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsUser(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -7296,23 +7647,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result :=
element.ProcessContext.Process.IsExec - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) bool { - return pce.ProcessContext.Process.IsExec + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + return current.ProcessContext.Process.IsExec }) ctx.BoolCache[field] = results return results @@ -7323,23 +7671,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.PIDContext.IsKworker - results = append(results, result) - return results + return []bool{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) bool { - return pce.ProcessContext.Process.PIDContext.IsKworker + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + return current.ProcessContext.Process.PIDContext.IsKworker }) ctx.BoolCache[field] = results return results @@ -7351,23 +7696,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - return ev.FieldHandlers.ResolveProcessIsThread(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + return ev.FieldHandlers.ResolveProcessIsThread(ev, &current.ProcessContext.Process) }) ctx.BoolCache[field] = results return results @@ -7388,23 +7730,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PIDContext.Pid) - results = append(results, result) -
return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PIDContext.Pid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results return results @@ -7415,23 +7754,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PPid) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PPid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results return results @@ -7442,23 +7778,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PIDContext.Tid) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PIDContext.Tid) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PIDContext.Tid) }) ctx.IntCache[field] = results return results @@ -7469,23 +7802,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.TTYName - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, 
func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.TTYName + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.TTYName }) ctx.StringCache[field] = results return results @@ -7496,23 +7826,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.UID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.UID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.UID) }) ctx.IntCache[field] = results return results @@ -7523,23 +7850,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.User - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.User + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.User }) ctx.StringCache[field] = results return results @@ -7551,23 +7875,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveK8SGroups(ev, &pce.ProcessContext.Process.UserSession) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveK8SGroups(ev, &current.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results return results @@ -7579,23 +7900,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveK8SUID(ev, &pce.ProcessContext.Process.UserSession) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveK8SUID(ev, &current.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results return results @@ -7607,23 +7925,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveK8SUsername(ev, &pce.ProcessContext.Process.UserSession) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveK8SUsername(ev, &current.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results return results @@ -7876,6 +8191,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.CTime) @@ -7889,6 +8205,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if
!ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -7902,6 +8219,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.GID) @@ -7915,6 +8233,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) @@ -7928,6 +8247,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -7941,6 +8261,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) @@ -7954,6 +8275,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) @@ -7967,6 +8289,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode) @@ -7980,6 +8303,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.MTime) @@ -7993,6 +8317,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) @@ -8007,6 +8332,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8031,6 +8357,7 
@@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8044,6 +8371,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8057,6 +8385,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8071,6 +8400,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) @@ -8095,6 +8425,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields)) @@ -8108,6 +8439,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.UID) @@ -8121,6 +8453,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) @@ -8194,6 +8527,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -8207,6 +8541,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8220,6 +8555,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -8233,6 +8569,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -8246,6 +8583,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8259,6 +8597,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -8272,6 +8611,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -8285,6 +8625,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -8298,6 +8639,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -8311,6 +8653,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -8325,6 +8668,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8349,6 +8693,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} 
return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8362,6 +8707,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8375,6 +8721,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8389,6 +8736,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) @@ -8413,6 +8761,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) @@ -8426,6 +8775,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -8439,6 +8789,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) @@ -8482,6 +8833,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgs(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8495,6 +8847,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8508,6 +8861,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8521,6 +8875,7 @@ 
func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8534,6 +8889,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgv(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8547,6 +8903,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8560,6 +8917,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.AUID) @@ -8573,6 +8931,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.CapEffective) @@ -8586,6 +8945,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.CapPermitted) @@ -8599,6 +8959,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.Inode) @@ -8612,6 +8973,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.MountID) @@ -8625,6 +8987,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupID(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup) @@ -8638,6 +9001,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup) @@ -8651,6 +9015,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup) @@ -8664,6 +9029,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Comm @@ -8677,6 +9043,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8690,6 +9057,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent)) @@ -8703,6 +9071,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.EGID) @@ -8716,6 +9085,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.EGroup @@ -8729,6 +9099,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8742,6 +9113,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8755,6 +9127,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.BaseEvent.ProcessContext.Parent) @@ -8768,6 +9141,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.EUID) @@ -8781,6 +9155,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() 
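// Sketch, not part of the diff: the pattern these hunks add to the generated
// accessors. When a precondition fails (kworker process, missing interpreter,
// missing parent), the evaluator now records an eval.ErrNotSupported{Field: field}
// on the context before returning the type's zero value ("" / 0 / false / empty
// slice), so callers can tell "unsupported" apart from a legitimately empty value.
// The types below are simplified stand-ins for the real eval/model types; the
// Error() message and the field name are illustrative assumptions.
package main

import "fmt"

// ErrNotSupported mirrors the error value the generated code stores in ctx.Error.
type ErrNotSupported struct{ Field string }

func (e *ErrNotSupported) Error() string {
	return fmt.Sprintf("field `%s` is not supported for this event", e.Field)
}

// Context stands in for eval.Context: only the Error slot matters here.
type Context struct{ Error error }

// Process stands in for the model's Process, exposing one precondition.
type Process struct {
	kworker     bool
	packageName string
}

func (p *Process) IsNotKworker() bool { return !p.kworker }

// resolvePackageName mimics one generated EvalFnc body: guard, record the
// error, return the zero value; otherwise resolve normally.
func resolvePackageName(ctx *Context, p *Process) string {
	if !p.IsNotKworker() {
		ctx.Error = &ErrNotSupported{Field: "process.file.package.name"}
		return ""
	}
	return p.packageName
}

func main() {
	ctx := &Context{}
	fmt.Println(resolvePackageName(ctx, &Process{kworker: true})) // ""
	fmt.Println(ctx.Error)                                        // field `process.file.package.name` is not supported for this event
}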
{ + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.EUser @@ -8794,9 +9169,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.CTime) @@ -8810,9 +9187,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -8826,9 +9205,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.GID) @@ -8842,9 +9223,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) @@ -8858,9 +9241,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -8874,9 +9259,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) @@ -8890,9 +9277,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.Inode) @@ -8906,9 +9295,11 @@ func (m *Model) GetEvaluator(field 
eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode) @@ -8922,9 +9313,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.MTime) @@ -8938,9 +9331,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.MountID) @@ -8955,9 +9350,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -8982,9 +9379,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -8998,9 +9397,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9014,9 +9415,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9031,9 +9434,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if 
!ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -9058,9 +9463,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields)) @@ -9074,9 +9481,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.UID) @@ -9090,9 +9499,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) @@ -9106,6 +9517,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.FSGID) @@ -9119,6 +9531,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.FSGroup @@ -9132,6 +9545,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.FSUID) @@ -9145,6 +9559,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.FSUser @@ -9158,6 +9573,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.GID) @@ -9171,6 +9587,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 
"" } return ev.BaseEvent.ProcessContext.Parent.Credentials.Group @@ -9184,9 +9601,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.CTime) @@ -9200,9 +9619,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9216,9 +9637,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.GID) @@ -9232,9 +9655,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) @@ -9248,9 +9673,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9264,9 +9691,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) @@ -9280,9 +9709,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -9296,9 +9727,11 @@ 
func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode) @@ -9312,9 +9745,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.MTime) @@ -9328,9 +9763,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -9345,9 +9782,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9372,9 +9811,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9388,9 +9829,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9404,9 +9847,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9421,9 +9866,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) @@ -9448,9 +9895,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields)) @@ -9464,9 +9913,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.UID) @@ -9480,9 +9931,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) @@ -9496,6 +9949,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.BaseEvent.ProcessContext.Parent.IsExec @@ -9509,6 +9963,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.BaseEvent.ProcessContext.Parent.PIDContext.IsKworker @@ -9522,6 +9977,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.BaseEvent.ProcessContext.Parent) @@ -9535,6 +9991,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid) @@ -9548,6 +10005,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PPid) @@ -9561,6 +10019,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID 
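// Sketch, not part of the diff: parent-scoped fields carry two guards. The
// generated evaluator first checks HasParent(), then the same file-level
// precondition as above (IsNotKworker / HasInterpreter), and both failure
// branches now record the same eval.ErrNotSupported before the zero-value
// return. Types, method placement, and the field name are simplified
// stand-ins, not the real model.
package main

import (
	"errors"
	"fmt"
)

type ErrNotSupported struct{ Field string }

func (e *ErrNotSupported) Error() string { return "unsupported field: " + e.Field }

type Context struct{ Error error }

type Process struct {
	Parent         *Process
	hasInterpreter bool
	InterpreterUID int
}

func (p *Process) HasParent() bool      { return p.Parent != nil }
func (p *Process) HasInterpreter() bool { return p.hasInterpreter }

// parentInterpreterFileUID mimics an evaluator for a field such as
// process.parent.interpreter.file.uid: two guards, one shared error value.
func parentInterpreterFileUID(ctx *Context, p *Process) int {
	const field = "process.parent.interpreter.file.uid"
	if !p.HasParent() {
		ctx.Error = &ErrNotSupported{Field: field}
		return 0
	}
	if !p.Parent.HasInterpreter() {
		ctx.Error = &ErrNotSupported{Field: field}
		return 0
	}
	return p.Parent.InterpreterUID
}

func main() {
	ctx := &Context{}
	_ = parentInterpreterFileUID(ctx, &Process{}) // no parent at all
	var nse *ErrNotSupported
	if errors.As(ctx.Error, &nse) {
		fmt.Println("cannot evaluate:", nse.Field)
	}
}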
eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Tid) @@ -9574,6 +10033,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.TTYName @@ -9587,6 +10047,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.Credentials.UID) @@ -9600,6 +10061,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.Credentials.User @@ -9613,6 +10075,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession) @@ -9626,6 +10089,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUID(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession) @@ -9639,6 +10103,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession) @@ -9761,23 +10226,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessArgs(ev, &pce.ProcessContext.Process) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessArgs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -9789,23 
+10251,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -9817,23 +10276,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process) - results = append(results, result...) 
- return results + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, &pce.ProcessContext.Process) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -9845,23 +10301,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results return results @@ -9873,23 +10326,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessArgv(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessArgv(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -9901,23 +10351,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessArgv0(ev, &pce.ProcessContext.Process) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessArgv0(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -9928,23 +10375,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.AUID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.AUID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.AUID) }) ctx.IntCache[field] = results return results @@ -9955,23 +10399,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.CapEffective) - results = 
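// Note on the process.ancestors hunks above and below (illustrative commentary,
// not diff content): the ancestors evaluators are reordered so the register fast
// path runs first. When regID is set and the registered ancestor is missing they
// now return nil rather than an empty slice, and the per-field caches
// (ctx.StringCache / ctx.IntCache / ctx.BoolCache) are no longer consulted on
// that path; caching only applies to the full-ancestry walk. The iteration
// helpers also gain a `field` parameter (newAncestorsIterator /
// newAncestorsIteratorArray), presumably so they can report per-field errors the
// same way as the guards earlier in this file. A condensed sketch of the
// resulting shape appears a few hunks further down.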
append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.CapEffective) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.CapEffective) }) ctx.IntCache[field] = results return results @@ -9982,23 +10423,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.CapPermitted) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.CapPermitted) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.CapPermitted) }) ctx.IntCache[field] = results return results @@ -10009,23 +10447,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.CGroup.CGroupFile.Inode) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.CGroup.CGroupFile.Inode) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.CGroup.CGroupFile.Inode) }) ctx.IntCache[field] = results return results @@ -10036,23 +10471,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.CGroup.CGroupFile.MountID) - results = append(results, result) - return results + 
return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.CGroup.CGroupFile.MountID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.CGroup.CGroupFile.MountID) }) ctx.IntCache[field] = results return results @@ -10064,23 +10496,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveCGroupID(ev, &pce.ProcessContext.Process.CGroup) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveCGroupID(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results return results @@ -10092,23 +10521,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveCGroupManager(ev, &pce.ProcessContext.Process.CGroup) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveCGroupManager(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results return results @@ -10120,23 +10546,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := 
int(ev.FieldHandlers.ResolveCGroupVersion(ev, &element.ProcessContext.Process.CGroup)) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return int(ev.FieldHandlers.ResolveCGroupVersion(ev, &pce.ProcessContext.Process.CGroup)) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return int(ev.FieldHandlers.ResolveCGroupVersion(ev, ¤t.ProcessContext.Process.CGroup)) }) ctx.IntCache[field] = results return results @@ -10147,23 +10570,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Comm - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Comm + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Comm }) ctx.StringCache[field] = results return results @@ -10175,23 +10595,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessContainerID(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessContainerID(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -10203,23 +10620,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + 
return nil } - element := value result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results return results @@ -10230,23 +10644,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.EGID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.EGID) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.EGID) }) ctx.IntCache[field] = results return results @@ -10257,23 +10668,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.EGroup - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.EGroup + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.EGroup }) ctx.StringCache[field] = results return results @@ -10285,23 +10693,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - 
element := value result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvp(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvp(ev, &current.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -10313,23 +10718,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvs(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvs(ev, &current.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -10341,23 +10743,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &current.ProcessContext.Process) }) ctx.BoolCache[field] = results return results @@ -10368,23 +10767,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - 
return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.EUID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.EUID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.EUID) }) ctx.IntCache[field] = results return results @@ -10395,23 +10791,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.EUser - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.EUser + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.EUser }) ctx.StringCache[field] = results return results @@ -10422,29 +10815,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.CTime) + return int(current.ProcessContext.Process.FileEvent.FileFields.CTime) }) ctx.IntCache[field] = results return results @@ -10456,29 +10848,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveFileFilesystem(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -10489,29 +10880,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.GID) + return int(current.ProcessContext.Process.FileEvent.FileFields.GID) }) ctx.IntCache[field] = results return results @@ -10523,29 +10913,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = 
&eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &current.ProcessContext.Process.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -10557,29 +10946,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return nil } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveHashesFromEvent(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -10591,29 +10979,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, false) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &current.ProcessContext.Process.FileEvent.FileFields) }) ctx.BoolCache[field] = results return results @@ -10624,29 +11011,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if 
!current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) + return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) }) ctx.IntCache[field] = results return results @@ -10657,29 +11043,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.Mode) + return int(current.ProcessContext.Process.FileEvent.FileFields.Mode) }) ctx.IntCache[field] = results return results @@ -10690,29 +11075,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.MTime) + return int(current.ProcessContext.Process.FileEvent.FileFields.MTime) }) ctx.IntCache[field] = results return results @@ -10723,29 +11107,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - 
return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) + return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) }) ctx.IntCache[field] = results return results @@ -10758,29 +11141,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -10793,23 +11175,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return 
[]int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results return results @@ -10821,29 +11200,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolvePackageName(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -10855,29 +11233,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.FileEvent) + return 
ev.FieldHandlers.ResolvePackageSourceVersion(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -10889,29 +11266,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolvePackageVersion(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -10924,29 +11300,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -10959,23 +11334,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results 
[]int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results return results @@ -10987,29 +11359,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.FileEvent.FileFields)) + return int(ev.FieldHandlers.ResolveRights(ev, &current.ProcessContext.Process.FileEvent.FileFields)) }) ctx.IntCache[field] = results return results @@ -11020,29 +11391,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if 
!pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.UID) + return int(current.ProcessContext.Process.FileEvent.FileFields.UID) }) ctx.IntCache[field] = results return results @@ -11054,29 +11424,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsUser(ev, &current.ProcessContext.Process.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -11087,23 +11456,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.FSGID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.FSGID) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.FSGID) }) ctx.IntCache[field] = results return results @@ -11114,23 +11480,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := 
&ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.FSGroup - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.FSGroup + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.FSGroup }) ctx.StringCache[field] = results return results @@ -11141,23 +11504,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.FSUID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.FSUID) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.FSUID) }) ctx.IntCache[field] = results return results @@ -11168,23 +11528,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.FSUser - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.FSUser + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.FSUser }) ctx.StringCache[field] = results return results @@ -11195,23 +11552,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, 
ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.GID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.GID) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.GID) }) ctx.IntCache[field] = results return results @@ -11222,23 +11576,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.Group - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.Group + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.Group }) ctx.StringCache[field] = results return results @@ -11249,29 +11600,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) }) ctx.IntCache[field] = results return results @@ -11283,29 +11633,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFileFilesystem(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -11316,29 +11665,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) }) ctx.IntCache[field] = results return results @@ -11350,29 +11698,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, 
ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -11384,29 +11731,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return nil } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveHashesFromEvent(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -11418,29 +11764,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, false) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - results = append(results, result) - return results + return []bool{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) }) ctx.BoolCache[field] = results return results @@ -11451,29 +11796,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := 
newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) }) ctx.IntCache[field] = results return results @@ -11484,29 +11828,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) }) ctx.IntCache[field] = results return results @@ -11517,29 +11860,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) }) ctx.IntCache[field] = results return results @@ -11550,29 +11892,28 @@ func (m *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) }) ctx.IntCache[field] = results return results @@ -11585,29 +11926,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -11620,23 +11960,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { 
- return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results return results @@ -11648,29 +11985,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolvePackageName(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -11682,29 +12018,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if 
!pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -11716,29 +12051,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolvePackageVersion(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -11751,29 +12085,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return 
ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -11786,23 +12119,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results return results @@ -11814,29 +12144,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) + return int(ev.FieldHandlers.ResolveRights(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) }) ctx.IntCache[field] = results return results @@ -11847,29 +12176,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results 
+ element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) }) ctx.IntCache[field] = results return results @@ -11881,29 +12209,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsUser(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -11914,23 +12241,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.IsExec - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev 
*Event, pce *ProcessCacheEntry) bool { - return pce.ProcessContext.Process.IsExec + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + return current.ProcessContext.Process.IsExec }) ctx.BoolCache[field] = results return results @@ -11941,23 +12265,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.PIDContext.IsKworker - results = append(results, result) - return results + return []bool{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) bool { - return pce.ProcessContext.Process.PIDContext.IsKworker + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + return current.ProcessContext.Process.PIDContext.IsKworker }) ctx.BoolCache[field] = results return results @@ -11969,23 +12290,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []bool{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - return ev.FieldHandlers.ResolveProcessIsThread(ev, &pce.ProcessContext.Process) + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + return ev.FieldHandlers.ResolveProcessIsThread(ev, &current.ProcessContext.Process) }) ctx.BoolCache[field] = results return results @@ -12006,23 +12324,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PIDContext.Pid) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return 
int(pce.ProcessContext.Process.PIDContext.Pid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results return results @@ -12033,23 +12348,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PPid) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PPid) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results return results @@ -12060,23 +12372,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PIDContext.Tid) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PIDContext.Tid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PIDContext.Tid) }) ctx.IntCache[field] = results return results @@ -12087,23 +12396,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.TTYName - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.TTYName + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + 
return current.ProcessContext.Process.TTYName }) ctx.StringCache[field] = results return results @@ -12114,23 +12420,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.UID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.UID) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.UID) }) ctx.IntCache[field] = results return results @@ -12141,23 +12444,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.User - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.User + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.User }) ctx.StringCache[field] = results return results @@ -12169,23 +12469,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) - results = append(results, result...) 
- return results + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveK8SGroups(ev, &pce.ProcessContext.Process.UserSession) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveK8SGroups(ev, &current.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results return results @@ -12197,23 +12494,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveK8SUID(ev, &pce.ProcessContext.Process.UserSession) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveK8SUID(ev, &current.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results return results @@ -12225,23 +12519,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveK8SUsername(ev, &pce.ProcessContext.Process.UserSession) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveK8SUsername(ev, &current.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results return results @@ -12494,6 +12785,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.CTime) @@ -12507,6 +12799,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12520,6 +12813,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.GID) @@ -12533,6 +12827,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) @@ -12546,6 +12841,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12559,6 +12855,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) @@ -12572,6 +12869,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.Inode) @@ -12585,6 +12883,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.Mode) @@ -12598,6 +12897,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.MTime) @@ -12611,6 +12911,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.MountID) @@ -12625,6 +12926,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12649,6 +12951,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12662,6 +12965,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12675,6 +12979,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12689,6 +12994,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent) @@ -12713,6 +13019,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields)) @@ -12726,6 +13033,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.UID) @@ -12739,6 +13047,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) @@ -12812,6 +13121,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -12825,6 +13135,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -12838,6 +13149,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -12851,6 +13163,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) @@ -12864,6 +13177,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -12877,6 +13191,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) @@ -12890,6 +13205,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -12903,6 +13219,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -12916,6 +13233,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -12929,6 +13247,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -12943,6 +13262,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -12967,6 +13287,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -12980,6 +13301,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -12993,6 +13315,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -13007,6 +13330,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) @@ -13031,6 +13355,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields)) @@ -13044,6 +13369,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -13057,6 +13383,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) @@ -13100,6 +13427,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgs(ev, ev.PTrace.Tracee.Parent) @@ -13113,6 +13441,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.PTrace.Tracee.Parent) @@ -13126,6 +13455,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.PTrace.Tracee.Parent) @@ -13139,6 +13469,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.PTrace.Tracee.Parent) @@ -13152,6 +13483,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgv(ev, ev.PTrace.Tracee.Parent) @@ -13165,6 +13497,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.PTrace.Tracee.Parent) @@ -13178,6 +13511,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.AUID) @@ -13191,6 +13525,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.CapEffective) @@ -13204,6 +13539,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.CapPermitted) @@ -13217,6 +13553,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.CGroup.CGroupFile.Inode) @@ -13230,6 +13567,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.CGroup.CGroupFile.MountID) @@ -13243,6 +13581,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupID(ev, &ev.PTrace.Tracee.Parent.CGroup) @@ -13256,6 +13595,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.PTrace.Tracee.Parent.CGroup) @@ -13269,6 +13609,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.PTrace.Tracee.Parent.CGroup) @@ -13282,6 +13623,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Comm @@ -13295,6 +13637,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.PTrace.Tracee.Parent) @@ -13308,6 +13651,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.PTrace.Tracee.Parent)) @@ -13321,6 +13665,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.EGID) @@ -13334,6 +13679,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.EGroup @@ -13347,6 +13693,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.PTrace.Tracee.Parent) @@ -13360,6 +13707,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.PTrace.Tracee.Parent) @@ -13373,6 +13721,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.PTrace.Tracee.Parent) @@ -13386,6 +13735,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.EUID) @@ -13399,6 +13749,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.EUser @@ -13412,9 +13763,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.CTime) @@ -13428,9 +13781,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13444,9 +13799,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.GID) @@ -13460,9 +13817,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) @@ -13476,9 +13835,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13492,9 +13853,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) @@ -13508,9 +13871,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.Inode) @@ -13524,9 +13889,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.Mode) @@ -13540,9 +13907,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.MTime) @@ -13556,9 +13925,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.MountID) @@ -13573,9 +13944,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13600,9 +13973,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13616,9 +13991,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13632,9 +14009,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13649,9 +14028,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent) @@ -13676,9 +14057,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields)) @@ -13692,9 +14075,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.UID) @@ -13708,9 +14093,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) @@ -13724,6 +14111,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.FSGID) @@ -13737,6 +14125,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.FSGroup @@ -13750,6 +14139,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.FSUID) @@ -13763,6 +14153,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.FSUser @@ -13776,6 +14167,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.GID) @@ -13789,6 +14181,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.Group @@ -13802,9 +14195,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.CTime) @@ -13818,9 +14213,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -13834,9 +14231,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.GID) @@ -13850,9 +14249,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, 
&ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) @@ -13866,9 +14267,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -13882,9 +14285,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) @@ -13898,9 +14303,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -13914,9 +14321,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode) @@ -13930,9 +14339,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.MTime) @@ -13946,9 +14357,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -13963,9 +14376,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -13990,9 +14405,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if 
!ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14006,9 +14423,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14022,9 +14441,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14039,9 +14460,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) @@ -14066,9 +14489,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields)) @@ -14082,9 +14507,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.UID) @@ -14098,9 +14525,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.PTrace.Tracee.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) @@ -14114,6 +14543,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.PTrace.Tracee.Parent.IsExec @@ -14127,6 +14557,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return 
ev.PTrace.Tracee.Parent.PIDContext.IsKworker @@ -14140,6 +14571,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.PTrace.Tracee.Parent) @@ -14153,6 +14585,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.PIDContext.Pid) @@ -14166,6 +14599,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.PPid) @@ -14179,6 +14613,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.PIDContext.Tid) @@ -14192,6 +14627,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.TTYName @@ -14205,6 +14641,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.PTrace.Tracee.Parent.Credentials.UID) @@ -14218,6 +14655,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.PTrace.Tracee.Parent.Credentials.User @@ -14231,6 +14669,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.PTrace.Tracee.Parent.UserSession) @@ -14244,6 +14683,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUID(ev, &ev.PTrace.Tracee.Parent.UserSession) @@ -14257,6 +14697,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.PTrace.Tracee.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.PTrace.Tracee.Parent.UserSession) @@ -15240,6 +15681,16 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval Field: field, Weight: eval.FunctionWeight, }, nil + case "rmdir.syscall.path": + return &eval.StringEvaluator{ + EvalFnc: func(ctx *eval.Context) string { + ctx.AppendResolvedField(field) + ev := 
ctx.Event.(*Event) + return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Rmdir.SyscallContext) + }, + Field: field, + Weight: 900 * eval.HandlerWeight, + }, nil case "selinux.bool.name": return &eval.StringEvaluator{ EvalFnc: func(ctx *eval.Context) string { @@ -15659,23 +16110,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessArgs(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessArgs(ev, &current.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -15687,23 +16135,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &pce.ProcessContext.Process) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &current.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -15715,23 +16160,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process) - results = append(results, result...) 
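// A minimal usage sketch, not part of the generated patch: it assumes callers of
// GetEvaluator inspect ctx.Error after invoking EvalFnc, which is what the
// eval.ErrNotSupported assignments added throughout this file enable when a
// parent, non-kworker, or interpreter precondition does not hold. The helper name
// evalStringField is hypothetical and only illustrates the intended call pattern.
func evalStringField(m *Model, ctx *eval.Context, field eval.Field) (string, bool) {
	evaluator, err := m.GetEvaluator(field, "")
	if err != nil {
		return "", false
	}
	strEval, ok := evaluator.(*eval.StringEvaluator)
	if !ok {
		return "", false
	}
	value := strEval.EvalFnc(ctx)
	if ctx.Error != nil {
		// The field could not be resolved for this event (e.g. the tracee has no
		// parent), so the zero value returned above is not a real match.
		return "", false
	}
	return value, true
}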
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -15743,23 +16185,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results return results @@ -15771,23 +16210,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessArgv(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessArgv(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -15799,23 +16235,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessArgv0(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessArgv0(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -15826,23 +16259,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.AUID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.AUID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.AUID) }) ctx.IntCache[field] = results return results @@ -15853,23 +16283,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.CapEffective) - results = 
append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.CapEffective) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.CapEffective) }) ctx.IntCache[field] = results return results @@ -15880,23 +16307,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.CapPermitted) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.CapPermitted) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.CapPermitted) }) ctx.IntCache[field] = results return results @@ -15907,23 +16331,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.CGroup.CGroupFile.Inode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.CGroup.CGroupFile.Inode) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.CGroup.CGroupFile.Inode) }) ctx.IntCache[field] = results return results @@ -15934,23 +16355,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.CGroup.CGroupFile.MountID) - results = append(results, result) - return results + 
return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.CGroup.CGroupFile.MountID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.CGroup.CGroupFile.MountID) }) ctx.IntCache[field] = results return results @@ -15962,23 +16380,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveCGroupID(ev, &pce.ProcessContext.Process.CGroup) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveCGroupID(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results return results @@ -15990,23 +16405,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveCGroupManager(ev, &pce.ProcessContext.Process.CGroup) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveCGroupManager(ev, ¤t.ProcessContext.Process.CGroup) }) ctx.StringCache[field] = results return results @@ -16018,23 +16430,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := 
int(ev.FieldHandlers.ResolveCGroupVersion(ev, &element.ProcessContext.Process.CGroup)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return int(ev.FieldHandlers.ResolveCGroupVersion(ev, &pce.ProcessContext.Process.CGroup)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return int(ev.FieldHandlers.ResolveCGroupVersion(ev, ¤t.ProcessContext.Process.CGroup)) }) ctx.IntCache[field] = results return results @@ -16045,23 +16454,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Comm - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Comm + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Comm }) ctx.StringCache[field] = results return results @@ -16073,23 +16479,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessContainerID(ev, &pce.ProcessContext.Process) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessContainerID(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -16101,23 +16504,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + 
return nil } - element := value result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results return results @@ -16128,23 +16528,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.EGID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.EGID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.EGID) }) ctx.IntCache[field] = results return results @@ -16155,23 +16552,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.EGroup - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.EGroup + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.EGroup }) ctx.StringCache[field] = results return results @@ -16183,23 +16577,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - 
element := value result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvp(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -16211,23 +16602,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvs(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -16239,23 +16627,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []bool{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &pce.ProcessContext.Process) + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ¤t.ProcessContext.Process) }) ctx.BoolCache[field] = results return results @@ -16266,23 +16651,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - 
return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.EUID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.EUID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.EUID) }) ctx.IntCache[field] = results return results @@ -16293,23 +16675,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.EUser - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.EUser + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.EUser }) ctx.StringCache[field] = results return results @@ -16320,29 +16699,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.CTime) + return int(current.ProcessContext.Process.FileEvent.FileFields.CTime) }) ctx.IntCache[field] = results return results @@ -16354,29 +16732,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveFileFilesystem(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -16387,29 +16764,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.GID) + return int(current.ProcessContext.Process.FileEvent.FileFields.GID) }) ctx.IntCache[field] = results return results @@ -16421,29 +16797,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = 
&eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsGroup(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -16455,29 +16830,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result...) 
- return results + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return nil } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveHashesFromEvent(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -16489,29 +16863,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, false) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, ¤t.ProcessContext.Process.FileEvent.FileFields) }) ctx.BoolCache[field] = results return results @@ -16522,29 +16895,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if 
!current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) + return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) }) ctx.IntCache[field] = results return results @@ -16555,29 +16927,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.Mode) + return int(current.ProcessContext.Process.FileEvent.FileFields.Mode) }) ctx.IntCache[field] = results return results @@ -16588,29 +16959,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.MTime) + return int(current.ProcessContext.Process.FileEvent.FileFields.MTime) }) ctx.IntCache[field] = results return results @@ -16621,29 +16991,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - 
return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) + return int(current.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) }) ctx.IntCache[field] = results return results @@ -16656,29 +17025,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -16691,23 +17059,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return 
[]int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results return results @@ -16719,29 +17084,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolvePackageName(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -16753,29 +17117,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.FileEvent) + return 
ev.FieldHandlers.ResolvePackageSourceVersion(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -16787,29 +17150,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolvePackageVersion(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -16822,29 +17184,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -16857,23 +17218,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results 
[]int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results return results @@ -16885,29 +17243,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.FileEvent.FileFields)) + return int(ev.FieldHandlers.ResolveRights(ev, &current.ProcessContext.Process.FileEvent.FileFields)) }) ctx.IntCache[field] = results return results @@ -16918,29 +17275,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if 
!pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.FileEvent.FileFields.UID) + return int(current.ProcessContext.Process.FileEvent.FileFields.UID) }) ctx.IntCache[field] = results return results @@ -16952,29 +17308,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.IsNotKworker() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.IsNotKworker() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsUser(ev, &current.ProcessContext.Process.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -16985,23 +17340,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.FSGID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.FSGID) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.FSGID) }) ctx.IntCache[field] = results return results @@ -17012,23 +17364,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := 
&ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.FSGroup - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.FSGroup + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.FSGroup }) ctx.StringCache[field] = results return results @@ -17039,23 +17388,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.FSUID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.FSUID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.FSUID) }) ctx.IntCache[field] = results return results @@ -17066,23 +17412,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.FSUser - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.FSUser + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.FSUser }) ctx.StringCache[field] = results return results @@ -17093,23 +17436,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, 
ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.GID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.GID) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.GID) }) ctx.IntCache[field] = results return results @@ -17120,23 +17460,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.Group - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.Group + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.Group }) ctx.StringCache[field] = results return results @@ -17147,29 +17484,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) }) ctx.IntCache[field] = results return results @@ -17181,29 +17517,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFileFilesystem(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -17214,29 +17549,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) }) ctx.IntCache[field] = results return results @@ -17248,29 +17582,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, 
ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -17282,29 +17615,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return nil } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveHashesFromEvent(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -17316,29 +17648,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, false) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []bool{false} } result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - results = append(results, result) - return results + return []bool{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) }) ctx.BoolCache[field] = results return results @@ -17349,29 +17680,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := 
newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) }) ctx.IntCache[field] = results return results @@ -17382,29 +17712,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) }) ctx.IntCache[field] = results return results @@ -17415,29 +17744,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) }) ctx.IntCache[field] = results return results @@ -17448,29 +17776,28 @@ func (m *Model) 
GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) }) ctx.IntCache[field] = results return results @@ -17483,29 +17810,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -17518,23 +17844,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { 
- return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFileBasename(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results return results @@ -17546,29 +17869,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolvePackageName(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -17580,29 +17902,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := 
ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -17614,29 +17935,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolvePackageVersion(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -17649,29 +17969,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return 
ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent) }) ctx.StringCache[field] = results return results @@ -17684,23 +18003,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFilePath(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent)) }) ctx.IntCache[field] = results return results @@ -17712,29 +18028,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(ev.FieldHandlers.ResolveRights(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) + return int(ev.FieldHandlers.ResolveRights(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) }) ctx.IntCache[field] = results return results @@ -17745,29 +18060,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results 
+ element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, 0) + ctx.Error = &eval.ErrNotSupported{Field: field} + return []int{0} } result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } - return int(pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) + return int(current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) }) ctx.IntCache[field] = results return results @@ -17779,29 +18093,28 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value if !element.ProcessContext.Process.HasInterpreter() { - return append(results, "") + ctx.Error = &eval.ErrNotSupported{Field: field} + return []string{""} } result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - if !pce.ProcessContext.Process.HasInterpreter() { + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + if !current.ProcessContext.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &pce.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFileFieldsUser(ev, &current.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) }) ctx.StringCache[field] = results return results @@ -17812,23 +18125,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.IsExec - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev 
*Event, pce *ProcessCacheEntry) bool { - return pce.ProcessContext.Process.IsExec + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + return current.ProcessContext.Process.IsExec }) ctx.BoolCache[field] = results return results @@ -17839,23 +18149,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.BoolArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.PIDContext.IsKworker - results = append(results, result) - return results + return []bool{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) bool { - return pce.ProcessContext.Process.PIDContext.IsKworker + if result, ok := ctx.BoolCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) bool { + return current.ProcessContext.Process.PIDContext.IsKworker }) ctx.BoolCache[field] = results return results @@ -17867,23 +18174,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []bool { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.BoolCache[field]; ok { - return result - } - var results []bool iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []bool{result} + } + if result, ok := ctx.BoolCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) bool { - return ev.FieldHandlers.ResolveProcessIsThread(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) bool { + return ev.FieldHandlers.ResolveProcessIsThread(ev, &current.ProcessContext.Process) }) ctx.BoolCache[field] = results return results @@ -17904,23 +18208,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PIDContext.Pid) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return 
int(pce.ProcessContext.Process.PIDContext.Pid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results return results @@ -17931,23 +18232,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PPid) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PPid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results return results @@ -17958,23 +18256,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PIDContext.Tid) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PIDContext.Tid) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PIDContext.Tid) }) ctx.IntCache[field] = results return results @@ -17985,23 +18280,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.TTYName - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.TTYName + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + 
return current.ProcessContext.Process.TTYName }) ctx.StringCache[field] = results return results @@ -18012,23 +18304,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.Credentials.UID) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.Credentials.UID) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.Credentials.UID) }) ctx.IntCache[field] = results return results @@ -18039,23 +18328,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.Credentials.User - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.Credentials.User + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.Credentials.User }) ctx.StringCache[field] = results return results @@ -18067,23 +18353,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveK8SGroups(ev, &pce.ProcessContext.Process.UserSession) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveK8SGroups(ev, &current.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results return results @@ -18095,23 +18378,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveK8SUID(ev, &pce.ProcessContext.Process.UserSession) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveK8SUID(ev, &current.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results return results @@ -18123,23 +18403,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveK8SUsername(ev, &pce.ProcessContext.Process.UserSession) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveK8SUsername(ev, &current.ProcessContext.Process.UserSession) }) ctx.StringCache[field] = results return results @@ -18392,6 +18669,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.CTime) @@ -18405,6 +18683,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if
!ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Process.FileEvent) @@ -18418,6 +18697,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.GID) @@ -18431,6 +18711,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Process.FileEvent.FileFields) @@ -18444,6 +18725,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Process.FileEvent) @@ -18457,6 +18739,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Process.FileEvent.FileFields) @@ -18470,6 +18753,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.PathKey.Inode) @@ -18483,6 +18767,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.Mode) @@ -18496,6 +18781,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.MTime) @@ -18509,6 +18795,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.PathKey.MountID) @@ -18523,6 +18810,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.FileEvent) @@ -18547,6 +18835,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Process.FileEvent) @@ -18560,6 +18849,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Process.FileEvent) @@ -18573,6 +18863,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Process.FileEvent) @@ -18587,6 +18878,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent) @@ -18611,6 +18903,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Process.FileEvent.FileFields)) @@ -18624,6 +18917,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.FileEvent.FileFields.UID) @@ -18637,6 +18931,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Process.FileEvent.FileFields) @@ -18710,6 +19005,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.CTime) @@ -18723,6 +19019,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -18736,6 +19033,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.GID) @@ -18749,6 +19047,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = 
&eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) @@ -18762,6 +19061,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -18775,6 +19075,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) @@ -18788,6 +19089,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -18801,6 +19103,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode) @@ -18814,6 +19117,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.MTime) @@ -18827,6 +19131,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -18841,6 +19146,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -18865,6 +19171,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -18878,6 +19185,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -18891,6 +19199,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -18905,6 +19214,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) @@ -18929,6 +19239,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields)) @@ -18942,6 +19253,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.UID) @@ -18955,6 +19267,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.Process.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) @@ -18998,6 +19311,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgs(ev, ev.Signal.Target.Parent) @@ -19011,6 +19325,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.Signal.Target.Parent) @@ -19024,6 +19339,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.Signal.Target.Parent) @@ -19037,6 +19353,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Signal.Target.Parent) @@ -19050,6 +19367,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessArgv(ev, ev.Signal.Target.Parent) @@ -19063,6 +19381,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Signal.Target.Parent) @@ -19076,6 +19395,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.AUID) @@ -19089,6 +19409,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.CapEffective) @@ -19102,6 +19423,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.CapPermitted) @@ -19115,6 +19437,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.CGroup.CGroupFile.Inode) @@ -19128,6 +19451,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.CGroup.CGroupFile.MountID) @@ -19141,6 +19465,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupID(ev, &ev.Signal.Target.Parent.CGroup) @@ -19154,6 +19479,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Signal.Target.Parent.CGroup) @@ -19167,6 +19493,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Signal.Target.Parent.CGroup) @@ -19180,6 +19507,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Comm @@ -19193,6 +19521,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.Signal.Target.Parent) @@ -19206,6 +19535,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Signal.Target.Parent)) @@ -19219,6 +19549,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.EGID) @@ -19232,6 +19563,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.EGroup @@ -19245,6 +19577,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Signal.Target.Parent) @@ -19258,6 +19591,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Signal.Target.Parent) @@ -19271,6 +19605,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.Signal.Target.Parent) @@ -19284,6 +19619,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.EUID) @@ -19297,6 +19633,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.EUser @@ -19310,9 +19647,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.CTime) @@ -19326,9 +19665,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19342,9 +19683,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if 
!ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.GID) @@ -19358,9 +19701,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) @@ -19374,9 +19719,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19390,9 +19737,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) @@ -19406,9 +19755,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.Inode) @@ -19422,9 +19773,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.Mode) @@ -19438,9 +19791,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.MTime) @@ -19454,9 +19809,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.MountID) @@ -19471,9 +19828,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := 
ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19498,9 +19857,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19514,9 +19875,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19530,9 +19893,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19547,9 +19912,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent) @@ -19574,9 +19941,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Parent.FileEvent.FileFields)) @@ -19590,9 +19959,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.FileEvent.FileFields.UID) @@ -19606,9 +19977,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.IsNotKworker() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) @@ -19622,6 +19995,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.FSGID) @@ -19635,6 +20009,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.FSGroup @@ -19648,6 +20023,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.FSUID) @@ -19661,6 +20037,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.FSUser @@ -19674,6 +20051,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.GID) @@ -19687,6 +20065,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.Group @@ -19700,9 +20079,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.CTime) @@ -19716,9 +20097,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -19732,9 +20115,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.GID) @@ -19748,9 +20133,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsGroup(ev, 
&ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) @@ -19764,9 +20151,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -19780,9 +20169,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) @@ -19796,9 +20187,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) @@ -19812,9 +20205,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode) @@ -19828,9 +20223,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.MTime) @@ -19844,9 +20241,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) @@ -19861,9 +20260,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -19888,9 +20289,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if 
!ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -19904,9 +20307,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -19920,9 +20325,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -19937,9 +20344,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) @@ -19964,9 +20373,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields)) @@ -19980,9 +20391,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.UID) @@ -19996,9 +20409,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } if !ev.Signal.Target.Parent.HasInterpreter() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) @@ -20012,6 +20427,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.Signal.Target.Parent.IsExec @@ -20025,6 +20441,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return 
ev.Signal.Target.Parent.PIDContext.IsKworker @@ -20038,6 +20455,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return false } return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.Signal.Target.Parent) @@ -20051,6 +20469,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.PIDContext.Pid) @@ -20064,6 +20483,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.PPid) @@ -20077,6 +20497,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.PIDContext.Tid) @@ -20090,6 +20511,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.TTYName @@ -20103,6 +20525,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.Signal.Target.Parent.Credentials.UID) @@ -20116,6 +20539,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.Signal.Target.Parent.Credentials.User @@ -20129,6 +20553,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Signal.Target.Parent.UserSession) @@ -20142,6 +20567,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Signal.Target.Parent.UserSession) @@ -20155,6 +20581,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.Signal.Target.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Signal.Target.Parent.UserSession) @@ -20999,6 +21426,11 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval } func (ev *Event) GetFields() []eval.Field { return []eval.Field{ + "accept.addr.family", + "accept.addr.ip", + "accept.addr.is_public", + "accept.addr.port", + "accept.retval", "bind.addr.family", 
"bind.addr.ip", "bind.addr.is_public", @@ -21386,6 +21818,8 @@ func (ev *Event) GetFields() []eval.Field { "mkdir.file.uid", "mkdir.file.user", "mkdir.retval", + "mkdir.syscall.mode", + "mkdir.syscall.path", "mmap.file.change_time", "mmap.file.filesystem", "mmap.file.gid", @@ -21426,10 +21860,25 @@ func (ev *Event) GetFields() []eval.Field { "network.device.ifname", "network.l3_protocol", "network.l4_protocol", + "network.network_direction", "network.size", "network.source.ip", "network.source.is_public", "network.source.port", + "network_flow_monitor.device.ifname", + "network_flow_monitor.flows.destination.ip", + "network_flow_monitor.flows.destination.is_public", + "network_flow_monitor.flows.destination.port", + "network_flow_monitor.flows.egress.data_size", + "network_flow_monitor.flows.egress.packet_count", + "network_flow_monitor.flows.ingress.data_size", + "network_flow_monitor.flows.ingress.packet_count", + "network_flow_monitor.flows.l3_protocol", + "network_flow_monitor.flows.l4_protocol", + "network_flow_monitor.flows.length", + "network_flow_monitor.flows.source.ip", + "network_flow_monitor.flows.source.is_public", + "network_flow_monitor.flows.source.port", "ondemand.arg1.str", "ondemand.arg1.uint", "ondemand.arg2.str", @@ -21472,6 +21921,7 @@ func (ev *Event) GetFields() []eval.Field { "packet.filter", "packet.l3_protocol", "packet.l4_protocol", + "packet.network_direction", "packet.size", "packet.source.ip", "packet.source.is_public", @@ -22030,12940 +22480,3429 @@ func (ev *Event) GetFields() []eval.Field { "rename.file.name.length", "rename.file.package.name", "rename.file.package.source_version", - "rename.file.package.version", - "rename.file.path", - "rename.file.path.length", - "rename.file.rights", - "rename.file.uid", - "rename.file.user", - "rename.retval", - "rename.syscall.destination.path", - "rename.syscall.path", - "rmdir.file.change_time", - "rmdir.file.filesystem", - "rmdir.file.gid", - "rmdir.file.group", - "rmdir.file.hashes", - "rmdir.file.in_upper_layer", - "rmdir.file.inode", - "rmdir.file.mode", - "rmdir.file.modification_time", - "rmdir.file.mount_id", - "rmdir.file.name", - "rmdir.file.name.length", - "rmdir.file.package.name", - "rmdir.file.package.source_version", - "rmdir.file.package.version", - "rmdir.file.path", - "rmdir.file.path.length", - "rmdir.file.rights", - "rmdir.file.uid", - "rmdir.file.user", - "rmdir.retval", - "selinux.bool.name", - "selinux.bool.state", - "selinux.bool_commit.state", - "selinux.enforce.status", - "setgid.egid", - "setgid.egroup", - "setgid.fsgid", - "setgid.fsgroup", - "setgid.gid", - "setgid.group", - "setuid.euid", - "setuid.euser", - "setuid.fsuid", - "setuid.fsuser", - "setuid.uid", - "setuid.user", - "setxattr.file.change_time", - "setxattr.file.destination.name", - "setxattr.file.destination.namespace", - "setxattr.file.filesystem", - "setxattr.file.gid", - "setxattr.file.group", - "setxattr.file.hashes", - "setxattr.file.in_upper_layer", - "setxattr.file.inode", - "setxattr.file.mode", - "setxattr.file.modification_time", - "setxattr.file.mount_id", - "setxattr.file.name", - "setxattr.file.name.length", - "setxattr.file.package.name", - "setxattr.file.package.source_version", - "setxattr.file.package.version", - "setxattr.file.path", - "setxattr.file.path.length", - "setxattr.file.rights", - "setxattr.file.uid", - "setxattr.file.user", - "setxattr.retval", - "signal.pid", - "signal.retval", - "signal.target.ancestors.args", - "signal.target.ancestors.args_flags", - "signal.target.ancestors.args_options", - 
"signal.target.ancestors.args_truncated", - "signal.target.ancestors.argv", - "signal.target.ancestors.argv0", - "signal.target.ancestors.auid", - "signal.target.ancestors.cap_effective", - "signal.target.ancestors.cap_permitted", - "signal.target.ancestors.cgroup.file.inode", - "signal.target.ancestors.cgroup.file.mount_id", - "signal.target.ancestors.cgroup.id", - "signal.target.ancestors.cgroup.manager", - "signal.target.ancestors.cgroup.version", - "signal.target.ancestors.comm", - "signal.target.ancestors.container.id", - "signal.target.ancestors.created_at", - "signal.target.ancestors.egid", - "signal.target.ancestors.egroup", - "signal.target.ancestors.envp", - "signal.target.ancestors.envs", - "signal.target.ancestors.envs_truncated", - "signal.target.ancestors.euid", - "signal.target.ancestors.euser", - "signal.target.ancestors.file.change_time", - "signal.target.ancestors.file.filesystem", - "signal.target.ancestors.file.gid", - "signal.target.ancestors.file.group", - "signal.target.ancestors.file.hashes", - "signal.target.ancestors.file.in_upper_layer", - "signal.target.ancestors.file.inode", - "signal.target.ancestors.file.mode", - "signal.target.ancestors.file.modification_time", - "signal.target.ancestors.file.mount_id", - "signal.target.ancestors.file.name", - "signal.target.ancestors.file.name.length", - "signal.target.ancestors.file.package.name", - "signal.target.ancestors.file.package.source_version", - "signal.target.ancestors.file.package.version", - "signal.target.ancestors.file.path", - "signal.target.ancestors.file.path.length", - "signal.target.ancestors.file.rights", - "signal.target.ancestors.file.uid", - "signal.target.ancestors.file.user", - "signal.target.ancestors.fsgid", - "signal.target.ancestors.fsgroup", - "signal.target.ancestors.fsuid", - "signal.target.ancestors.fsuser", - "signal.target.ancestors.gid", - "signal.target.ancestors.group", - "signal.target.ancestors.interpreter.file.change_time", - "signal.target.ancestors.interpreter.file.filesystem", - "signal.target.ancestors.interpreter.file.gid", - "signal.target.ancestors.interpreter.file.group", - "signal.target.ancestors.interpreter.file.hashes", - "signal.target.ancestors.interpreter.file.in_upper_layer", - "signal.target.ancestors.interpreter.file.inode", - "signal.target.ancestors.interpreter.file.mode", - "signal.target.ancestors.interpreter.file.modification_time", - "signal.target.ancestors.interpreter.file.mount_id", - "signal.target.ancestors.interpreter.file.name", - "signal.target.ancestors.interpreter.file.name.length", - "signal.target.ancestors.interpreter.file.package.name", - "signal.target.ancestors.interpreter.file.package.source_version", - "signal.target.ancestors.interpreter.file.package.version", - "signal.target.ancestors.interpreter.file.path", - "signal.target.ancestors.interpreter.file.path.length", - "signal.target.ancestors.interpreter.file.rights", - "signal.target.ancestors.interpreter.file.uid", - "signal.target.ancestors.interpreter.file.user", - "signal.target.ancestors.is_exec", - "signal.target.ancestors.is_kworker", - "signal.target.ancestors.is_thread", - "signal.target.ancestors.length", - "signal.target.ancestors.pid", - "signal.target.ancestors.ppid", - "signal.target.ancestors.tid", - "signal.target.ancestors.tty_name", - "signal.target.ancestors.uid", - "signal.target.ancestors.user", - "signal.target.ancestors.user_session.k8s_groups", - "signal.target.ancestors.user_session.k8s_uid", - "signal.target.ancestors.user_session.k8s_username", - 
"signal.target.args", - "signal.target.args_flags", - "signal.target.args_options", - "signal.target.args_truncated", - "signal.target.argv", - "signal.target.argv0", - "signal.target.auid", - "signal.target.cap_effective", - "signal.target.cap_permitted", - "signal.target.cgroup.file.inode", - "signal.target.cgroup.file.mount_id", - "signal.target.cgroup.id", - "signal.target.cgroup.manager", - "signal.target.cgroup.version", - "signal.target.comm", - "signal.target.container.id", - "signal.target.created_at", - "signal.target.egid", - "signal.target.egroup", - "signal.target.envp", - "signal.target.envs", - "signal.target.envs_truncated", - "signal.target.euid", - "signal.target.euser", - "signal.target.file.change_time", - "signal.target.file.filesystem", - "signal.target.file.gid", - "signal.target.file.group", - "signal.target.file.hashes", - "signal.target.file.in_upper_layer", - "signal.target.file.inode", - "signal.target.file.mode", - "signal.target.file.modification_time", - "signal.target.file.mount_id", - "signal.target.file.name", - "signal.target.file.name.length", - "signal.target.file.package.name", - "signal.target.file.package.source_version", - "signal.target.file.package.version", - "signal.target.file.path", - "signal.target.file.path.length", - "signal.target.file.rights", - "signal.target.file.uid", - "signal.target.file.user", - "signal.target.fsgid", - "signal.target.fsgroup", - "signal.target.fsuid", - "signal.target.fsuser", - "signal.target.gid", - "signal.target.group", - "signal.target.interpreter.file.change_time", - "signal.target.interpreter.file.filesystem", - "signal.target.interpreter.file.gid", - "signal.target.interpreter.file.group", - "signal.target.interpreter.file.hashes", - "signal.target.interpreter.file.in_upper_layer", - "signal.target.interpreter.file.inode", - "signal.target.interpreter.file.mode", - "signal.target.interpreter.file.modification_time", - "signal.target.interpreter.file.mount_id", - "signal.target.interpreter.file.name", - "signal.target.interpreter.file.name.length", - "signal.target.interpreter.file.package.name", - "signal.target.interpreter.file.package.source_version", - "signal.target.interpreter.file.package.version", - "signal.target.interpreter.file.path", - "signal.target.interpreter.file.path.length", - "signal.target.interpreter.file.rights", - "signal.target.interpreter.file.uid", - "signal.target.interpreter.file.user", - "signal.target.is_exec", - "signal.target.is_kworker", - "signal.target.is_thread", - "signal.target.parent.args", - "signal.target.parent.args_flags", - "signal.target.parent.args_options", - "signal.target.parent.args_truncated", - "signal.target.parent.argv", - "signal.target.parent.argv0", - "signal.target.parent.auid", - "signal.target.parent.cap_effective", - "signal.target.parent.cap_permitted", - "signal.target.parent.cgroup.file.inode", - "signal.target.parent.cgroup.file.mount_id", - "signal.target.parent.cgroup.id", - "signal.target.parent.cgroup.manager", - "signal.target.parent.cgroup.version", - "signal.target.parent.comm", - "signal.target.parent.container.id", - "signal.target.parent.created_at", - "signal.target.parent.egid", - "signal.target.parent.egroup", - "signal.target.parent.envp", - "signal.target.parent.envs", - "signal.target.parent.envs_truncated", - "signal.target.parent.euid", - "signal.target.parent.euser", - "signal.target.parent.file.change_time", - "signal.target.parent.file.filesystem", - "signal.target.parent.file.gid", - "signal.target.parent.file.group", - 
"signal.target.parent.file.hashes", - "signal.target.parent.file.in_upper_layer", - "signal.target.parent.file.inode", - "signal.target.parent.file.mode", - "signal.target.parent.file.modification_time", - "signal.target.parent.file.mount_id", - "signal.target.parent.file.name", - "signal.target.parent.file.name.length", - "signal.target.parent.file.package.name", - "signal.target.parent.file.package.source_version", - "signal.target.parent.file.package.version", - "signal.target.parent.file.path", - "signal.target.parent.file.path.length", - "signal.target.parent.file.rights", - "signal.target.parent.file.uid", - "signal.target.parent.file.user", - "signal.target.parent.fsgid", - "signal.target.parent.fsgroup", - "signal.target.parent.fsuid", - "signal.target.parent.fsuser", - "signal.target.parent.gid", - "signal.target.parent.group", - "signal.target.parent.interpreter.file.change_time", - "signal.target.parent.interpreter.file.filesystem", - "signal.target.parent.interpreter.file.gid", - "signal.target.parent.interpreter.file.group", - "signal.target.parent.interpreter.file.hashes", - "signal.target.parent.interpreter.file.in_upper_layer", - "signal.target.parent.interpreter.file.inode", - "signal.target.parent.interpreter.file.mode", - "signal.target.parent.interpreter.file.modification_time", - "signal.target.parent.interpreter.file.mount_id", - "signal.target.parent.interpreter.file.name", - "signal.target.parent.interpreter.file.name.length", - "signal.target.parent.interpreter.file.package.name", - "signal.target.parent.interpreter.file.package.source_version", - "signal.target.parent.interpreter.file.package.version", - "signal.target.parent.interpreter.file.path", - "signal.target.parent.interpreter.file.path.length", - "signal.target.parent.interpreter.file.rights", - "signal.target.parent.interpreter.file.uid", - "signal.target.parent.interpreter.file.user", - "signal.target.parent.is_exec", - "signal.target.parent.is_kworker", - "signal.target.parent.is_thread", - "signal.target.parent.pid", - "signal.target.parent.ppid", - "signal.target.parent.tid", - "signal.target.parent.tty_name", - "signal.target.parent.uid", - "signal.target.parent.user", - "signal.target.parent.user_session.k8s_groups", - "signal.target.parent.user_session.k8s_uid", - "signal.target.parent.user_session.k8s_username", - "signal.target.pid", - "signal.target.ppid", - "signal.target.tid", - "signal.target.tty_name", - "signal.target.uid", - "signal.target.user", - "signal.target.user_session.k8s_groups", - "signal.target.user_session.k8s_uid", - "signal.target.user_session.k8s_username", - "signal.type", - "splice.file.change_time", - "splice.file.filesystem", - "splice.file.gid", - "splice.file.group", - "splice.file.hashes", - "splice.file.in_upper_layer", - "splice.file.inode", - "splice.file.mode", - "splice.file.modification_time", - "splice.file.mount_id", - "splice.file.name", - "splice.file.name.length", - "splice.file.package.name", - "splice.file.package.source_version", - "splice.file.package.version", - "splice.file.path", - "splice.file.path.length", - "splice.file.rights", - "splice.file.uid", - "splice.file.user", - "splice.pipe_entry_flag", - "splice.pipe_exit_flag", - "splice.retval", - "unlink.file.change_time", - "unlink.file.filesystem", - "unlink.file.gid", - "unlink.file.group", - "unlink.file.hashes", - "unlink.file.in_upper_layer", - "unlink.file.inode", - "unlink.file.mode", - "unlink.file.modification_time", - "unlink.file.mount_id", - "unlink.file.name", - 
"unlink.file.name.length", - "unlink.file.package.name", - "unlink.file.package.source_version", - "unlink.file.package.version", - "unlink.file.path", - "unlink.file.path.length", - "unlink.file.rights", - "unlink.file.uid", - "unlink.file.user", - "unlink.flags", - "unlink.retval", - "unlink.syscall.dirfd", - "unlink.syscall.flags", - "unlink.syscall.path", - "unload_module.name", - "unload_module.retval", - "utimes.file.change_time", - "utimes.file.filesystem", - "utimes.file.gid", - "utimes.file.group", - "utimes.file.hashes", - "utimes.file.in_upper_layer", - "utimes.file.inode", - "utimes.file.mode", - "utimes.file.modification_time", - "utimes.file.mount_id", - "utimes.file.name", - "utimes.file.name.length", - "utimes.file.package.name", - "utimes.file.package.source_version", - "utimes.file.package.version", - "utimes.file.path", - "utimes.file.path.length", - "utimes.file.rights", - "utimes.file.uid", - "utimes.file.user", - "utimes.retval", - "utimes.syscall.path", - } -} -func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { - switch field { - case "bind.addr.family": - return int(ev.Bind.AddrFamily), nil - case "bind.addr.ip": - return ev.Bind.Addr.IPNet, nil - case "bind.addr.is_public": - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Bind.Addr), nil - case "bind.addr.port": - return int(ev.Bind.Addr.Port), nil - case "bind.protocol": - return int(ev.Bind.Protocol), nil - case "bind.retval": - return int(ev.Bind.SyscallEvent.Retval), nil - case "bpf.cmd": - return int(ev.BPF.Cmd), nil - case "bpf.map.name": - return ev.BPF.Map.Name, nil - case "bpf.map.type": - return int(ev.BPF.Map.Type), nil - case "bpf.prog.attach_type": - return int(ev.BPF.Program.AttachType), nil - case "bpf.prog.helpers": - result := make([]int, len(ev.BPF.Program.Helpers)) - for i, v := range ev.BPF.Program.Helpers { - result[i] = int(v) - } - return result, nil - case "bpf.prog.name": - return ev.BPF.Program.Name, nil - case "bpf.prog.tag": - return ev.BPF.Program.Tag, nil - case "bpf.prog.type": - return int(ev.BPF.Program.Type), nil - case "bpf.retval": - return int(ev.BPF.SyscallEvent.Retval), nil - case "capset.cap_effective": - return int(ev.Capset.CapEffective), nil - case "capset.cap_permitted": - return int(ev.Capset.CapPermitted), nil - case "cgroup.file.inode": - return int(ev.CGroupContext.CGroupFile.Inode), nil - case "cgroup.file.mount_id": - return int(ev.CGroupContext.CGroupFile.MountID), nil - case "cgroup.id": - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.CGroupContext), nil - case "cgroup.manager": - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.CGroupContext), nil - case "cgroup.version": - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.CGroupContext), nil - case "chdir.file.change_time": - return int(ev.Chdir.File.FileFields.CTime), nil - case "chdir.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Chdir.File), nil - case "chdir.file.gid": - return int(ev.Chdir.File.FileFields.GID), nil - case "chdir.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Chdir.File.FileFields), nil - case "chdir.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Chdir.File), nil - case "chdir.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Chdir.File.FileFields), nil - case "chdir.file.inode": - return int(ev.Chdir.File.FileFields.PathKey.Inode), nil - case "chdir.file.mode": - return int(ev.Chdir.File.FileFields.Mode), nil - case "chdir.file.modification_time": - 
return int(ev.Chdir.File.FileFields.MTime), nil - case "chdir.file.mount_id": - return int(ev.Chdir.File.FileFields.PathKey.MountID), nil - case "chdir.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chdir.File), nil - case "chdir.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chdir.File), nil - case "chdir.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Chdir.File), nil - case "chdir.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Chdir.File), nil - case "chdir.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Chdir.File), nil - case "chdir.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chdir.File), nil - case "chdir.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chdir.File), nil - case "chdir.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Chdir.File.FileFields)), nil - case "chdir.file.uid": - return int(ev.Chdir.File.FileFields.UID), nil - case "chdir.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Chdir.File.FileFields), nil - case "chdir.retval": - return int(ev.Chdir.SyscallEvent.Retval), nil - case "chdir.syscall.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Chdir.SyscallContext), nil - case "chmod.file.change_time": - return int(ev.Chmod.File.FileFields.CTime), nil - case "chmod.file.destination.mode": - return int(ev.Chmod.Mode), nil - case "chmod.file.destination.rights": - return int(ev.Chmod.Mode), nil - case "chmod.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Chmod.File), nil - case "chmod.file.gid": - return int(ev.Chmod.File.FileFields.GID), nil - case "chmod.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Chmod.File.FileFields), nil - case "chmod.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Chmod.File), nil - case "chmod.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Chmod.File.FileFields), nil - case "chmod.file.inode": - return int(ev.Chmod.File.FileFields.PathKey.Inode), nil - case "chmod.file.mode": - return int(ev.Chmod.File.FileFields.Mode), nil - case "chmod.file.modification_time": - return int(ev.Chmod.File.FileFields.MTime), nil - case "chmod.file.mount_id": - return int(ev.Chmod.File.FileFields.PathKey.MountID), nil - case "chmod.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chmod.File), nil - case "chmod.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chmod.File), nil - case "chmod.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Chmod.File), nil - case "chmod.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Chmod.File), nil - case "chmod.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Chmod.File), nil - case "chmod.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chmod.File), nil - case "chmod.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chmod.File), nil - case "chmod.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Chmod.File.FileFields)), nil - case "chmod.file.uid": - return int(ev.Chmod.File.FileFields.UID), nil - case "chmod.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Chmod.File.FileFields), nil - case "chmod.retval": - return int(ev.Chmod.SyscallEvent.Retval), nil - case 
"chmod.syscall.mode": - return int(ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Chmod.SyscallContext)), nil - case "chmod.syscall.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Chmod.SyscallContext), nil - case "chown.file.change_time": - return int(ev.Chown.File.FileFields.CTime), nil - case "chown.file.destination.gid": - return int(ev.Chown.GID), nil - case "chown.file.destination.group": - return ev.FieldHandlers.ResolveChownGID(ev, &ev.Chown), nil - case "chown.file.destination.uid": - return int(ev.Chown.UID), nil - case "chown.file.destination.user": - return ev.FieldHandlers.ResolveChownUID(ev, &ev.Chown), nil - case "chown.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Chown.File), nil - case "chown.file.gid": - return int(ev.Chown.File.FileFields.GID), nil - case "chown.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Chown.File.FileFields), nil - case "chown.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Chown.File), nil - case "chown.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Chown.File.FileFields), nil - case "chown.file.inode": - return int(ev.Chown.File.FileFields.PathKey.Inode), nil - case "chown.file.mode": - return int(ev.Chown.File.FileFields.Mode), nil - case "chown.file.modification_time": - return int(ev.Chown.File.FileFields.MTime), nil - case "chown.file.mount_id": - return int(ev.Chown.File.FileFields.PathKey.MountID), nil - case "chown.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chown.File), nil - case "chown.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chown.File), nil - case "chown.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Chown.File), nil - case "chown.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Chown.File), nil - case "chown.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Chown.File), nil - case "chown.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chown.File), nil - case "chown.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chown.File), nil - case "chown.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Chown.File.FileFields)), nil - case "chown.file.uid": - return int(ev.Chown.File.FileFields.UID), nil - case "chown.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Chown.File.FileFields), nil - case "chown.retval": - return int(ev.Chown.SyscallEvent.Retval), nil - case "chown.syscall.gid": - return int(ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Chown.SyscallContext)), nil - case "chown.syscall.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Chown.SyscallContext), nil - case "chown.syscall.uid": - return int(ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Chown.SyscallContext)), nil - case "connect.addr.family": - return int(ev.Connect.AddrFamily), nil - case "connect.addr.ip": - return ev.Connect.Addr.IPNet, nil - case "connect.addr.is_public": - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr), nil - case "connect.addr.port": - return int(ev.Connect.Addr.Port), nil - case "connect.protocol": - return int(ev.Connect.Protocol), nil - case "connect.retval": - return int(ev.Connect.SyscallEvent.Retval), nil - case "container.created_at": - return int(ev.FieldHandlers.ResolveContainerCreatedAt(ev, ev.BaseEvent.ContainerContext)), nil - case 
"container.id": - return ev.FieldHandlers.ResolveContainerID(ev, ev.BaseEvent.ContainerContext), nil - case "container.runtime": - return ev.FieldHandlers.ResolveContainerRuntime(ev, ev.BaseEvent.ContainerContext), nil - case "container.tags": - return ev.FieldHandlers.ResolveContainerTags(ev, ev.BaseEvent.ContainerContext), nil - case "dns.id": - return int(ev.DNS.ID), nil - case "dns.question.class": - return int(ev.DNS.Class), nil - case "dns.question.count": - return int(ev.DNS.Count), nil - case "dns.question.length": - return int(ev.DNS.Size), nil - case "dns.question.name": - return ev.DNS.Name, nil - case "dns.question.name.length": - return len(ev.DNS.Name), nil - case "dns.question.type": - return int(ev.DNS.Type), nil - case "event.async": - return ev.FieldHandlers.ResolveAsync(ev), nil - case "event.hostname": - return ev.FieldHandlers.ResolveHostname(ev, &ev.BaseEvent), nil - case "event.origin": - return ev.BaseEvent.Origin, nil - case "event.os": - return ev.BaseEvent.Os, nil - case "event.service": - return ev.FieldHandlers.ResolveService(ev, &ev.BaseEvent), nil - case "event.timestamp": - return int(ev.FieldHandlers.ResolveEventTimestamp(ev, &ev.BaseEvent)), nil - case "exec.args": - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exec.Process), nil - case "exec.args_flags": - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.Exec.Process), nil - case "exec.args_options": - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.Exec.Process), nil - case "exec.args_truncated": - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Exec.Process), nil - case "exec.argv": - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.Exec.Process), nil - case "exec.argv0": - return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Exec.Process), nil - case "exec.auid": - return int(ev.Exec.Process.Credentials.AUID), nil - case "exec.cap_effective": - return int(ev.Exec.Process.Credentials.CapEffective), nil - case "exec.cap_permitted": - return int(ev.Exec.Process.Credentials.CapPermitted), nil - case "exec.cgroup.file.inode": - return int(ev.Exec.Process.CGroup.CGroupFile.Inode), nil - case "exec.cgroup.file.mount_id": - return int(ev.Exec.Process.CGroup.CGroupFile.MountID), nil - case "exec.cgroup.id": - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.Exec.Process.CGroup), nil - case "exec.cgroup.manager": - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Exec.Process.CGroup), nil - case "exec.cgroup.version": - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Exec.Process.CGroup), nil - case "exec.comm": - return ev.Exec.Process.Comm, nil - case "exec.container.id": - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.Exec.Process), nil - case "exec.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exec.Process)), nil - case "exec.egid": - return int(ev.Exec.Process.Credentials.EGID), nil - case "exec.egroup": - return ev.Exec.Process.Credentials.EGroup, nil - case "exec.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exec.Process), nil - case "exec.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exec.Process), nil - case "exec.envs_truncated": - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.Exec.Process), nil - case "exec.euid": - return int(ev.Exec.Process.Credentials.EUID), nil - case "exec.euser": - return ev.Exec.Process.Credentials.EUser, nil - case "exec.file.change_time": - if !ev.Exec.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return 
int(ev.Exec.Process.FileEvent.FileFields.CTime), nil - case "exec.file.filesystem": - if !ev.Exec.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.gid": - if !ev.Exec.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.FileEvent.FileFields.GID), nil - case "exec.file.group": - if !ev.Exec.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exec.Process.FileEvent.FileFields), nil - case "exec.file.hashes": - if !ev.Exec.Process.IsNotKworker() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.in_upper_layer": - if !ev.Exec.Process.IsNotKworker() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exec.Process.FileEvent.FileFields), nil - case "exec.file.inode": - if !ev.Exec.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.FileEvent.FileFields.PathKey.Inode), nil - case "exec.file.mode": - if !ev.Exec.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.FileEvent.FileFields.Mode), nil - case "exec.file.modification_time": - if !ev.Exec.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.FileEvent.FileFields.MTime), nil - case "exec.file.mount_id": - if !ev.Exec.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.FileEvent.FileFields.PathKey.MountID), nil - case "exec.file.name": - if !ev.Exec.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.package.name": - if !ev.Exec.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.package.source_version": - if !ev.Exec.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.package.version": - if !ev.Exec.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.path": - if !ev.Exec.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.rights": - if !ev.Exec.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exec.Process.FileEvent.FileFields)), nil - case "exec.file.uid": - if !ev.Exec.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.FileEvent.FileFields.UID), nil - case "exec.file.user": - if 
!ev.Exec.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exec.Process.FileEvent.FileFields), nil - case "exec.fsgid": - return int(ev.Exec.Process.Credentials.FSGID), nil - case "exec.fsgroup": - return ev.Exec.Process.Credentials.FSGroup, nil - case "exec.fsuid": - return int(ev.Exec.Process.Credentials.FSUID), nil - case "exec.fsuser": - return ev.Exec.Process.Credentials.FSUser, nil - case "exec.gid": - return int(ev.Exec.Process.Credentials.GID), nil - case "exec.group": - return ev.Exec.Process.Credentials.Group, nil - case "exec.interpreter.file.change_time": - if !ev.Exec.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.CTime), nil - case "exec.interpreter.file.filesystem": - if !ev.Exec.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exec.Process.LinuxBinprm.FileEvent), nil - case "exec.interpreter.file.gid": - if !ev.Exec.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.GID), nil - case "exec.interpreter.file.group": - if !ev.Exec.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields), nil - case "exec.interpreter.file.hashes": - if !ev.Exec.Process.HasInterpreter() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exec.Process.LinuxBinprm.FileEvent), nil - case "exec.interpreter.file.in_upper_layer": - if !ev.Exec.Process.HasInterpreter() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields), nil - case "exec.interpreter.file.inode": - if !ev.Exec.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode), nil - case "exec.interpreter.file.mode": - if !ev.Exec.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode), nil - case "exec.interpreter.file.modification_time": - if !ev.Exec.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.MTime), nil - case "exec.interpreter.file.mount_id": - if !ev.Exec.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID), nil - case "exec.interpreter.file.name": - if !ev.Exec.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.LinuxBinprm.FileEvent), nil - case "exec.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.LinuxBinprm.FileEvent), nil - case "exec.interpreter.file.package.name": - if !ev.Exec.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exec.Process.LinuxBinprm.FileEvent), nil - case "exec.interpreter.file.package.source_version": - if !ev.Exec.Process.HasInterpreter() { - return 
"", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exec.Process.LinuxBinprm.FileEvent), nil - case "exec.interpreter.file.package.version": - if !ev.Exec.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exec.Process.LinuxBinprm.FileEvent), nil - case "exec.interpreter.file.path": - if !ev.Exec.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent), nil - case "exec.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent), nil - case "exec.interpreter.file.rights": - if !ev.Exec.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields)), nil - case "exec.interpreter.file.uid": - if !ev.Exec.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.UID), nil - case "exec.interpreter.file.user": - if !ev.Exec.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields), nil - case "exec.is_exec": - return ev.Exec.Process.IsExec, nil - case "exec.is_kworker": - return ev.Exec.Process.PIDContext.IsKworker, nil - case "exec.is_thread": - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.Exec.Process), nil - case "exec.pid": - return int(ev.Exec.Process.PIDContext.Pid), nil - case "exec.ppid": - return int(ev.Exec.Process.PPid), nil - case "exec.syscall.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Exec.SyscallContext), nil - case "exec.tid": - return int(ev.Exec.Process.PIDContext.Tid), nil - case "exec.tty_name": - return ev.Exec.Process.TTYName, nil - case "exec.uid": - return int(ev.Exec.Process.Credentials.UID), nil - case "exec.user": - return ev.Exec.Process.Credentials.User, nil - case "exec.user_session.k8s_groups": - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Exec.Process.UserSession), nil - case "exec.user_session.k8s_uid": - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Exec.Process.UserSession), nil - case "exec.user_session.k8s_username": - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Exec.Process.UserSession), nil - case "exit.args": - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exit.Process), nil - case "exit.args_flags": - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.Exit.Process), nil - case "exit.args_options": - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.Exit.Process), nil - case "exit.args_truncated": - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Exit.Process), nil - case "exit.argv": - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.Exit.Process), nil - case "exit.argv0": - return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Exit.Process), nil - case "exit.auid": - return int(ev.Exit.Process.Credentials.AUID), nil - case "exit.cap_effective": - return int(ev.Exit.Process.Credentials.CapEffective), nil - case "exit.cap_permitted": - return int(ev.Exit.Process.Credentials.CapPermitted), nil - case "exit.cause": - return int(ev.Exit.Cause), nil - case "exit.cgroup.file.inode": - return int(ev.Exit.Process.CGroup.CGroupFile.Inode), nil - case "exit.cgroup.file.mount_id": 
- return int(ev.Exit.Process.CGroup.CGroupFile.MountID), nil - case "exit.cgroup.id": - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.Exit.Process.CGroup), nil - case "exit.cgroup.manager": - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Exit.Process.CGroup), nil - case "exit.cgroup.version": - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Exit.Process.CGroup), nil - case "exit.code": - return int(ev.Exit.Code), nil - case "exit.comm": - return ev.Exit.Process.Comm, nil - case "exit.container.id": - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.Exit.Process), nil - case "exit.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exit.Process)), nil - case "exit.egid": - return int(ev.Exit.Process.Credentials.EGID), nil - case "exit.egroup": - return ev.Exit.Process.Credentials.EGroup, nil - case "exit.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exit.Process), nil - case "exit.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exit.Process), nil - case "exit.envs_truncated": - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.Exit.Process), nil - case "exit.euid": - return int(ev.Exit.Process.Credentials.EUID), nil - case "exit.euser": - return ev.Exit.Process.Credentials.EUser, nil - case "exit.file.change_time": - if !ev.Exit.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.FileEvent.FileFields.CTime), nil - case "exit.file.filesystem": - if !ev.Exit.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.gid": - if !ev.Exit.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.FileEvent.FileFields.GID), nil - case "exit.file.group": - if !ev.Exit.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exit.Process.FileEvent.FileFields), nil - case "exit.file.hashes": - if !ev.Exit.Process.IsNotKworker() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.in_upper_layer": - if !ev.Exit.Process.IsNotKworker() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exit.Process.FileEvent.FileFields), nil - case "exit.file.inode": - if !ev.Exit.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.FileEvent.FileFields.PathKey.Inode), nil - case "exit.file.mode": - if !ev.Exit.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.FileEvent.FileFields.Mode), nil - case "exit.file.modification_time": - if !ev.Exit.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.FileEvent.FileFields.MTime), nil - case "exit.file.mount_id": - if !ev.Exit.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.FileEvent.FileFields.PathKey.MountID), nil - case "exit.file.name": - if !ev.Exit.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, 
&ev.Exit.Process.FileEvent), nil - case "exit.file.package.name": - if !ev.Exit.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.package.source_version": - if !ev.Exit.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.package.version": - if !ev.Exit.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.path": - if !ev.Exit.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.rights": - if !ev.Exit.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exit.Process.FileEvent.FileFields)), nil - case "exit.file.uid": - if !ev.Exit.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.FileEvent.FileFields.UID), nil - case "exit.file.user": - if !ev.Exit.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exit.Process.FileEvent.FileFields), nil - case "exit.fsgid": - return int(ev.Exit.Process.Credentials.FSGID), nil - case "exit.fsgroup": - return ev.Exit.Process.Credentials.FSGroup, nil - case "exit.fsuid": - return int(ev.Exit.Process.Credentials.FSUID), nil - case "exit.fsuser": - return ev.Exit.Process.Credentials.FSUser, nil - case "exit.gid": - return int(ev.Exit.Process.Credentials.GID), nil - case "exit.group": - return ev.Exit.Process.Credentials.Group, nil - case "exit.interpreter.file.change_time": - if !ev.Exit.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.CTime), nil - case "exit.interpreter.file.filesystem": - if !ev.Exit.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exit.Process.LinuxBinprm.FileEvent), nil - case "exit.interpreter.file.gid": - if !ev.Exit.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.GID), nil - case "exit.interpreter.file.group": - if !ev.Exit.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields), nil - case "exit.interpreter.file.hashes": - if !ev.Exit.Process.HasInterpreter() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exit.Process.LinuxBinprm.FileEvent), nil - case "exit.interpreter.file.in_upper_layer": - if !ev.Exit.Process.HasInterpreter() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields), nil - case "exit.interpreter.file.inode": - if !ev.Exit.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return 
int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode), nil - case "exit.interpreter.file.mode": - if !ev.Exit.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode), nil - case "exit.interpreter.file.modification_time": - if !ev.Exit.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.MTime), nil - case "exit.interpreter.file.mount_id": - if !ev.Exit.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID), nil - case "exit.interpreter.file.name": - if !ev.Exit.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.LinuxBinprm.FileEvent), nil - case "exit.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.LinuxBinprm.FileEvent), nil - case "exit.interpreter.file.package.name": - if !ev.Exit.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exit.Process.LinuxBinprm.FileEvent), nil - case "exit.interpreter.file.package.source_version": - if !ev.Exit.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exit.Process.LinuxBinprm.FileEvent), nil - case "exit.interpreter.file.package.version": - if !ev.Exit.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exit.Process.LinuxBinprm.FileEvent), nil - case "exit.interpreter.file.path": - if !ev.Exit.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent), nil - case "exit.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent), nil - case "exit.interpreter.file.rights": - if !ev.Exit.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields)), nil - case "exit.interpreter.file.uid": - if !ev.Exit.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.UID), nil - case "exit.interpreter.file.user": - if !ev.Exit.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields), nil - case "exit.is_exec": - return ev.Exit.Process.IsExec, nil - case "exit.is_kworker": - return ev.Exit.Process.PIDContext.IsKworker, nil - case "exit.is_thread": - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.Exit.Process), nil - case "exit.pid": - return int(ev.Exit.Process.PIDContext.Pid), nil - case "exit.ppid": - return int(ev.Exit.Process.PPid), nil - case "exit.tid": - return int(ev.Exit.Process.PIDContext.Tid), nil - case "exit.tty_name": - return ev.Exit.Process.TTYName, nil - case "exit.uid": - return int(ev.Exit.Process.Credentials.UID), nil - case "exit.user": - return ev.Exit.Process.Credentials.User, nil - case "exit.user_session.k8s_groups": - return ev.FieldHandlers.ResolveK8SGroups(ev, 
&ev.Exit.Process.UserSession), nil - case "exit.user_session.k8s_uid": - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Exit.Process.UserSession), nil - case "exit.user_session.k8s_username": - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Exit.Process.UserSession), nil - case "imds.aws.is_imds_v2": - return ev.IMDS.AWS.IsIMDSv2, nil - case "imds.aws.security_credentials.type": - return ev.IMDS.AWS.SecurityCredentials.Type, nil - case "imds.cloud_provider": - return ev.IMDS.CloudProvider, nil - case "imds.host": - return ev.IMDS.Host, nil - case "imds.server": - return ev.IMDS.Server, nil - case "imds.type": - return ev.IMDS.Type, nil - case "imds.url": - return ev.IMDS.URL, nil - case "imds.user_agent": - return ev.IMDS.UserAgent, nil - case "link.file.change_time": - return int(ev.Link.Source.FileFields.CTime), nil - case "link.file.destination.change_time": - return int(ev.Link.Target.FileFields.CTime), nil - case "link.file.destination.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Link.Target), nil - case "link.file.destination.gid": - return int(ev.Link.Target.FileFields.GID), nil - case "link.file.destination.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Link.Target.FileFields), nil - case "link.file.destination.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Link.Target), nil - case "link.file.destination.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Link.Target.FileFields), nil - case "link.file.destination.inode": - return int(ev.Link.Target.FileFields.PathKey.Inode), nil - case "link.file.destination.mode": - return int(ev.Link.Target.FileFields.Mode), nil - case "link.file.destination.modification_time": - return int(ev.Link.Target.FileFields.MTime), nil - case "link.file.destination.mount_id": - return int(ev.Link.Target.FileFields.PathKey.MountID), nil - case "link.file.destination.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Link.Target), nil - case "link.file.destination.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Link.Target), nil - case "link.file.destination.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Link.Target), nil - case "link.file.destination.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Link.Target), nil - case "link.file.destination.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Link.Target), nil - case "link.file.destination.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Target), nil - case "link.file.destination.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Target), nil - case "link.file.destination.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Link.Target.FileFields)), nil - case "link.file.destination.uid": - return int(ev.Link.Target.FileFields.UID), nil - case "link.file.destination.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Link.Target.FileFields), nil - case "link.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Link.Source), nil - case "link.file.gid": - return int(ev.Link.Source.FileFields.GID), nil - case "link.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Link.Source.FileFields), nil - case "link.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Link.Source), nil - case "link.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Link.Source.FileFields), 
nil - case "link.file.inode": - return int(ev.Link.Source.FileFields.PathKey.Inode), nil - case "link.file.mode": - return int(ev.Link.Source.FileFields.Mode), nil - case "link.file.modification_time": - return int(ev.Link.Source.FileFields.MTime), nil - case "link.file.mount_id": - return int(ev.Link.Source.FileFields.PathKey.MountID), nil - case "link.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Link.Source), nil - case "link.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Link.Source), nil - case "link.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Link.Source), nil - case "link.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Link.Source), nil - case "link.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Link.Source), nil - case "link.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Source), nil - case "link.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Source), nil - case "link.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Link.Source.FileFields)), nil - case "link.file.uid": - return int(ev.Link.Source.FileFields.UID), nil - case "link.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Link.Source.FileFields), nil - case "link.retval": - return int(ev.Link.SyscallEvent.Retval), nil - case "link.syscall.destination.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Link.SyscallContext), nil - case "link.syscall.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Link.SyscallContext), nil - case "load_module.args": - return ev.FieldHandlers.ResolveModuleArgs(ev, &ev.LoadModule), nil - case "load_module.args_truncated": - return ev.LoadModule.ArgsTruncated, nil - case "load_module.argv": - return ev.FieldHandlers.ResolveModuleArgv(ev, &ev.LoadModule), nil - case "load_module.file.change_time": - return int(ev.LoadModule.File.FileFields.CTime), nil - case "load_module.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.LoadModule.File), nil - case "load_module.file.gid": - return int(ev.LoadModule.File.FileFields.GID), nil - case "load_module.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.LoadModule.File.FileFields), nil - case "load_module.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.LoadModule.File), nil - case "load_module.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.LoadModule.File.FileFields), nil - case "load_module.file.inode": - return int(ev.LoadModule.File.FileFields.PathKey.Inode), nil - case "load_module.file.mode": - return int(ev.LoadModule.File.FileFields.Mode), nil - case "load_module.file.modification_time": - return int(ev.LoadModule.File.FileFields.MTime), nil - case "load_module.file.mount_id": - return int(ev.LoadModule.File.FileFields.PathKey.MountID), nil - case "load_module.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.LoadModule.File), nil - case "load_module.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.LoadModule.File), nil - case "load_module.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.LoadModule.File), nil - case "load_module.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.LoadModule.File), nil - case "load_module.file.package.version": - return 
ev.FieldHandlers.ResolvePackageVersion(ev, &ev.LoadModule.File), nil - case "load_module.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.LoadModule.File), nil - case "load_module.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.LoadModule.File), nil - case "load_module.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.LoadModule.File.FileFields)), nil - case "load_module.file.uid": - return int(ev.LoadModule.File.FileFields.UID), nil - case "load_module.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.LoadModule.File.FileFields), nil - case "load_module.loaded_from_memory": - return ev.LoadModule.LoadedFromMemory, nil - case "load_module.name": - return ev.LoadModule.Name, nil - case "load_module.retval": - return int(ev.LoadModule.SyscallEvent.Retval), nil - case "mkdir.file.change_time": - return int(ev.Mkdir.File.FileFields.CTime), nil - case "mkdir.file.destination.mode": - return int(ev.Mkdir.Mode), nil - case "mkdir.file.destination.rights": - return int(ev.Mkdir.Mode), nil - case "mkdir.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Mkdir.File), nil - case "mkdir.file.gid": - return int(ev.Mkdir.File.FileFields.GID), nil - case "mkdir.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Mkdir.File.FileFields), nil - case "mkdir.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Mkdir.File), nil - case "mkdir.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Mkdir.File.FileFields), nil - case "mkdir.file.inode": - return int(ev.Mkdir.File.FileFields.PathKey.Inode), nil - case "mkdir.file.mode": - return int(ev.Mkdir.File.FileFields.Mode), nil - case "mkdir.file.modification_time": - return int(ev.Mkdir.File.FileFields.MTime), nil - case "mkdir.file.mount_id": - return int(ev.Mkdir.File.FileFields.PathKey.MountID), nil - case "mkdir.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Mkdir.File), nil - case "mkdir.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Mkdir.File), nil - case "mkdir.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Mkdir.File), nil - case "mkdir.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Mkdir.File), nil - case "mkdir.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Mkdir.File), nil - case "mkdir.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Mkdir.File), nil - case "mkdir.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Mkdir.File), nil - case "mkdir.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Mkdir.File.FileFields)), nil - case "mkdir.file.uid": - return int(ev.Mkdir.File.FileFields.UID), nil - case "mkdir.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Mkdir.File.FileFields), nil - case "mkdir.retval": - return int(ev.Mkdir.SyscallEvent.Retval), nil - case "mmap.file.change_time": - return int(ev.MMap.File.FileFields.CTime), nil - case "mmap.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.MMap.File), nil - case "mmap.file.gid": - return int(ev.MMap.File.FileFields.GID), nil - case "mmap.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.MMap.File.FileFields), nil - case "mmap.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.MMap.File), nil - case "mmap.file.in_upper_layer": - return 
ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.MMap.File.FileFields), nil - case "mmap.file.inode": - return int(ev.MMap.File.FileFields.PathKey.Inode), nil - case "mmap.file.mode": - return int(ev.MMap.File.FileFields.Mode), nil - case "mmap.file.modification_time": - return int(ev.MMap.File.FileFields.MTime), nil - case "mmap.file.mount_id": - return int(ev.MMap.File.FileFields.PathKey.MountID), nil - case "mmap.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.MMap.File), nil - case "mmap.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.MMap.File), nil - case "mmap.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.MMap.File), nil - case "mmap.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.MMap.File), nil - case "mmap.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.MMap.File), nil - case "mmap.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.MMap.File), nil - case "mmap.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.MMap.File), nil - case "mmap.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.MMap.File.FileFields)), nil - case "mmap.file.uid": - return int(ev.MMap.File.FileFields.UID), nil - case "mmap.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.MMap.File.FileFields), nil - case "mmap.flags": - return int(ev.MMap.Flags), nil - case "mmap.protection": - return int(ev.MMap.Protection), nil - case "mmap.retval": - return int(ev.MMap.SyscallEvent.Retval), nil - case "mount.fs_type": - return ev.Mount.Mount.FSType, nil - case "mount.mountpoint.path": - return ev.FieldHandlers.ResolveMountPointPath(ev, &ev.Mount), nil - case "mount.retval": - return int(ev.Mount.SyscallEvent.Retval), nil - case "mount.root.path": - return ev.FieldHandlers.ResolveMountRootPath(ev, &ev.Mount), nil - case "mount.source.path": - return ev.FieldHandlers.ResolveMountSourcePath(ev, &ev.Mount), nil - case "mount.syscall.fs_type": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Mount.SyscallContext), nil - case "mount.syscall.mountpoint.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Mount.SyscallContext), nil - case "mount.syscall.source.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Mount.SyscallContext), nil - case "mprotect.req_protection": - return ev.MProtect.ReqProtection, nil - case "mprotect.retval": - return int(ev.MProtect.SyscallEvent.Retval), nil - case "mprotect.vm_protection": - return ev.MProtect.VMProtection, nil - case "network.destination.ip": - return ev.NetworkContext.Destination.IPNet, nil - case "network.destination.is_public": - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Destination), nil - case "network.destination.port": - return int(ev.NetworkContext.Destination.Port), nil - case "network.device.ifname": - return ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.NetworkContext.Device), nil - case "network.l3_protocol": - return int(ev.NetworkContext.L3Protocol), nil - case "network.l4_protocol": - return int(ev.NetworkContext.L4Protocol), nil - case "network.size": - return int(ev.NetworkContext.Size), nil - case "network.source.ip": - return ev.NetworkContext.Source.IPNet, nil - case "network.source.is_public": - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Source), nil - case "network.source.port": - return int(ev.NetworkContext.Source.Port), nil - case "ondemand.arg1.str": - 
return ev.FieldHandlers.ResolveOnDemandArg1Str(ev, &ev.OnDemand), nil - case "ondemand.arg1.uint": - return int(ev.FieldHandlers.ResolveOnDemandArg1Uint(ev, &ev.OnDemand)), nil - case "ondemand.arg2.str": - return ev.FieldHandlers.ResolveOnDemandArg2Str(ev, &ev.OnDemand), nil - case "ondemand.arg2.uint": - return int(ev.FieldHandlers.ResolveOnDemandArg2Uint(ev, &ev.OnDemand)), nil - case "ondemand.arg3.str": - return ev.FieldHandlers.ResolveOnDemandArg3Str(ev, &ev.OnDemand), nil - case "ondemand.arg3.uint": - return int(ev.FieldHandlers.ResolveOnDemandArg3Uint(ev, &ev.OnDemand)), nil - case "ondemand.arg4.str": - return ev.FieldHandlers.ResolveOnDemandArg4Str(ev, &ev.OnDemand), nil - case "ondemand.arg4.uint": - return int(ev.FieldHandlers.ResolveOnDemandArg4Uint(ev, &ev.OnDemand)), nil - case "ondemand.name": - return ev.FieldHandlers.ResolveOnDemandName(ev, &ev.OnDemand), nil - case "open.file.change_time": - return int(ev.Open.File.FileFields.CTime), nil - case "open.file.destination.mode": - return int(ev.Open.Mode), nil - case "open.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Open.File), nil - case "open.file.gid": - return int(ev.Open.File.FileFields.GID), nil - case "open.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Open.File.FileFields), nil - case "open.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Open.File), nil - case "open.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Open.File.FileFields), nil - case "open.file.inode": - return int(ev.Open.File.FileFields.PathKey.Inode), nil - case "open.file.mode": - return int(ev.Open.File.FileFields.Mode), nil - case "open.file.modification_time": - return int(ev.Open.File.FileFields.MTime), nil - case "open.file.mount_id": - return int(ev.Open.File.FileFields.PathKey.MountID), nil - case "open.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Open.File), nil - case "open.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Open.File), nil - case "open.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Open.File), nil - case "open.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Open.File), nil - case "open.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Open.File), nil - case "open.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Open.File), nil - case "open.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Open.File), nil - case "open.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Open.File.FileFields)), nil - case "open.file.uid": - return int(ev.Open.File.FileFields.UID), nil - case "open.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Open.File.FileFields), nil - case "open.flags": - return int(ev.Open.Flags), nil - case "open.retval": - return int(ev.Open.SyscallEvent.Retval), nil - case "open.syscall.flags": - return int(ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Open.SyscallContext)), nil - case "open.syscall.mode": - return int(ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Open.SyscallContext)), nil - case "open.syscall.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Open.SyscallContext), nil - case "packet.destination.ip": - return ev.RawPacket.NetworkContext.Destination.IPNet, nil - case "packet.destination.is_public": - return ev.FieldHandlers.ResolveIsIPPublic(ev, 
-		return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Destination), nil
-	case "packet.destination.port":
-		return int(ev.RawPacket.NetworkContext.Destination.Port), nil
-	case "packet.device.ifname":
-		return ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.RawPacket.NetworkContext.Device), nil
-	case "packet.filter":
-		return ev.RawPacket.Filter, nil
-	case "packet.l3_protocol":
-		return int(ev.RawPacket.NetworkContext.L3Protocol), nil
-	case "packet.l4_protocol":
-		return int(ev.RawPacket.NetworkContext.L4Protocol), nil
-	case "packet.size":
-		return int(ev.RawPacket.NetworkContext.Size), nil
-	case "packet.source.ip":
-		return ev.RawPacket.NetworkContext.Source.IPNet, nil
-	case "packet.source.is_public":
-		return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Source), nil
-	case "packet.source.port":
-		return int(ev.RawPacket.NetworkContext.Source.Port), nil
-	case "packet.tls.version":
-		return int(ev.RawPacket.TLSContext.Version), nil
-	case "process.ancestors.args":
-		var values []string
-		ctx := eval.NewContext(ev)
-		iterator := &ProcessAncestorsIterator{}
-		ptr := iterator.Front(ctx)
-		for ptr != nil {
-			element := ptr
-			result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process)
-			values = append(values, result)
-			ptr = iterator.Next()
-		}
-		return values, nil
-	case "process.ancestors.args_flags":
-		var values []string
-		ctx := eval.NewContext(ev)
-		iterator := &ProcessAncestorsIterator{}
-		ptr := iterator.Front(ctx)
-		for ptr != nil {
-			element := ptr
-			result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process)
-			values = append(values, result...)
-			ptr = iterator.Next()
-		}
-		return values, nil
-	case "process.ancestors.args_options":
-		var values []string
-		ctx := eval.NewContext(ev)
-		iterator := &ProcessAncestorsIterator{}
-		ptr := iterator.Front(ctx)
-		for ptr != nil {
-			element := ptr
-			result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process)
-			values = append(values, result...)
-			ptr = iterator.Next()
-		}
-		return values, nil
-	case "process.ancestors.args_truncated":
-		var values []bool
-		ctx := eval.NewContext(ev)
-		iterator := &ProcessAncestorsIterator{}
-		ptr := iterator.Front(ctx)
-		for ptr != nil {
-			element := ptr
-			result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process)
-			values = append(values, result)
-			ptr = iterator.Next()
-		}
-		return values, nil
-	case "process.ancestors.argv":
-		var values []string
-		ctx := eval.NewContext(ev)
-		iterator := &ProcessAncestorsIterator{}
-		ptr := iterator.Front(ctx)
-		for ptr != nil {
-			element := ptr
-			result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process)
-			values = append(values, result...)
- ptr = iterator.Next() - } - return values, nil - case "process.ancestors.argv0": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.auid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.AUID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.cap_effective": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.CapEffective) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.cap_permitted": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.CapPermitted) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.cgroup.file.inode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.CGroup.CGroupFile.Inode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.cgroup.file.mount_id": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.CGroup.CGroupFile.MountID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.cgroup.id": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.cgroup.manager": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.cgroup.version": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveCGroupVersion(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.comm": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Comm - values = append(values, 
result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.container.id": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.created_at": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.egid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.EGID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.egroup": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.EGroup - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.envp": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.envs": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "process.ancestors.envs_truncated": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.euid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.EUID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.euser": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.EUser - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.change_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.filesystem": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.gid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.group": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.hashes": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.in_upper_layer": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.inode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.mode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.modification_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.mount_id": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil - case "process.ancestors.file.package.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.package.source_version": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.package.version": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, 
result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.path": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil - case "process.ancestors.file.rights": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.uid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.fsgid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.FSGID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.fsgroup": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.FSGroup - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.fsuid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.FSUID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.fsuser": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.FSUser - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.gid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.GID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.group": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := 
iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.Group - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.change_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.filesystem": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.gid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.group": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.hashes": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.in_upper_layer": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.inode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.mode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.modification_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.mount_id": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.ancestors.interpreter.file.package.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.package.source_version": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.package.version": - var values []string - ctx := eval.NewContext(ev) - 
iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.path": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.ancestors.interpreter.file.rights": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.uid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.interpreter.file.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.is_exec": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.IsExec - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.is_kworker": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.PIDContext.IsKworker - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.is_thread": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.length": - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - return iterator.Len(ctx), nil - case "process.ancestors.pid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PIDContext.Pid) - values = append(values, result) - ptr = 
iterator.Next() - } - return values, nil - case "process.ancestors.ppid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PPid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.tid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PIDContext.Tid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.tty_name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.TTYName - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.uid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.UID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.User - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.user_session.k8s_groups": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "process.ancestors.user_session.k8s_uid": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.user_session.k8s_username": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.args": - return ev.FieldHandlers.ResolveProcessArgs(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.args_flags": - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.args_options": - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.args_truncated": - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.argv": - return ev.FieldHandlers.ResolveProcessArgv(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.argv0": - return ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.auid": - return int(ev.BaseEvent.ProcessContext.Process.Credentials.AUID), nil - case "process.cap_effective": - return int(ev.BaseEvent.ProcessContext.Process.Credentials.CapEffective), nil - case "process.cap_permitted": - return int(ev.BaseEvent.ProcessContext.Process.Credentials.CapPermitted), nil - case "process.cgroup.file.inode": - return int(ev.BaseEvent.ProcessContext.Process.CGroup.CGroupFile.Inode), nil - case "process.cgroup.file.mount_id": - return int(ev.BaseEvent.ProcessContext.Process.CGroup.CGroupFile.MountID), nil - case "process.cgroup.id": - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.BaseEvent.ProcessContext.Process.CGroup), nil - case "process.cgroup.manager": - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.BaseEvent.ProcessContext.Process.CGroup), nil - case "process.cgroup.version": - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.BaseEvent.ProcessContext.Process.CGroup), nil - case "process.comm": - return ev.BaseEvent.ProcessContext.Process.Comm, nil - case "process.container.id": - return ev.FieldHandlers.ResolveProcessContainerID(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &ev.BaseEvent.ProcessContext.Process)), nil - case "process.egid": - return int(ev.BaseEvent.ProcessContext.Process.Credentials.EGID), nil - case "process.egroup": - return ev.BaseEvent.ProcessContext.Process.Credentials.EGroup, nil - case "process.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.envs_truncated": - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.euid": - return int(ev.BaseEvent.ProcessContext.Process.Credentials.EUID), nil - case "process.euser": - return 
ev.BaseEvent.ProcessContext.Process.Credentials.EUser, nil - case "process.file.change_time": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.CTime), nil - case "process.file.filesystem": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.gid": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.GID), nil - case "process.file.group": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields), nil - case "process.file.hashes": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.in_upper_layer": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields), nil - case "process.file.inode": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode), nil - case "process.file.mode": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode), nil - case "process.file.modification_time": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.MTime), nil - case "process.file.mount_id": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID), nil - case "process.file.name": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.package.name": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.package.source_version": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.package.version": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return 
ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.path": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.rights": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields)), nil - case "process.file.uid": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.UID), nil - case "process.file.user": - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields), nil - case "process.fsgid": - return int(ev.BaseEvent.ProcessContext.Process.Credentials.FSGID), nil - case "process.fsgroup": - return ev.BaseEvent.ProcessContext.Process.Credentials.FSGroup, nil - case "process.fsuid": - return int(ev.BaseEvent.ProcessContext.Process.Credentials.FSUID), nil - case "process.fsuser": - return ev.BaseEvent.ProcessContext.Process.Credentials.FSUser, nil - case "process.gid": - return int(ev.BaseEvent.ProcessContext.Process.Credentials.GID), nil - case "process.group": - return ev.BaseEvent.ProcessContext.Process.Credentials.Group, nil - case "process.interpreter.file.change_time": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime), nil - case "process.interpreter.file.filesystem": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.interpreter.file.gid": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID), nil - case "process.interpreter.file.group": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields), nil - case "process.interpreter.file.hashes": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.interpreter.file.in_upper_layer": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields), nil - case "process.interpreter.file.inode": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: 
field} - } - return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode), nil - case "process.interpreter.file.mode": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode), nil - case "process.interpreter.file.modification_time": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime), nil - case "process.interpreter.file.mount_id": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID), nil - case "process.interpreter.file.name": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.interpreter.file.package.name": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.interpreter.file.package.source_version": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.interpreter.file.package.version": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.interpreter.file.path": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "process.interpreter.file.rights": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)), nil - case "process.interpreter.file.uid": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID), nil - case "process.interpreter.file.user": - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields), nil - case "process.is_exec": - return ev.BaseEvent.ProcessContext.Process.IsExec, nil - case "process.is_kworker": - return 
ev.BaseEvent.ProcessContext.Process.PIDContext.IsKworker, nil - case "process.is_thread": - return ev.FieldHandlers.ResolveProcessIsThread(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.parent.args": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.args_flags": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.args_options": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.args_truncated": - if !ev.BaseEvent.ProcessContext.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.argv": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.argv0": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.auid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.Credentials.AUID), nil - case "process.parent.cap_effective": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.Credentials.CapEffective), nil - case "process.parent.cap_permitted": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.Credentials.CapPermitted), nil - case "process.parent.cgroup.file.inode": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.Inode), nil - case "process.parent.cgroup.file.mount_id": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.MountID), nil - case "process.parent.cgroup.id": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup), nil - case "process.parent.cgroup.manager": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup), nil - case "process.parent.cgroup.version": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup), nil - case "process.parent.comm": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return 
ev.BaseEvent.ProcessContext.Parent.Comm, nil - case "process.parent.container.id": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.created_at": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent)), nil - case "process.parent.egid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.Credentials.EGID), nil - case "process.parent.egroup": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.EGroup, nil - case "process.parent.envp": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.envs": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.envs_truncated": - if !ev.BaseEvent.ProcessContext.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.euid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.Credentials.EUID), nil - case "process.parent.euser": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.EUser, nil - case "process.parent.file.change_time": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.CTime), nil - case "process.parent.file.filesystem": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.gid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.GID), nil - case "process.parent.file.group": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields), nil - case "process.parent.file.hashes": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - if 
!ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.in_upper_layer": - if !ev.BaseEvent.ProcessContext.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields), nil - case "process.parent.file.inode": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.Inode), nil - case "process.parent.file.mode": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode), nil - case "process.parent.file.modification_time": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.MTime), nil - case "process.parent.file.mount_id": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.MountID), nil - case "process.parent.file.name": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.package.name": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.package.source_version": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.package.version": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case 
"process.parent.file.path": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.rights": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields)), nil - case "process.parent.file.uid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.UID), nil - case "process.parent.file.user": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields), nil - case "process.parent.fsgid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.Credentials.FSGID), nil - case "process.parent.fsgroup": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.FSGroup, nil - case "process.parent.fsuid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.Credentials.FSUID), nil - case "process.parent.fsuser": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.FSUser, nil - case "process.parent.gid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.Credentials.GID), nil - case "process.parent.group": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.Group, nil - case "process.parent.interpreter.file.change_time": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.CTime), nil - case "process.parent.interpreter.file.filesystem": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent), nil - case "process.parent.interpreter.file.gid": - if 
!ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.GID), nil - case "process.parent.interpreter.file.group": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields), nil - case "process.parent.interpreter.file.hashes": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent), nil - case "process.parent.interpreter.file.in_upper_layer": - if !ev.BaseEvent.ProcessContext.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields), nil - case "process.parent.interpreter.file.inode": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode), nil - case "process.parent.interpreter.file.mode": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode), nil - case "process.parent.interpreter.file.modification_time": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.MTime), nil - case "process.parent.interpreter.file.mount_id": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID), nil - case "process.parent.interpreter.file.name": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent), nil - case "process.parent.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent), nil - case "process.parent.interpreter.file.package.name": - if 
!ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent), nil - case "process.parent.interpreter.file.package.source_version": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent), nil - case "process.parent.interpreter.file.package.version": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent), nil - case "process.parent.interpreter.file.path": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent), nil - case "process.parent.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent), nil - case "process.parent.interpreter.file.rights": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields)), nil - case "process.parent.interpreter.file.uid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.UID), nil - case "process.parent.interpreter.file.user": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields), nil - case "process.parent.is_exec": - if !ev.BaseEvent.ProcessContext.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.IsExec, nil - case "process.parent.is_kworker": - if !ev.BaseEvent.ProcessContext.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.PIDContext.IsKworker, nil - case "process.parent.is_thread": - if !ev.BaseEvent.ProcessContext.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.pid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return 
int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid), nil - case "process.parent.ppid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.PPid), nil - case "process.parent.tid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Tid), nil - case "process.parent.tty_name": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.TTYName, nil - case "process.parent.uid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.Credentials.UID), nil - case "process.parent.user": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.User, nil - case "process.parent.user_session.k8s_groups": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession), nil - case "process.parent.user_session.k8s_uid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession), nil - case "process.parent.user_session.k8s_username": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession), nil - case "process.pid": - return int(ev.BaseEvent.ProcessContext.Process.PIDContext.Pid), nil - case "process.ppid": - return int(ev.BaseEvent.ProcessContext.Process.PPid), nil - case "process.tid": - return int(ev.BaseEvent.ProcessContext.Process.PIDContext.Tid), nil - case "process.tty_name": - return ev.BaseEvent.ProcessContext.Process.TTYName, nil - case "process.uid": - return int(ev.BaseEvent.ProcessContext.Process.Credentials.UID), nil - case "process.user": - return ev.BaseEvent.ProcessContext.Process.Credentials.User, nil - case "process.user_session.k8s_groups": - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.BaseEvent.ProcessContext.Process.UserSession), nil - case "process.user_session.k8s_uid": - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.BaseEvent.ProcessContext.Process.UserSession), nil - case "process.user_session.k8s_username": - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.BaseEvent.ProcessContext.Process.UserSession), nil - case "ptrace.request": - return int(ev.PTrace.Request), nil - case "ptrace.retval": - return int(ev.PTrace.SyscallEvent.Retval), nil - case "ptrace.tracee.ancestors.args": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.args_flags": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process) - 
values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.args_options": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.args_truncated": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.argv": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.argv0": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.auid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.AUID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.cap_effective": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.CapEffective) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.cap_permitted": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.CapPermitted) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.cgroup.file.inode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.CGroup.CGroupFile.Inode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.cgroup.file.mount_id": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.CGroup.CGroupFile.MountID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.cgroup.id": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != 
nil { - element := ptr - result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.cgroup.manager": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.cgroup.version": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveCGroupVersion(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.comm": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Comm - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.container.id": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.created_at": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.egid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.EGID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.egroup": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.EGroup - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.envp": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.envs": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.envs_truncated": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.euid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.EUID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.euser": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.EUser - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.change_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.filesystem": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.gid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.group": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.hashes": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.in_upper_layer": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.inode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.mode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.modification_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.mount_id": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent), nil - case "ptrace.tracee.ancestors.file.package.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.package.source_version": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.package.version": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageVersion(ev, 
&element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.path": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent), nil - case "ptrace.tracee.ancestors.file.rights": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.uid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.file.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.fsgid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.FSGID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.fsgroup": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.FSGroup - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.fsuid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.FSUID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.fsuser": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.FSUser - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.gid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.GID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case 
"ptrace.tracee.ancestors.group": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.Group - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.change_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.filesystem": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.gid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.group": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.hashes": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.in_upper_layer": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.inode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.mode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.modification_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.mount_id": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.ancestors.interpreter.file.package.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.package.source_version": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.package.version": - var 
values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.path": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.ancestors.interpreter.file.rights": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.uid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.interpreter.file.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.is_exec": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.IsExec - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.is_kworker": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.PIDContext.IsKworker - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.is_thread": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.length": - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - return iterator.Len(ctx), nil - case "ptrace.tracee.ancestors.pid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := 
int(element.ProcessContext.Process.PIDContext.Pid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.ppid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PPid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.tid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PIDContext.Tid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.tty_name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.TTYName - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.uid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.UID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.User - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.user_session.k8s_groups": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.user_session.k8s_uid": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.ancestors.user_session.k8s_username": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "ptrace.tracee.args": - return ev.FieldHandlers.ResolveProcessArgs(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.args_flags": - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.args_options": - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.args_truncated": - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.argv": - return ev.FieldHandlers.ResolveProcessArgv(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.argv0": - return ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.auid": - return int(ev.PTrace.Tracee.Process.Credentials.AUID), nil - case "ptrace.tracee.cap_effective": - return int(ev.PTrace.Tracee.Process.Credentials.CapEffective), nil - case "ptrace.tracee.cap_permitted": - return int(ev.PTrace.Tracee.Process.Credentials.CapPermitted), nil - case "ptrace.tracee.cgroup.file.inode": - return int(ev.PTrace.Tracee.Process.CGroup.CGroupFile.Inode), nil - case "ptrace.tracee.cgroup.file.mount_id": - return int(ev.PTrace.Tracee.Process.CGroup.CGroupFile.MountID), nil - case "ptrace.tracee.cgroup.id": - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.PTrace.Tracee.Process.CGroup), nil - case "ptrace.tracee.cgroup.manager": - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.PTrace.Tracee.Process.CGroup), nil - case "ptrace.tracee.cgroup.version": - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.PTrace.Tracee.Process.CGroup), nil - case "ptrace.tracee.comm": - return ev.PTrace.Tracee.Process.Comm, nil - case "ptrace.tracee.container.id": - return ev.FieldHandlers.ResolveProcessContainerID(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &ev.PTrace.Tracee.Process)), nil - case "ptrace.tracee.egid": - return int(ev.PTrace.Tracee.Process.Credentials.EGID), nil - case "ptrace.tracee.egroup": - return ev.PTrace.Tracee.Process.Credentials.EGroup, nil - case "ptrace.tracee.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.envs_truncated": - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.euid": - return int(ev.PTrace.Tracee.Process.Credentials.EUID), nil - case "ptrace.tracee.euser": - return ev.PTrace.Tracee.Process.Credentials.EUser, nil - case "ptrace.tracee.file.change_time": - if 
!ev.PTrace.Tracee.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.CTime), nil - case "ptrace.tracee.file.filesystem": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Process.FileEvent), nil - case "ptrace.tracee.file.gid": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.GID), nil - case "ptrace.tracee.file.group": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields), nil - case "ptrace.tracee.file.hashes": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Process.FileEvent), nil - case "ptrace.tracee.file.in_upper_layer": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields), nil - case "ptrace.tracee.file.inode": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.Inode), nil - case "ptrace.tracee.file.mode": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.Mode), nil - case "ptrace.tracee.file.modification_time": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.MTime), nil - case "ptrace.tracee.file.mount_id": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.MountID), nil - case "ptrace.tracee.file.name": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.FileEvent), nil - case "ptrace.tracee.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.FileEvent), nil - case "ptrace.tracee.file.package.name": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Process.FileEvent), nil - case "ptrace.tracee.file.package.source_version": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Process.FileEvent), nil - case "ptrace.tracee.file.package.version": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Process.FileEvent), nil - case "ptrace.tracee.file.path": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent), nil - case "ptrace.tracee.file.path.length": - 
return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent), nil - case "ptrace.tracee.file.rights": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields)), nil - case "ptrace.tracee.file.uid": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.FileEvent.FileFields.UID), nil - case "ptrace.tracee.file.user": - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields), nil - case "ptrace.tracee.fsgid": - return int(ev.PTrace.Tracee.Process.Credentials.FSGID), nil - case "ptrace.tracee.fsgroup": - return ev.PTrace.Tracee.Process.Credentials.FSGroup, nil - case "ptrace.tracee.fsuid": - return int(ev.PTrace.Tracee.Process.Credentials.FSUID), nil - case "ptrace.tracee.fsuser": - return ev.PTrace.Tracee.Process.Credentials.FSUser, nil - case "ptrace.tracee.gid": - return int(ev.PTrace.Tracee.Process.Credentials.GID), nil - case "ptrace.tracee.group": - return ev.PTrace.Tracee.Process.Credentials.Group, nil - case "ptrace.tracee.interpreter.file.change_time": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.CTime), nil - case "ptrace.tracee.interpreter.file.filesystem": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.interpreter.file.gid": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.GID), nil - case "ptrace.tracee.interpreter.file.group": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields), nil - case "ptrace.tracee.interpreter.file.hashes": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.interpreter.file.in_upper_layer": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields), nil - case "ptrace.tracee.interpreter.file.inode": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode), nil - case "ptrace.tracee.interpreter.file.mode": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode), nil - case "ptrace.tracee.interpreter.file.modification_time": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return 
int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.MTime), nil - case "ptrace.tracee.interpreter.file.mount_id": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID), nil - case "ptrace.tracee.interpreter.file.name": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.interpreter.file.package.name": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.interpreter.file.package.source_version": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.interpreter.file.package.version": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.interpreter.file.path": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.interpreter.file.rights": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields)), nil - case "ptrace.tracee.interpreter.file.uid": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.UID), nil - case "ptrace.tracee.interpreter.file.user": - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields), nil - case "ptrace.tracee.is_exec": - return ev.PTrace.Tracee.Process.IsExec, nil - case "ptrace.tracee.is_kworker": - return ev.PTrace.Tracee.Process.PIDContext.IsKworker, nil - case "ptrace.tracee.is_thread": - return ev.FieldHandlers.ResolveProcessIsThread(ev, &ev.PTrace.Tracee.Process), nil - case "ptrace.tracee.parent.args": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.args_flags": - if !ev.PTrace.Tracee.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.args_options": - if !ev.PTrace.Tracee.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - 
} - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.args_truncated": - if !ev.PTrace.Tracee.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.argv": - if !ev.PTrace.Tracee.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.argv0": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.auid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.Credentials.AUID), nil - case "ptrace.tracee.parent.cap_effective": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.Credentials.CapEffective), nil - case "ptrace.tracee.parent.cap_permitted": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.Credentials.CapPermitted), nil - case "ptrace.tracee.parent.cgroup.file.inode": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.CGroup.CGroupFile.Inode), nil - case "ptrace.tracee.parent.cgroup.file.mount_id": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.CGroup.CGroupFile.MountID), nil - case "ptrace.tracee.parent.cgroup.id": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.PTrace.Tracee.Parent.CGroup), nil - case "ptrace.tracee.parent.cgroup.manager": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.PTrace.Tracee.Parent.CGroup), nil - case "ptrace.tracee.parent.cgroup.version": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.PTrace.Tracee.Parent.CGroup), nil - case "ptrace.tracee.parent.comm": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.Comm, nil - case "ptrace.tracee.parent.container.id": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.created_at": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.PTrace.Tracee.Parent)), nil - case "ptrace.tracee.parent.egid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.Credentials.EGID), nil - case "ptrace.tracee.parent.egroup": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.Credentials.EGroup, nil - case "ptrace.tracee.parent.envp": - if !ev.PTrace.Tracee.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return 
ev.FieldHandlers.ResolveProcessEnvp(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.envs": - if !ev.PTrace.Tracee.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.envs_truncated": - if !ev.PTrace.Tracee.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.euid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.Credentials.EUID), nil - case "ptrace.tracee.parent.euser": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.Credentials.EUser, nil - case "ptrace.tracee.parent.file.change_time": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.CTime), nil - case "ptrace.tracee.parent.file.filesystem": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Parent.FileEvent), nil - case "ptrace.tracee.parent.file.gid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.GID), nil - case "ptrace.tracee.parent.file.group": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields), nil - case "ptrace.tracee.parent.file.hashes": - if !ev.PTrace.Tracee.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Parent.FileEvent), nil - case "ptrace.tracee.parent.file.in_upper_layer": - if !ev.PTrace.Tracee.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields), nil - case "ptrace.tracee.parent.file.inode": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.Inode), nil - case "ptrace.tracee.parent.file.mode": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.Mode), nil - case "ptrace.tracee.parent.file.modification_time": - if !ev.PTrace.Tracee.HasParent() { - return 
0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.MTime), nil - case "ptrace.tracee.parent.file.mount_id": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.MountID), nil - case "ptrace.tracee.parent.file.name": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.FileEvent), nil - case "ptrace.tracee.parent.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.FileEvent), nil - case "ptrace.tracee.parent.file.package.name": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Parent.FileEvent), nil - case "ptrace.tracee.parent.file.package.source_version": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Parent.FileEvent), nil - case "ptrace.tracee.parent.file.package.version": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Parent.FileEvent), nil - case "ptrace.tracee.parent.file.path": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent), nil - case "ptrace.tracee.parent.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent), nil - case "ptrace.tracee.parent.file.rights": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields)), nil - case "ptrace.tracee.parent.file.uid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.FileEvent.FileFields.UID), nil - case "ptrace.tracee.parent.file.user": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields), nil - case "ptrace.tracee.parent.fsgid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return 
int(ev.PTrace.Tracee.Parent.Credentials.FSGID), nil - case "ptrace.tracee.parent.fsgroup": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.Credentials.FSGroup, nil - case "ptrace.tracee.parent.fsuid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.Credentials.FSUID), nil - case "ptrace.tracee.parent.fsuser": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.Credentials.FSUser, nil - case "ptrace.tracee.parent.gid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.Credentials.GID), nil - case "ptrace.tracee.parent.group": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.Credentials.Group, nil - case "ptrace.tracee.parent.interpreter.file.change_time": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.CTime), nil - case "ptrace.tracee.parent.interpreter.file.filesystem": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.parent.interpreter.file.gid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.GID), nil - case "ptrace.tracee.parent.interpreter.file.group": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields), nil - case "ptrace.tracee.parent.interpreter.file.hashes": - if !ev.PTrace.Tracee.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.parent.interpreter.file.in_upper_layer": - if !ev.PTrace.Tracee.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields), nil - case "ptrace.tracee.parent.interpreter.file.inode": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode), nil - case "ptrace.tracee.parent.interpreter.file.mode": - if !ev.PTrace.Tracee.HasParent() { - return 0, 
&eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode), nil - case "ptrace.tracee.parent.interpreter.file.modification_time": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.MTime), nil - case "ptrace.tracee.parent.interpreter.file.mount_id": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID), nil - case "ptrace.tracee.parent.interpreter.file.name": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.parent.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.parent.interpreter.file.package.name": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.parent.interpreter.file.package.source_version": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.parent.interpreter.file.package.version": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.parent.interpreter.file.path": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.parent.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent), nil - case "ptrace.tracee.parent.interpreter.file.rights": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields)), nil - case "ptrace.tracee.parent.interpreter.file.uid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - 
return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.UID), nil - case "ptrace.tracee.parent.interpreter.file.user": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields), nil - case "ptrace.tracee.parent.is_exec": - if !ev.PTrace.Tracee.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.IsExec, nil - case "ptrace.tracee.parent.is_kworker": - if !ev.PTrace.Tracee.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.PIDContext.IsKworker, nil - case "ptrace.tracee.parent.is_thread": - if !ev.PTrace.Tracee.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.PTrace.Tracee.Parent), nil - case "ptrace.tracee.parent.pid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.PIDContext.Pid), nil - case "ptrace.tracee.parent.ppid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.PPid), nil - case "ptrace.tracee.parent.tid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.PIDContext.Tid), nil - case "ptrace.tracee.parent.tty_name": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.TTYName, nil - case "ptrace.tracee.parent.uid": - if !ev.PTrace.Tracee.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.PTrace.Tracee.Parent.Credentials.UID), nil - case "ptrace.tracee.parent.user": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.PTrace.Tracee.Parent.Credentials.User, nil - case "ptrace.tracee.parent.user_session.k8s_groups": - if !ev.PTrace.Tracee.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.PTrace.Tracee.Parent.UserSession), nil - case "ptrace.tracee.parent.user_session.k8s_uid": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.PTrace.Tracee.Parent.UserSession), nil - case "ptrace.tracee.parent.user_session.k8s_username": - if !ev.PTrace.Tracee.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.PTrace.Tracee.Parent.UserSession), nil - case "ptrace.tracee.pid": - return int(ev.PTrace.Tracee.Process.PIDContext.Pid), nil - case "ptrace.tracee.ppid": - return int(ev.PTrace.Tracee.Process.PPid), nil - case "ptrace.tracee.tid": - return int(ev.PTrace.Tracee.Process.PIDContext.Tid), nil - case "ptrace.tracee.tty_name": - return ev.PTrace.Tracee.Process.TTYName, nil - case "ptrace.tracee.uid": - return int(ev.PTrace.Tracee.Process.Credentials.UID), nil - case "ptrace.tracee.user": - return ev.PTrace.Tracee.Process.Credentials.User, nil - case "ptrace.tracee.user_session.k8s_groups": - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.PTrace.Tracee.Process.UserSession), nil - case 
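The ptrace.tracee.parent.* cases removed above all follow one guard shape: when the tracee has no parent (and likewise when the parent is a kworker or has no interpreter), the accessor returns the field's zero value together with an eval.ErrNotSupported carrying the field name, presumably so the evaluator can treat the field as absent instead of failing the whole evaluation. Below is a minimal, self-contained sketch of that shape; ErrNotSupported, Process and parentUID are illustrative stand-ins, not the generated API.

    package main

    import "fmt"

    // ErrNotSupported stands in for eval.ErrNotSupported used by the generated code.
    type ErrNotSupported struct{ Field string }

    func (e *ErrNotSupported) Error() string { return e.Field + " is not supported" }

    // Process is a stripped-down stand-in for the real process model.
    type Process struct {
        Parent *Process
        UID    uint32
    }

    func (p *Process) HasParent() bool { return p.Parent != nil }

    // parentUID mirrors the shape of the "ptrace.tracee.parent.uid" case: if the
    // guard fails, the zero value is returned together with the typed error.
    func parentUID(p *Process, field string) (int, error) {
        if !p.HasParent() {
            return 0, &ErrNotSupported{Field: field}
        }
        return int(p.Parent.UID), nil
    }

    func main() {
        orphan := &Process{UID: 1000}
        if _, err := parentUID(orphan, "ptrace.tracee.parent.uid"); err != nil {
            fmt.Println(err)
        }
    }

Returning a typed error alongside the zero value keeps every case's return shape uniform, which is what lets the generated switch stay purely mechanical.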
"ptrace.tracee.user_session.k8s_uid": - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.PTrace.Tracee.Process.UserSession), nil - case "ptrace.tracee.user_session.k8s_username": - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.PTrace.Tracee.Process.UserSession), nil - case "removexattr.file.change_time": - return int(ev.RemoveXAttr.File.FileFields.CTime), nil - case "removexattr.file.destination.name": - return ev.FieldHandlers.ResolveXAttrName(ev, &ev.RemoveXAttr), nil - case "removexattr.file.destination.namespace": - return ev.FieldHandlers.ResolveXAttrNamespace(ev, &ev.RemoveXAttr), nil - case "removexattr.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.RemoveXAttr.File), nil - case "removexattr.file.gid": - return int(ev.RemoveXAttr.File.FileFields.GID), nil - case "removexattr.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.RemoveXAttr.File.FileFields), nil - case "removexattr.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.RemoveXAttr.File), nil - case "removexattr.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.RemoveXAttr.File.FileFields), nil - case "removexattr.file.inode": - return int(ev.RemoveXAttr.File.FileFields.PathKey.Inode), nil - case "removexattr.file.mode": - return int(ev.RemoveXAttr.File.FileFields.Mode), nil - case "removexattr.file.modification_time": - return int(ev.RemoveXAttr.File.FileFields.MTime), nil - case "removexattr.file.mount_id": - return int(ev.RemoveXAttr.File.FileFields.PathKey.MountID), nil - case "removexattr.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.RemoveXAttr.File), nil - case "removexattr.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.RemoveXAttr.File), nil - case "removexattr.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.RemoveXAttr.File), nil - case "removexattr.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.RemoveXAttr.File), nil - case "removexattr.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.RemoveXAttr.File), nil - case "removexattr.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.RemoveXAttr.File), nil - case "removexattr.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.RemoveXAttr.File), nil - case "removexattr.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.RemoveXAttr.File.FileFields)), nil - case "removexattr.file.uid": - return int(ev.RemoveXAttr.File.FileFields.UID), nil - case "removexattr.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.RemoveXAttr.File.FileFields), nil - case "removexattr.retval": - return int(ev.RemoveXAttr.SyscallEvent.Retval), nil - case "rename.file.change_time": - return int(ev.Rename.Old.FileFields.CTime), nil - case "rename.file.destination.change_time": - return int(ev.Rename.New.FileFields.CTime), nil - case "rename.file.destination.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Rename.New), nil - case "rename.file.destination.gid": - return int(ev.Rename.New.FileFields.GID), nil - case "rename.file.destination.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Rename.New.FileFields), nil - case "rename.file.destination.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Rename.New), nil - case "rename.file.destination.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Rename.New.FileFields), 
nil - case "rename.file.destination.inode": - return int(ev.Rename.New.FileFields.PathKey.Inode), nil - case "rename.file.destination.mode": - return int(ev.Rename.New.FileFields.Mode), nil - case "rename.file.destination.modification_time": - return int(ev.Rename.New.FileFields.MTime), nil - case "rename.file.destination.mount_id": - return int(ev.Rename.New.FileFields.PathKey.MountID), nil - case "rename.file.destination.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rename.New), nil - case "rename.file.destination.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rename.New), nil - case "rename.file.destination.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Rename.New), nil - case "rename.file.destination.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Rename.New), nil - case "rename.file.destination.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Rename.New), nil - case "rename.file.destination.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.New), nil - case "rename.file.destination.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.New), nil - case "rename.file.destination.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Rename.New.FileFields)), nil - case "rename.file.destination.uid": - return int(ev.Rename.New.FileFields.UID), nil - case "rename.file.destination.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Rename.New.FileFields), nil - case "rename.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Rename.Old), nil - case "rename.file.gid": - return int(ev.Rename.Old.FileFields.GID), nil - case "rename.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Rename.Old.FileFields), nil - case "rename.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Rename.Old), nil - case "rename.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Rename.Old.FileFields), nil - case "rename.file.inode": - return int(ev.Rename.Old.FileFields.PathKey.Inode), nil - case "rename.file.mode": - return int(ev.Rename.Old.FileFields.Mode), nil - case "rename.file.modification_time": - return int(ev.Rename.Old.FileFields.MTime), nil - case "rename.file.mount_id": - return int(ev.Rename.Old.FileFields.PathKey.MountID), nil - case "rename.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rename.Old), nil - case "rename.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rename.Old), nil - case "rename.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Rename.Old), nil - case "rename.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Rename.Old), nil - case "rename.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Rename.Old), nil - case "rename.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.Old), nil - case "rename.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.Old), nil - case "rename.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Rename.Old.FileFields)), nil - case "rename.file.uid": - return int(ev.Rename.Old.FileFields.UID), nil - case "rename.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Rename.Old.FileFields), nil - case "rename.retval": - return int(ev.Rename.SyscallEvent.Retval), nil - case 
"rename.syscall.destination.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Rename.SyscallContext), nil - case "rename.syscall.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Rename.SyscallContext), nil - case "rmdir.file.change_time": - return int(ev.Rmdir.File.FileFields.CTime), nil - case "rmdir.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Rmdir.File), nil - case "rmdir.file.gid": - return int(ev.Rmdir.File.FileFields.GID), nil - case "rmdir.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Rmdir.File.FileFields), nil - case "rmdir.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Rmdir.File), nil - case "rmdir.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Rmdir.File.FileFields), nil - case "rmdir.file.inode": - return int(ev.Rmdir.File.FileFields.PathKey.Inode), nil - case "rmdir.file.mode": - return int(ev.Rmdir.File.FileFields.Mode), nil - case "rmdir.file.modification_time": - return int(ev.Rmdir.File.FileFields.MTime), nil - case "rmdir.file.mount_id": - return int(ev.Rmdir.File.FileFields.PathKey.MountID), nil - case "rmdir.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rmdir.File), nil - case "rmdir.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rmdir.File), nil - case "rmdir.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Rmdir.File), nil - case "rmdir.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Rmdir.File), nil - case "rmdir.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Rmdir.File), nil - case "rmdir.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rmdir.File), nil - case "rmdir.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rmdir.File), nil - case "rmdir.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Rmdir.File.FileFields)), nil - case "rmdir.file.uid": - return int(ev.Rmdir.File.FileFields.UID), nil - case "rmdir.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Rmdir.File.FileFields), nil - case "rmdir.retval": - return int(ev.Rmdir.SyscallEvent.Retval), nil - case "selinux.bool.name": - return ev.FieldHandlers.ResolveSELinuxBoolName(ev, &ev.SELinux), nil - case "selinux.bool.state": - return ev.SELinux.BoolChangeValue, nil - case "selinux.bool_commit.state": - return ev.SELinux.BoolCommitValue, nil - case "selinux.enforce.status": - return ev.SELinux.EnforceStatus, nil - case "setgid.egid": - return int(ev.SetGID.EGID), nil - case "setgid.egroup": - return ev.FieldHandlers.ResolveSetgidEGroup(ev, &ev.SetGID), nil - case "setgid.fsgid": - return int(ev.SetGID.FSGID), nil - case "setgid.fsgroup": - return ev.FieldHandlers.ResolveSetgidFSGroup(ev, &ev.SetGID), nil - case "setgid.gid": - return int(ev.SetGID.GID), nil - case "setgid.group": - return ev.FieldHandlers.ResolveSetgidGroup(ev, &ev.SetGID), nil - case "setuid.euid": - return int(ev.SetUID.EUID), nil - case "setuid.euser": - return ev.FieldHandlers.ResolveSetuidEUser(ev, &ev.SetUID), nil - case "setuid.fsuid": - return int(ev.SetUID.FSUID), nil - case "setuid.fsuser": - return ev.FieldHandlers.ResolveSetuidFSUser(ev, &ev.SetUID), nil - case "setuid.uid": - return int(ev.SetUID.UID), nil - case "setuid.user": - return ev.FieldHandlers.ResolveSetuidUser(ev, &ev.SetUID), nil - case "setxattr.file.change_time": - return 
int(ev.SetXAttr.File.FileFields.CTime), nil - case "setxattr.file.destination.name": - return ev.FieldHandlers.ResolveXAttrName(ev, &ev.SetXAttr), nil - case "setxattr.file.destination.namespace": - return ev.FieldHandlers.ResolveXAttrNamespace(ev, &ev.SetXAttr), nil - case "setxattr.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.SetXAttr.File), nil - case "setxattr.file.gid": - return int(ev.SetXAttr.File.FileFields.GID), nil - case "setxattr.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.SetXAttr.File.FileFields), nil - case "setxattr.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.SetXAttr.File), nil - case "setxattr.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.SetXAttr.File.FileFields), nil - case "setxattr.file.inode": - return int(ev.SetXAttr.File.FileFields.PathKey.Inode), nil - case "setxattr.file.mode": - return int(ev.SetXAttr.File.FileFields.Mode), nil - case "setxattr.file.modification_time": - return int(ev.SetXAttr.File.FileFields.MTime), nil - case "setxattr.file.mount_id": - return int(ev.SetXAttr.File.FileFields.PathKey.MountID), nil - case "setxattr.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.SetXAttr.File), nil - case "setxattr.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.SetXAttr.File), nil - case "setxattr.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.SetXAttr.File), nil - case "setxattr.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.SetXAttr.File), nil - case "setxattr.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.SetXAttr.File), nil - case "setxattr.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.SetXAttr.File), nil - case "setxattr.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.SetXAttr.File), nil - case "setxattr.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.SetXAttr.File.FileFields)), nil - case "setxattr.file.uid": - return int(ev.SetXAttr.File.FileFields.UID), nil - case "setxattr.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.SetXAttr.File.FileFields), nil - case "setxattr.retval": - return int(ev.SetXAttr.SyscallEvent.Retval), nil - case "signal.pid": - return int(ev.Signal.PID), nil - case "signal.retval": - return int(ev.Signal.SyscallEvent.Retval), nil - case "signal.target.ancestors.args": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.args_flags": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.args_options": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.args_truncated": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.argv": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.argv0": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.auid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.AUID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.cap_effective": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.CapEffective) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.cap_permitted": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.CapPermitted) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.cgroup.file.inode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.CGroup.CGroupFile.Inode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.cgroup.file.mount_id": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.CGroup.CGroupFile.MountID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.cgroup.id": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.cgroup.manager": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := 
ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.cgroup.version": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveCGroupVersion(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.comm": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Comm - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.container.id": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.created_at": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.egid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.EGID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.egroup": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.EGroup - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.envp": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.envs": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.envs_truncated": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.euid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.EUID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.euser": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.EUser - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.change_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.CTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.filesystem": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.gid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.GID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.group": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.hashes": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.in_upper_layer": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.inode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.mode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.Mode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.modification_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.MTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.mount_id": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent), nil - case "signal.target.ancestors.file.package.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.package.source_version": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.package.version": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageVersion(ev, 
&element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.path": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent), nil - case "signal.target.ancestors.file.rights": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.uid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.FileEvent.FileFields.UID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.file.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.fsgid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.FSGID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.fsgroup": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.FSGroup - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.fsuid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.FSUID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.fsuser": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.FSUser - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.gid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.GID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case 
"signal.target.ancestors.group": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.Group - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.change_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.filesystem": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.gid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.group": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.hashes": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.in_upper_layer": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.inode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.mode": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.modification_time": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.mount_id": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "signal.target.ancestors.interpreter.file.package.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.package.source_version": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.package.version": - var 
values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.path": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent), nil - case "signal.target.ancestors.interpreter.file.rights": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.uid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.interpreter.file.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.is_exec": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.IsExec - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.is_kworker": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.PIDContext.IsKworker - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.is_thread": - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.length": - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - return iterator.Len(ctx), nil - case "signal.target.ancestors.pid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := 
int(element.ProcessContext.Process.PIDContext.Pid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.ppid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PPid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.tid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PIDContext.Tid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.tty_name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.TTYName - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.uid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.Credentials.UID) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.Credentials.User - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.user_session.k8s_groups": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result...) 
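The signal.target.ancestors.* cases above all instantiate a ProcessAncestorsIterator, walk it with Front/Next, and append one result per ancestor (slice-valued fields such as envp, hashes and k8s_groups flatten with append(values, result...)); the ancestors.length case instead asks the iterator for its Len. A self-contained sketch of that walk, with hypothetical stand-in types and the eval.Context plumbing omitted:

    package main

    import "fmt"

    // Process is an illustrative stand-in for the real process model.
    type Process struct {
        Pid      uint32
        Ancestor *Process
    }

    // AncestorsIterator mimics the Front/Next shape of ProcessAncestorsIterator;
    // the real one is fed an eval.Context built from the event, here we start
    // from the target process directly.
    type AncestorsIterator struct{ current *Process }

    func (it *AncestorsIterator) Front(target *Process) *Process {
        it.current = target.Ancestor
        return it.current
    }

    func (it *AncestorsIterator) Next() *Process {
        if it.current != nil {
            it.current = it.current.Ancestor
        }
        return it.current
    }

    // ancestorPids collects one scalar per ancestor, in the spirit of the
    // "signal.target.ancestors.pid" case above.
    func ancestorPids(target *Process) []int {
        var values []int
        it := &AncestorsIterator{}
        for ptr := it.Front(target); ptr != nil; ptr = it.Next() {
            values = append(values, int(ptr.Pid))
        }
        return values
    }

    func main() {
        root := &Process{Pid: 1}
        shell := &Process{Pid: 42, Ancestor: root}
        leaf := &Process{Pid: 4242, Ancestor: shell}
        fmt.Println(ancestorPids(leaf)) // [42 1]
    }

Accumulating into a fresh slice on each call keeps the accessor itself stateless; only the iterator tracks the current position in the ancestry chain.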
- ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.user_session.k8s_uid": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.ancestors.user_session.k8s_username": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "signal.target.args": - return ev.FieldHandlers.ResolveProcessArgs(ev, &ev.Signal.Target.Process), nil - case "signal.target.args_flags": - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &ev.Signal.Target.Process), nil - case "signal.target.args_options": - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, &ev.Signal.Target.Process), nil - case "signal.target.args_truncated": - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.Signal.Target.Process), nil - case "signal.target.argv": - return ev.FieldHandlers.ResolveProcessArgv(ev, &ev.Signal.Target.Process), nil - case "signal.target.argv0": - return ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.Signal.Target.Process), nil - case "signal.target.auid": - return int(ev.Signal.Target.Process.Credentials.AUID), nil - case "signal.target.cap_effective": - return int(ev.Signal.Target.Process.Credentials.CapEffective), nil - case "signal.target.cap_permitted": - return int(ev.Signal.Target.Process.Credentials.CapPermitted), nil - case "signal.target.cgroup.file.inode": - return int(ev.Signal.Target.Process.CGroup.CGroupFile.Inode), nil - case "signal.target.cgroup.file.mount_id": - return int(ev.Signal.Target.Process.CGroup.CGroupFile.MountID), nil - case "signal.target.cgroup.id": - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.Signal.Target.Process.CGroup), nil - case "signal.target.cgroup.manager": - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Signal.Target.Process.CGroup), nil - case "signal.target.cgroup.version": - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Signal.Target.Process.CGroup), nil - case "signal.target.comm": - return ev.Signal.Target.Process.Comm, nil - case "signal.target.container.id": - return ev.FieldHandlers.ResolveProcessContainerID(ev, &ev.Signal.Target.Process), nil - case "signal.target.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &ev.Signal.Target.Process)), nil - case "signal.target.egid": - return int(ev.Signal.Target.Process.Credentials.EGID), nil - case "signal.target.egroup": - return ev.Signal.Target.Process.Credentials.EGroup, nil - case "signal.target.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.Signal.Target.Process), nil - case "signal.target.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.Signal.Target.Process), nil - case "signal.target.envs_truncated": - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &ev.Signal.Target.Process), nil - case "signal.target.euid": - return int(ev.Signal.Target.Process.Credentials.EUID), nil - case "signal.target.euser": - return ev.Signal.Target.Process.Credentials.EUser, nil - case "signal.target.file.change_time": - if 
!ev.Signal.Target.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.FileEvent.FileFields.CTime), nil - case "signal.target.file.filesystem": - if !ev.Signal.Target.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Process.FileEvent), nil - case "signal.target.file.gid": - if !ev.Signal.Target.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.FileEvent.FileFields.GID), nil - case "signal.target.file.group": - if !ev.Signal.Target.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Process.FileEvent.FileFields), nil - case "signal.target.file.hashes": - if !ev.Signal.Target.Process.IsNotKworker() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Process.FileEvent), nil - case "signal.target.file.in_upper_layer": - if !ev.Signal.Target.Process.IsNotKworker() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Process.FileEvent.FileFields), nil - case "signal.target.file.inode": - if !ev.Signal.Target.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.FileEvent.FileFields.PathKey.Inode), nil - case "signal.target.file.mode": - if !ev.Signal.Target.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.FileEvent.FileFields.Mode), nil - case "signal.target.file.modification_time": - if !ev.Signal.Target.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.FileEvent.FileFields.MTime), nil - case "signal.target.file.mount_id": - if !ev.Signal.Target.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.FileEvent.FileFields.PathKey.MountID), nil - case "signal.target.file.name": - if !ev.Signal.Target.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.FileEvent), nil - case "signal.target.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.FileEvent), nil - case "signal.target.file.package.name": - if !ev.Signal.Target.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Process.FileEvent), nil - case "signal.target.file.package.source_version": - if !ev.Signal.Target.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Process.FileEvent), nil - case "signal.target.file.package.version": - if !ev.Signal.Target.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Process.FileEvent), nil - case "signal.target.file.path": - if !ev.Signal.Target.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent), nil - case "signal.target.file.path.length": - 
return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent), nil - case "signal.target.file.rights": - if !ev.Signal.Target.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Process.FileEvent.FileFields)), nil - case "signal.target.file.uid": - if !ev.Signal.Target.Process.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.FileEvent.FileFields.UID), nil - case "signal.target.file.user": - if !ev.Signal.Target.Process.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Process.FileEvent.FileFields), nil - case "signal.target.fsgid": - return int(ev.Signal.Target.Process.Credentials.FSGID), nil - case "signal.target.fsgroup": - return ev.Signal.Target.Process.Credentials.FSGroup, nil - case "signal.target.fsuid": - return int(ev.Signal.Target.Process.Credentials.FSUID), nil - case "signal.target.fsuser": - return ev.Signal.Target.Process.Credentials.FSUser, nil - case "signal.target.gid": - return int(ev.Signal.Target.Process.Credentials.GID), nil - case "signal.target.group": - return ev.Signal.Target.Process.Credentials.Group, nil - case "signal.target.interpreter.file.change_time": - if !ev.Signal.Target.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.CTime), nil - case "signal.target.interpreter.file.filesystem": - if !ev.Signal.Target.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent), nil - case "signal.target.interpreter.file.gid": - if !ev.Signal.Target.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.GID), nil - case "signal.target.interpreter.file.group": - if !ev.Signal.Target.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields), nil - case "signal.target.interpreter.file.hashes": - if !ev.Signal.Target.Process.HasInterpreter() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent), nil - case "signal.target.interpreter.file.in_upper_layer": - if !ev.Signal.Target.Process.HasInterpreter() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields), nil - case "signal.target.interpreter.file.inode": - if !ev.Signal.Target.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode), nil - case "signal.target.interpreter.file.mode": - if !ev.Signal.Target.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode), nil - case "signal.target.interpreter.file.modification_time": - if !ev.Signal.Target.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return 
int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.MTime), nil - case "signal.target.interpreter.file.mount_id": - if !ev.Signal.Target.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID), nil - case "signal.target.interpreter.file.name": - if !ev.Signal.Target.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent), nil - case "signal.target.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent), nil - case "signal.target.interpreter.file.package.name": - if !ev.Signal.Target.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent), nil - case "signal.target.interpreter.file.package.source_version": - if !ev.Signal.Target.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent), nil - case "signal.target.interpreter.file.package.version": - if !ev.Signal.Target.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent), nil - case "signal.target.interpreter.file.path": - if !ev.Signal.Target.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent), nil - case "signal.target.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent), nil - case "signal.target.interpreter.file.rights": - if !ev.Signal.Target.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields)), nil - case "signal.target.interpreter.file.uid": - if !ev.Signal.Target.Process.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.UID), nil - case "signal.target.interpreter.file.user": - if !ev.Signal.Target.Process.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields), nil - case "signal.target.is_exec": - return ev.Signal.Target.Process.IsExec, nil - case "signal.target.is_kworker": - return ev.Signal.Target.Process.PIDContext.IsKworker, nil - case "signal.target.is_thread": - return ev.FieldHandlers.ResolveProcessIsThread(ev, &ev.Signal.Target.Process), nil - case "signal.target.parent.args": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.args_flags": - if !ev.Signal.Target.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.args_options": - if !ev.Signal.Target.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - 
} - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.args_truncated": - if !ev.Signal.Target.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.argv": - if !ev.Signal.Target.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.argv0": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.auid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.Credentials.AUID), nil - case "signal.target.parent.cap_effective": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.Credentials.CapEffective), nil - case "signal.target.parent.cap_permitted": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.Credentials.CapPermitted), nil - case "signal.target.parent.cgroup.file.inode": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.CGroup.CGroupFile.Inode), nil - case "signal.target.parent.cgroup.file.mount_id": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.CGroup.CGroupFile.MountID), nil - case "signal.target.parent.cgroup.id": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.Signal.Target.Parent.CGroup), nil - case "signal.target.parent.cgroup.manager": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Signal.Target.Parent.CGroup), nil - case "signal.target.parent.cgroup.version": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Signal.Target.Parent.CGroup), nil - case "signal.target.parent.comm": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.Comm, nil - case "signal.target.parent.container.id": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.created_at": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Signal.Target.Parent)), nil - case "signal.target.parent.egid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.Credentials.EGID), nil - case "signal.target.parent.egroup": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.Credentials.EGroup, nil - case "signal.target.parent.envp": - if !ev.Signal.Target.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return 
ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.envs": - if !ev.Signal.Target.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.envs_truncated": - if !ev.Signal.Target.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.euid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.Credentials.EUID), nil - case "signal.target.parent.euser": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.Credentials.EUser, nil - case "signal.target.parent.file.change_time": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.FileEvent.FileFields.CTime), nil - case "signal.target.parent.file.filesystem": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Parent.FileEvent), nil - case "signal.target.parent.file.gid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.FileEvent.FileFields.GID), nil - case "signal.target.parent.file.group": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Parent.FileEvent.FileFields), nil - case "signal.target.parent.file.hashes": - if !ev.Signal.Target.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Parent.FileEvent), nil - case "signal.target.parent.file.in_upper_layer": - if !ev.Signal.Target.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Parent.FileEvent.FileFields), nil - case "signal.target.parent.file.inode": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.Inode), nil - case "signal.target.parent.file.mode": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.FileEvent.FileFields.Mode), nil - case "signal.target.parent.file.modification_time": - if !ev.Signal.Target.HasParent() { - return 
0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.FileEvent.FileFields.MTime), nil - case "signal.target.parent.file.mount_id": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.MountID), nil - case "signal.target.parent.file.name": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.FileEvent), nil - case "signal.target.parent.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.FileEvent), nil - case "signal.target.parent.file.package.name": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Parent.FileEvent), nil - case "signal.target.parent.file.package.source_version": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Parent.FileEvent), nil - case "signal.target.parent.file.package.version": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Parent.FileEvent), nil - case "signal.target.parent.file.path": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent), nil - case "signal.target.parent.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent), nil - case "signal.target.parent.file.rights": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Parent.FileEvent.FileFields)), nil - case "signal.target.parent.file.uid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.FileEvent.FileFields.UID), nil - case "signal.target.parent.file.user": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Parent.FileEvent.FileFields), nil - case "signal.target.parent.fsgid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return 
int(ev.Signal.Target.Parent.Credentials.FSGID), nil - case "signal.target.parent.fsgroup": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.Credentials.FSGroup, nil - case "signal.target.parent.fsuid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.Credentials.FSUID), nil - case "signal.target.parent.fsuser": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.Credentials.FSUser, nil - case "signal.target.parent.gid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.Credentials.GID), nil - case "signal.target.parent.group": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.Credentials.Group, nil - case "signal.target.parent.interpreter.file.change_time": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.CTime), nil - case "signal.target.parent.interpreter.file.filesystem": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent), nil - case "signal.target.parent.interpreter.file.gid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.GID), nil - case "signal.target.parent.interpreter.file.group": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields), nil - case "signal.target.parent.interpreter.file.hashes": - if !ev.Signal.Target.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent), nil - case "signal.target.parent.interpreter.file.in_upper_layer": - if !ev.Signal.Target.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields), nil - case "signal.target.parent.interpreter.file.inode": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode), nil - case "signal.target.parent.interpreter.file.mode": - if !ev.Signal.Target.HasParent() { - return 0, 
&eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode), nil - case "signal.target.parent.interpreter.file.modification_time": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.MTime), nil - case "signal.target.parent.interpreter.file.mount_id": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID), nil - case "signal.target.parent.interpreter.file.name": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent), nil - case "signal.target.parent.interpreter.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent), nil - case "signal.target.parent.interpreter.file.package.name": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent), nil - case "signal.target.parent.interpreter.file.package.source_version": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent), nil - case "signal.target.parent.interpreter.file.package.version": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent), nil - case "signal.target.parent.interpreter.file.path": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent), nil - case "signal.target.parent.interpreter.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent), nil - case "signal.target.parent.interpreter.file.rights": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields)), nil - case "signal.target.parent.interpreter.file.uid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - 
return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.UID), nil - case "signal.target.parent.interpreter.file.user": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields), nil - case "signal.target.parent.is_exec": - if !ev.Signal.Target.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.IsExec, nil - case "signal.target.parent.is_kworker": - if !ev.Signal.Target.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.PIDContext.IsKworker, nil - case "signal.target.parent.is_thread": - if !ev.Signal.Target.HasParent() { - return false, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.Signal.Target.Parent), nil - case "signal.target.parent.pid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.PIDContext.Pid), nil - case "signal.target.parent.ppid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.PPid), nil - case "signal.target.parent.tid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.PIDContext.Tid), nil - case "signal.target.parent.tty_name": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.TTYName, nil - case "signal.target.parent.uid": - if !ev.Signal.Target.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.Signal.Target.Parent.Credentials.UID), nil - case "signal.target.parent.user": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.Signal.Target.Parent.Credentials.User, nil - case "signal.target.parent.user_session.k8s_groups": - if !ev.Signal.Target.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Signal.Target.Parent.UserSession), nil - case "signal.target.parent.user_session.k8s_uid": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Signal.Target.Parent.UserSession), nil - case "signal.target.parent.user_session.k8s_username": - if !ev.Signal.Target.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Signal.Target.Parent.UserSession), nil - case "signal.target.pid": - return int(ev.Signal.Target.Process.PIDContext.Pid), nil - case "signal.target.ppid": - return int(ev.Signal.Target.Process.PPid), nil - case "signal.target.tid": - return int(ev.Signal.Target.Process.PIDContext.Tid), nil - case "signal.target.tty_name": - return ev.Signal.Target.Process.TTYName, nil - case "signal.target.uid": - return int(ev.Signal.Target.Process.Credentials.UID), nil - case "signal.target.user": - return ev.Signal.Target.Process.Credentials.User, nil - case "signal.target.user_session.k8s_groups": - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Signal.Target.Process.UserSession), nil - case 
"signal.target.user_session.k8s_uid": - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Signal.Target.Process.UserSession), nil - case "signal.target.user_session.k8s_username": - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Signal.Target.Process.UserSession), nil - case "signal.type": - return int(ev.Signal.Type), nil - case "splice.file.change_time": - return int(ev.Splice.File.FileFields.CTime), nil - case "splice.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Splice.File), nil - case "splice.file.gid": - return int(ev.Splice.File.FileFields.GID), nil - case "splice.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Splice.File.FileFields), nil - case "splice.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Splice.File), nil - case "splice.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Splice.File.FileFields), nil - case "splice.file.inode": - return int(ev.Splice.File.FileFields.PathKey.Inode), nil - case "splice.file.mode": - return int(ev.Splice.File.FileFields.Mode), nil - case "splice.file.modification_time": - return int(ev.Splice.File.FileFields.MTime), nil - case "splice.file.mount_id": - return int(ev.Splice.File.FileFields.PathKey.MountID), nil - case "splice.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Splice.File), nil - case "splice.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Splice.File), nil - case "splice.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Splice.File), nil - case "splice.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Splice.File), nil - case "splice.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Splice.File), nil - case "splice.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Splice.File), nil - case "splice.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Splice.File), nil - case "splice.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Splice.File.FileFields)), nil - case "splice.file.uid": - return int(ev.Splice.File.FileFields.UID), nil - case "splice.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Splice.File.FileFields), nil - case "splice.pipe_entry_flag": - return int(ev.Splice.PipeEntryFlag), nil - case "splice.pipe_exit_flag": - return int(ev.Splice.PipeExitFlag), nil - case "splice.retval": - return int(ev.Splice.SyscallEvent.Retval), nil - case "unlink.file.change_time": - return int(ev.Unlink.File.FileFields.CTime), nil - case "unlink.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Unlink.File), nil - case "unlink.file.gid": - return int(ev.Unlink.File.FileFields.GID), nil - case "unlink.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Unlink.File.FileFields), nil - case "unlink.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Unlink.File), nil - case "unlink.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Unlink.File.FileFields), nil - case "unlink.file.inode": - return int(ev.Unlink.File.FileFields.PathKey.Inode), nil - case "unlink.file.mode": - return int(ev.Unlink.File.FileFields.Mode), nil - case "unlink.file.modification_time": - return int(ev.Unlink.File.FileFields.MTime), nil - case "unlink.file.mount_id": - return int(ev.Unlink.File.FileFields.PathKey.MountID), nil - case "unlink.file.name": - return 
ev.FieldHandlers.ResolveFileBasename(ev, &ev.Unlink.File), nil - case "unlink.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Unlink.File), nil - case "unlink.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Unlink.File), nil - case "unlink.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Unlink.File), nil - case "unlink.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Unlink.File), nil - case "unlink.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Unlink.File), nil - case "unlink.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Unlink.File), nil - case "unlink.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Unlink.File.FileFields)), nil - case "unlink.file.uid": - return int(ev.Unlink.File.FileFields.UID), nil - case "unlink.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Unlink.File.FileFields), nil - case "unlink.flags": - return int(ev.Unlink.Flags), nil - case "unlink.retval": - return int(ev.Unlink.SyscallEvent.Retval), nil - case "unlink.syscall.dirfd": - return int(ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Unlink.SyscallContext)), nil - case "unlink.syscall.flags": - return int(ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Unlink.SyscallContext)), nil - case "unlink.syscall.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Unlink.SyscallContext), nil - case "unload_module.name": - return ev.UnloadModule.Name, nil - case "unload_module.retval": - return int(ev.UnloadModule.SyscallEvent.Retval), nil - case "utimes.file.change_time": - return int(ev.Utimes.File.FileFields.CTime), nil - case "utimes.file.filesystem": - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Utimes.File), nil - case "utimes.file.gid": - return int(ev.Utimes.File.FileFields.GID), nil - case "utimes.file.group": - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Utimes.File.FileFields), nil - case "utimes.file.hashes": - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Utimes.File), nil - case "utimes.file.in_upper_layer": - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Utimes.File.FileFields), nil - case "utimes.file.inode": - return int(ev.Utimes.File.FileFields.PathKey.Inode), nil - case "utimes.file.mode": - return int(ev.Utimes.File.FileFields.Mode), nil - case "utimes.file.modification_time": - return int(ev.Utimes.File.FileFields.MTime), nil - case "utimes.file.mount_id": - return int(ev.Utimes.File.FileFields.PathKey.MountID), nil - case "utimes.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Utimes.File), nil - case "utimes.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Utimes.File), nil - case "utimes.file.package.name": - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Utimes.File), nil - case "utimes.file.package.source_version": - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Utimes.File), nil - case "utimes.file.package.version": - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Utimes.File), nil - case "utimes.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Utimes.File), nil - case "utimes.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Utimes.File), nil - case "utimes.file.rights": - return int(ev.FieldHandlers.ResolveRights(ev, &ev.Utimes.File.FileFields)), nil - case "utimes.file.uid": - return int(ev.Utimes.File.FileFields.UID), nil - case 
"utimes.file.user": - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Utimes.File.FileFields), nil - case "utimes.retval": - return int(ev.Utimes.SyscallEvent.Retval), nil - case "utimes.syscall.path": - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Utimes.SyscallContext), nil - } - return nil, &eval.ErrFieldNotFound{Field: field} -} -func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { - switch field { - case "bind.addr.family": - return "bind", nil - case "bind.addr.ip": - return "bind", nil - case "bind.addr.is_public": - return "bind", nil - case "bind.addr.port": - return "bind", nil - case "bind.protocol": - return "bind", nil - case "bind.retval": - return "bind", nil - case "bpf.cmd": - return "bpf", nil - case "bpf.map.name": - return "bpf", nil - case "bpf.map.type": - return "bpf", nil - case "bpf.prog.attach_type": - return "bpf", nil - case "bpf.prog.helpers": - return "bpf", nil - case "bpf.prog.name": - return "bpf", nil - case "bpf.prog.tag": - return "bpf", nil - case "bpf.prog.type": - return "bpf", nil - case "bpf.retval": - return "bpf", nil - case "capset.cap_effective": - return "capset", nil - case "capset.cap_permitted": - return "capset", nil - case "cgroup.file.inode": - return "", nil - case "cgroup.file.mount_id": - return "", nil - case "cgroup.id": - return "", nil - case "cgroup.manager": - return "", nil - case "cgroup.version": - return "", nil - case "chdir.file.change_time": - return "chdir", nil - case "chdir.file.filesystem": - return "chdir", nil - case "chdir.file.gid": - return "chdir", nil - case "chdir.file.group": - return "chdir", nil - case "chdir.file.hashes": - return "chdir", nil - case "chdir.file.in_upper_layer": - return "chdir", nil - case "chdir.file.inode": - return "chdir", nil - case "chdir.file.mode": - return "chdir", nil - case "chdir.file.modification_time": - return "chdir", nil - case "chdir.file.mount_id": - return "chdir", nil - case "chdir.file.name": - return "chdir", nil - case "chdir.file.name.length": - return "chdir", nil - case "chdir.file.package.name": - return "chdir", nil - case "chdir.file.package.source_version": - return "chdir", nil - case "chdir.file.package.version": - return "chdir", nil - case "chdir.file.path": - return "chdir", nil - case "chdir.file.path.length": - return "chdir", nil - case "chdir.file.rights": - return "chdir", nil - case "chdir.file.uid": - return "chdir", nil - case "chdir.file.user": - return "chdir", nil - case "chdir.retval": - return "chdir", nil - case "chdir.syscall.path": - return "chdir", nil - case "chmod.file.change_time": - return "chmod", nil - case "chmod.file.destination.mode": - return "chmod", nil - case "chmod.file.destination.rights": - return "chmod", nil - case "chmod.file.filesystem": - return "chmod", nil - case "chmod.file.gid": - return "chmod", nil - case "chmod.file.group": - return "chmod", nil - case "chmod.file.hashes": - return "chmod", nil - case "chmod.file.in_upper_layer": - return "chmod", nil - case "chmod.file.inode": - return "chmod", nil - case "chmod.file.mode": - return "chmod", nil - case "chmod.file.modification_time": - return "chmod", nil - case "chmod.file.mount_id": - return "chmod", nil - case "chmod.file.name": - return "chmod", nil - case "chmod.file.name.length": - return "chmod", nil - case "chmod.file.package.name": - return "chmod", nil - case "chmod.file.package.source_version": - return "chmod", nil - case "chmod.file.package.version": - return "chmod", nil - case "chmod.file.path": - 
return "chmod", nil - case "chmod.file.path.length": - return "chmod", nil - case "chmod.file.rights": - return "chmod", nil - case "chmod.file.uid": - return "chmod", nil - case "chmod.file.user": - return "chmod", nil - case "chmod.retval": - return "chmod", nil - case "chmod.syscall.mode": - return "chmod", nil - case "chmod.syscall.path": - return "chmod", nil - case "chown.file.change_time": - return "chown", nil - case "chown.file.destination.gid": - return "chown", nil - case "chown.file.destination.group": - return "chown", nil - case "chown.file.destination.uid": - return "chown", nil - case "chown.file.destination.user": - return "chown", nil - case "chown.file.filesystem": - return "chown", nil - case "chown.file.gid": - return "chown", nil - case "chown.file.group": - return "chown", nil - case "chown.file.hashes": - return "chown", nil - case "chown.file.in_upper_layer": - return "chown", nil - case "chown.file.inode": - return "chown", nil - case "chown.file.mode": - return "chown", nil - case "chown.file.modification_time": - return "chown", nil - case "chown.file.mount_id": - return "chown", nil - case "chown.file.name": - return "chown", nil - case "chown.file.name.length": - return "chown", nil - case "chown.file.package.name": - return "chown", nil - case "chown.file.package.source_version": - return "chown", nil - case "chown.file.package.version": - return "chown", nil - case "chown.file.path": - return "chown", nil - case "chown.file.path.length": - return "chown", nil - case "chown.file.rights": - return "chown", nil - case "chown.file.uid": - return "chown", nil - case "chown.file.user": - return "chown", nil - case "chown.retval": - return "chown", nil - case "chown.syscall.gid": - return "chown", nil - case "chown.syscall.path": - return "chown", nil - case "chown.syscall.uid": - return "chown", nil - case "connect.addr.family": - return "connect", nil - case "connect.addr.ip": - return "connect", nil - case "connect.addr.is_public": - return "connect", nil - case "connect.addr.port": - return "connect", nil - case "connect.protocol": - return "connect", nil - case "connect.retval": - return "connect", nil - case "container.created_at": - return "", nil - case "container.id": - return "", nil - case "container.runtime": - return "", nil - case "container.tags": - return "", nil - case "dns.id": - return "dns", nil - case "dns.question.class": - return "dns", nil - case "dns.question.count": - return "dns", nil - case "dns.question.length": - return "dns", nil - case "dns.question.name": - return "dns", nil - case "dns.question.name.length": - return "dns", nil - case "dns.question.type": - return "dns", nil - case "event.async": - return "", nil - case "event.hostname": - return "", nil - case "event.origin": - return "", nil - case "event.os": - return "", nil - case "event.service": - return "", nil - case "event.timestamp": - return "", nil - case "exec.args": - return "exec", nil - case "exec.args_flags": - return "exec", nil - case "exec.args_options": - return "exec", nil - case "exec.args_truncated": - return "exec", nil - case "exec.argv": - return "exec", nil - case "exec.argv0": - return "exec", nil - case "exec.auid": - return "exec", nil - case "exec.cap_effective": - return "exec", nil - case "exec.cap_permitted": - return "exec", nil - case "exec.cgroup.file.inode": - return "exec", nil - case "exec.cgroup.file.mount_id": - return "exec", nil - case "exec.cgroup.id": - return "exec", nil - case "exec.cgroup.manager": - return "exec", nil - case 
"exec.cgroup.version": - return "exec", nil - case "exec.comm": - return "exec", nil - case "exec.container.id": - return "exec", nil - case "exec.created_at": - return "exec", nil - case "exec.egid": - return "exec", nil - case "exec.egroup": - return "exec", nil - case "exec.envp": - return "exec", nil - case "exec.envs": - return "exec", nil - case "exec.envs_truncated": - return "exec", nil - case "exec.euid": - return "exec", nil - case "exec.euser": - return "exec", nil - case "exec.file.change_time": - return "exec", nil - case "exec.file.filesystem": - return "exec", nil - case "exec.file.gid": - return "exec", nil - case "exec.file.group": - return "exec", nil - case "exec.file.hashes": - return "exec", nil - case "exec.file.in_upper_layer": - return "exec", nil - case "exec.file.inode": - return "exec", nil - case "exec.file.mode": - return "exec", nil - case "exec.file.modification_time": - return "exec", nil - case "exec.file.mount_id": - return "exec", nil - case "exec.file.name": - return "exec", nil - case "exec.file.name.length": - return "exec", nil - case "exec.file.package.name": - return "exec", nil - case "exec.file.package.source_version": - return "exec", nil - case "exec.file.package.version": - return "exec", nil - case "exec.file.path": - return "exec", nil - case "exec.file.path.length": - return "exec", nil - case "exec.file.rights": - return "exec", nil - case "exec.file.uid": - return "exec", nil - case "exec.file.user": - return "exec", nil - case "exec.fsgid": - return "exec", nil - case "exec.fsgroup": - return "exec", nil - case "exec.fsuid": - return "exec", nil - case "exec.fsuser": - return "exec", nil - case "exec.gid": - return "exec", nil - case "exec.group": - return "exec", nil - case "exec.interpreter.file.change_time": - return "exec", nil - case "exec.interpreter.file.filesystem": - return "exec", nil - case "exec.interpreter.file.gid": - return "exec", nil - case "exec.interpreter.file.group": - return "exec", nil - case "exec.interpreter.file.hashes": - return "exec", nil - case "exec.interpreter.file.in_upper_layer": - return "exec", nil - case "exec.interpreter.file.inode": - return "exec", nil - case "exec.interpreter.file.mode": - return "exec", nil - case "exec.interpreter.file.modification_time": - return "exec", nil - case "exec.interpreter.file.mount_id": - return "exec", nil - case "exec.interpreter.file.name": - return "exec", nil - case "exec.interpreter.file.name.length": - return "exec", nil - case "exec.interpreter.file.package.name": - return "exec", nil - case "exec.interpreter.file.package.source_version": - return "exec", nil - case "exec.interpreter.file.package.version": - return "exec", nil - case "exec.interpreter.file.path": - return "exec", nil - case "exec.interpreter.file.path.length": - return "exec", nil - case "exec.interpreter.file.rights": - return "exec", nil - case "exec.interpreter.file.uid": - return "exec", nil - case "exec.interpreter.file.user": - return "exec", nil - case "exec.is_exec": - return "exec", nil - case "exec.is_kworker": - return "exec", nil - case "exec.is_thread": - return "exec", nil - case "exec.pid": - return "exec", nil - case "exec.ppid": - return "exec", nil - case "exec.syscall.path": - return "exec", nil - case "exec.tid": - return "exec", nil - case "exec.tty_name": - return "exec", nil - case "exec.uid": - return "exec", nil - case "exec.user": - return "exec", nil - case "exec.user_session.k8s_groups": - return "exec", nil - case "exec.user_session.k8s_uid": - return "exec", nil 
- case "exec.user_session.k8s_username": - return "exec", nil - case "exit.args": - return "exit", nil - case "exit.args_flags": - return "exit", nil - case "exit.args_options": - return "exit", nil - case "exit.args_truncated": - return "exit", nil - case "exit.argv": - return "exit", nil - case "exit.argv0": - return "exit", nil - case "exit.auid": - return "exit", nil - case "exit.cap_effective": - return "exit", nil - case "exit.cap_permitted": - return "exit", nil - case "exit.cause": - return "exit", nil - case "exit.cgroup.file.inode": - return "exit", nil - case "exit.cgroup.file.mount_id": - return "exit", nil - case "exit.cgroup.id": - return "exit", nil - case "exit.cgroup.manager": - return "exit", nil - case "exit.cgroup.version": - return "exit", nil - case "exit.code": - return "exit", nil - case "exit.comm": - return "exit", nil - case "exit.container.id": - return "exit", nil - case "exit.created_at": - return "exit", nil - case "exit.egid": - return "exit", nil - case "exit.egroup": - return "exit", nil - case "exit.envp": - return "exit", nil - case "exit.envs": - return "exit", nil - case "exit.envs_truncated": - return "exit", nil - case "exit.euid": - return "exit", nil - case "exit.euser": - return "exit", nil - case "exit.file.change_time": - return "exit", nil - case "exit.file.filesystem": - return "exit", nil - case "exit.file.gid": - return "exit", nil - case "exit.file.group": - return "exit", nil - case "exit.file.hashes": - return "exit", nil - case "exit.file.in_upper_layer": - return "exit", nil - case "exit.file.inode": - return "exit", nil - case "exit.file.mode": - return "exit", nil - case "exit.file.modification_time": - return "exit", nil - case "exit.file.mount_id": - return "exit", nil - case "exit.file.name": - return "exit", nil - case "exit.file.name.length": - return "exit", nil - case "exit.file.package.name": - return "exit", nil - case "exit.file.package.source_version": - return "exit", nil - case "exit.file.package.version": - return "exit", nil - case "exit.file.path": - return "exit", nil - case "exit.file.path.length": - return "exit", nil - case "exit.file.rights": - return "exit", nil - case "exit.file.uid": - return "exit", nil - case "exit.file.user": - return "exit", nil - case "exit.fsgid": - return "exit", nil - case "exit.fsgroup": - return "exit", nil - case "exit.fsuid": - return "exit", nil - case "exit.fsuser": - return "exit", nil - case "exit.gid": - return "exit", nil - case "exit.group": - return "exit", nil - case "exit.interpreter.file.change_time": - return "exit", nil - case "exit.interpreter.file.filesystem": - return "exit", nil - case "exit.interpreter.file.gid": - return "exit", nil - case "exit.interpreter.file.group": - return "exit", nil - case "exit.interpreter.file.hashes": - return "exit", nil - case "exit.interpreter.file.in_upper_layer": - return "exit", nil - case "exit.interpreter.file.inode": - return "exit", nil - case "exit.interpreter.file.mode": - return "exit", nil - case "exit.interpreter.file.modification_time": - return "exit", nil - case "exit.interpreter.file.mount_id": - return "exit", nil - case "exit.interpreter.file.name": - return "exit", nil - case "exit.interpreter.file.name.length": - return "exit", nil - case "exit.interpreter.file.package.name": - return "exit", nil - case "exit.interpreter.file.package.source_version": - return "exit", nil - case "exit.interpreter.file.package.version": - return "exit", nil - case "exit.interpreter.file.path": - return "exit", nil - case 
"exit.interpreter.file.path.length": - return "exit", nil - case "exit.interpreter.file.rights": - return "exit", nil - case "exit.interpreter.file.uid": - return "exit", nil - case "exit.interpreter.file.user": - return "exit", nil - case "exit.is_exec": - return "exit", nil - case "exit.is_kworker": - return "exit", nil - case "exit.is_thread": - return "exit", nil - case "exit.pid": - return "exit", nil - case "exit.ppid": - return "exit", nil - case "exit.tid": - return "exit", nil - case "exit.tty_name": - return "exit", nil - case "exit.uid": - return "exit", nil - case "exit.user": - return "exit", nil - case "exit.user_session.k8s_groups": - return "exit", nil - case "exit.user_session.k8s_uid": - return "exit", nil - case "exit.user_session.k8s_username": - return "exit", nil - case "imds.aws.is_imds_v2": - return "imds", nil - case "imds.aws.security_credentials.type": - return "imds", nil - case "imds.cloud_provider": - return "imds", nil - case "imds.host": - return "imds", nil - case "imds.server": - return "imds", nil - case "imds.type": - return "imds", nil - case "imds.url": - return "imds", nil - case "imds.user_agent": - return "imds", nil - case "link.file.change_time": - return "link", nil - case "link.file.destination.change_time": - return "link", nil - case "link.file.destination.filesystem": - return "link", nil - case "link.file.destination.gid": - return "link", nil - case "link.file.destination.group": - return "link", nil - case "link.file.destination.hashes": - return "link", nil - case "link.file.destination.in_upper_layer": - return "link", nil - case "link.file.destination.inode": - return "link", nil - case "link.file.destination.mode": - return "link", nil - case "link.file.destination.modification_time": - return "link", nil - case "link.file.destination.mount_id": - return "link", nil - case "link.file.destination.name": - return "link", nil - case "link.file.destination.name.length": - return "link", nil - case "link.file.destination.package.name": - return "link", nil - case "link.file.destination.package.source_version": - return "link", nil - case "link.file.destination.package.version": - return "link", nil - case "link.file.destination.path": - return "link", nil - case "link.file.destination.path.length": - return "link", nil - case "link.file.destination.rights": - return "link", nil - case "link.file.destination.uid": - return "link", nil - case "link.file.destination.user": - return "link", nil - case "link.file.filesystem": - return "link", nil - case "link.file.gid": - return "link", nil - case "link.file.group": - return "link", nil - case "link.file.hashes": - return "link", nil - case "link.file.in_upper_layer": - return "link", nil - case "link.file.inode": - return "link", nil - case "link.file.mode": - return "link", nil - case "link.file.modification_time": - return "link", nil - case "link.file.mount_id": - return "link", nil - case "link.file.name": - return "link", nil - case "link.file.name.length": - return "link", nil - case "link.file.package.name": - return "link", nil - case "link.file.package.source_version": - return "link", nil - case "link.file.package.version": - return "link", nil - case "link.file.path": - return "link", nil - case "link.file.path.length": - return "link", nil - case "link.file.rights": - return "link", nil - case "link.file.uid": - return "link", nil - case "link.file.user": - return "link", nil - case "link.retval": - return "link", nil - case "link.syscall.destination.path": - return "link", nil - 
case "link.syscall.path": - return "link", nil - case "load_module.args": - return "load_module", nil - case "load_module.args_truncated": - return "load_module", nil - case "load_module.argv": - return "load_module", nil - case "load_module.file.change_time": - return "load_module", nil - case "load_module.file.filesystem": - return "load_module", nil - case "load_module.file.gid": - return "load_module", nil - case "load_module.file.group": - return "load_module", nil - case "load_module.file.hashes": - return "load_module", nil - case "load_module.file.in_upper_layer": - return "load_module", nil - case "load_module.file.inode": - return "load_module", nil - case "load_module.file.mode": - return "load_module", nil - case "load_module.file.modification_time": - return "load_module", nil - case "load_module.file.mount_id": - return "load_module", nil - case "load_module.file.name": - return "load_module", nil - case "load_module.file.name.length": - return "load_module", nil - case "load_module.file.package.name": - return "load_module", nil - case "load_module.file.package.source_version": - return "load_module", nil - case "load_module.file.package.version": - return "load_module", nil - case "load_module.file.path": - return "load_module", nil - case "load_module.file.path.length": - return "load_module", nil - case "load_module.file.rights": - return "load_module", nil - case "load_module.file.uid": - return "load_module", nil - case "load_module.file.user": - return "load_module", nil - case "load_module.loaded_from_memory": - return "load_module", nil - case "load_module.name": - return "load_module", nil - case "load_module.retval": - return "load_module", nil - case "mkdir.file.change_time": - return "mkdir", nil - case "mkdir.file.destination.mode": - return "mkdir", nil - case "mkdir.file.destination.rights": - return "mkdir", nil - case "mkdir.file.filesystem": - return "mkdir", nil - case "mkdir.file.gid": - return "mkdir", nil - case "mkdir.file.group": - return "mkdir", nil - case "mkdir.file.hashes": - return "mkdir", nil - case "mkdir.file.in_upper_layer": - return "mkdir", nil - case "mkdir.file.inode": - return "mkdir", nil - case "mkdir.file.mode": - return "mkdir", nil - case "mkdir.file.modification_time": - return "mkdir", nil - case "mkdir.file.mount_id": - return "mkdir", nil - case "mkdir.file.name": - return "mkdir", nil - case "mkdir.file.name.length": - return "mkdir", nil - case "mkdir.file.package.name": - return "mkdir", nil - case "mkdir.file.package.source_version": - return "mkdir", nil - case "mkdir.file.package.version": - return "mkdir", nil - case "mkdir.file.path": - return "mkdir", nil - case "mkdir.file.path.length": - return "mkdir", nil - case "mkdir.file.rights": - return "mkdir", nil - case "mkdir.file.uid": - return "mkdir", nil - case "mkdir.file.user": - return "mkdir", nil - case "mkdir.retval": - return "mkdir", nil - case "mmap.file.change_time": - return "mmap", nil - case "mmap.file.filesystem": - return "mmap", nil - case "mmap.file.gid": - return "mmap", nil - case "mmap.file.group": - return "mmap", nil - case "mmap.file.hashes": - return "mmap", nil - case "mmap.file.in_upper_layer": - return "mmap", nil - case "mmap.file.inode": - return "mmap", nil - case "mmap.file.mode": - return "mmap", nil - case "mmap.file.modification_time": - return "mmap", nil - case "mmap.file.mount_id": - return "mmap", nil - case "mmap.file.name": - return "mmap", nil - case "mmap.file.name.length": - return "mmap", nil - case "mmap.file.package.name": 
- return "mmap", nil - case "mmap.file.package.source_version": - return "mmap", nil - case "mmap.file.package.version": - return "mmap", nil - case "mmap.file.path": - return "mmap", nil - case "mmap.file.path.length": - return "mmap", nil - case "mmap.file.rights": - return "mmap", nil - case "mmap.file.uid": - return "mmap", nil - case "mmap.file.user": - return "mmap", nil - case "mmap.flags": - return "mmap", nil - case "mmap.protection": - return "mmap", nil - case "mmap.retval": - return "mmap", nil - case "mount.fs_type": - return "mount", nil - case "mount.mountpoint.path": - return "mount", nil - case "mount.retval": - return "mount", nil - case "mount.root.path": - return "mount", nil - case "mount.source.path": - return "mount", nil - case "mount.syscall.fs_type": - return "mount", nil - case "mount.syscall.mountpoint.path": - return "mount", nil - case "mount.syscall.source.path": - return "mount", nil - case "mprotect.req_protection": - return "mprotect", nil - case "mprotect.retval": - return "mprotect", nil - case "mprotect.vm_protection": - return "mprotect", nil - case "network.destination.ip": - return "", nil - case "network.destination.is_public": - return "", nil - case "network.destination.port": - return "", nil - case "network.device.ifname": - return "", nil - case "network.l3_protocol": - return "", nil - case "network.l4_protocol": - return "", nil - case "network.size": - return "", nil - case "network.source.ip": - return "", nil - case "network.source.is_public": - return "", nil - case "network.source.port": - return "", nil - case "ondemand.arg1.str": - return "ondemand", nil - case "ondemand.arg1.uint": - return "ondemand", nil - case "ondemand.arg2.str": - return "ondemand", nil - case "ondemand.arg2.uint": - return "ondemand", nil - case "ondemand.arg3.str": - return "ondemand", nil - case "ondemand.arg3.uint": - return "ondemand", nil - case "ondemand.arg4.str": - return "ondemand", nil - case "ondemand.arg4.uint": - return "ondemand", nil - case "ondemand.name": - return "ondemand", nil - case "open.file.change_time": - return "open", nil - case "open.file.destination.mode": - return "open", nil - case "open.file.filesystem": - return "open", nil - case "open.file.gid": - return "open", nil - case "open.file.group": - return "open", nil - case "open.file.hashes": - return "open", nil - case "open.file.in_upper_layer": - return "open", nil - case "open.file.inode": - return "open", nil - case "open.file.mode": - return "open", nil - case "open.file.modification_time": - return "open", nil - case "open.file.mount_id": - return "open", nil - case "open.file.name": - return "open", nil - case "open.file.name.length": - return "open", nil - case "open.file.package.name": - return "open", nil - case "open.file.package.source_version": - return "open", nil - case "open.file.package.version": - return "open", nil - case "open.file.path": - return "open", nil - case "open.file.path.length": - return "open", nil - case "open.file.rights": - return "open", nil - case "open.file.uid": - return "open", nil - case "open.file.user": - return "open", nil - case "open.flags": - return "open", nil - case "open.retval": - return "open", nil - case "open.syscall.flags": - return "open", nil - case "open.syscall.mode": - return "open", nil - case "open.syscall.path": - return "open", nil - case "packet.destination.ip": - return "packet", nil - case "packet.destination.is_public": - return "packet", nil - case "packet.destination.port": - return "packet", nil - case 
"packet.device.ifname": - return "packet", nil - case "packet.filter": - return "packet", nil - case "packet.l3_protocol": - return "packet", nil - case "packet.l4_protocol": - return "packet", nil - case "packet.size": - return "packet", nil - case "packet.source.ip": - return "packet", nil - case "packet.source.is_public": - return "packet", nil - case "packet.source.port": - return "packet", nil - case "packet.tls.version": - return "packet", nil - case "process.ancestors.args": - return "", nil - case "process.ancestors.args_flags": - return "", nil - case "process.ancestors.args_options": - return "", nil - case "process.ancestors.args_truncated": - return "", nil - case "process.ancestors.argv": - return "", nil - case "process.ancestors.argv0": - return "", nil - case "process.ancestors.auid": - return "", nil - case "process.ancestors.cap_effective": - return "", nil - case "process.ancestors.cap_permitted": - return "", nil - case "process.ancestors.cgroup.file.inode": - return "", nil - case "process.ancestors.cgroup.file.mount_id": - return "", nil - case "process.ancestors.cgroup.id": - return "", nil - case "process.ancestors.cgroup.manager": - return "", nil - case "process.ancestors.cgroup.version": - return "", nil - case "process.ancestors.comm": - return "", nil - case "process.ancestors.container.id": - return "", nil - case "process.ancestors.created_at": - return "", nil - case "process.ancestors.egid": - return "", nil - case "process.ancestors.egroup": - return "", nil - case "process.ancestors.envp": - return "", nil - case "process.ancestors.envs": - return "", nil - case "process.ancestors.envs_truncated": - return "", nil - case "process.ancestors.euid": - return "", nil - case "process.ancestors.euser": - return "", nil - case "process.ancestors.file.change_time": - return "", nil - case "process.ancestors.file.filesystem": - return "", nil - case "process.ancestors.file.gid": - return "", nil - case "process.ancestors.file.group": - return "", nil - case "process.ancestors.file.hashes": - return "", nil - case "process.ancestors.file.in_upper_layer": - return "", nil - case "process.ancestors.file.inode": - return "", nil - case "process.ancestors.file.mode": - return "", nil - case "process.ancestors.file.modification_time": - return "", nil - case "process.ancestors.file.mount_id": - return "", nil - case "process.ancestors.file.name": - return "", nil - case "process.ancestors.file.name.length": - return "", nil - case "process.ancestors.file.package.name": - return "", nil - case "process.ancestors.file.package.source_version": - return "", nil - case "process.ancestors.file.package.version": - return "", nil - case "process.ancestors.file.path": - return "", nil - case "process.ancestors.file.path.length": - return "", nil - case "process.ancestors.file.rights": - return "", nil - case "process.ancestors.file.uid": - return "", nil - case "process.ancestors.file.user": - return "", nil - case "process.ancestors.fsgid": - return "", nil - case "process.ancestors.fsgroup": - return "", nil - case "process.ancestors.fsuid": - return "", nil - case "process.ancestors.fsuser": - return "", nil - case "process.ancestors.gid": - return "", nil - case "process.ancestors.group": - return "", nil - case "process.ancestors.interpreter.file.change_time": - return "", nil - case "process.ancestors.interpreter.file.filesystem": - return "", nil - case "process.ancestors.interpreter.file.gid": - return "", nil - case "process.ancestors.interpreter.file.group": - return 
"", nil - case "process.ancestors.interpreter.file.hashes": - return "", nil - case "process.ancestors.interpreter.file.in_upper_layer": - return "", nil - case "process.ancestors.interpreter.file.inode": - return "", nil - case "process.ancestors.interpreter.file.mode": - return "", nil - case "process.ancestors.interpreter.file.modification_time": - return "", nil - case "process.ancestors.interpreter.file.mount_id": - return "", nil - case "process.ancestors.interpreter.file.name": - return "", nil - case "process.ancestors.interpreter.file.name.length": - return "", nil - case "process.ancestors.interpreter.file.package.name": - return "", nil - case "process.ancestors.interpreter.file.package.source_version": - return "", nil - case "process.ancestors.interpreter.file.package.version": - return "", nil - case "process.ancestors.interpreter.file.path": - return "", nil - case "process.ancestors.interpreter.file.path.length": - return "", nil - case "process.ancestors.interpreter.file.rights": - return "", nil - case "process.ancestors.interpreter.file.uid": - return "", nil - case "process.ancestors.interpreter.file.user": - return "", nil - case "process.ancestors.is_exec": - return "", nil - case "process.ancestors.is_kworker": - return "", nil - case "process.ancestors.is_thread": - return "", nil - case "process.ancestors.length": - return "", nil - case "process.ancestors.pid": - return "", nil - case "process.ancestors.ppid": - return "", nil - case "process.ancestors.tid": - return "", nil - case "process.ancestors.tty_name": - return "", nil - case "process.ancestors.uid": - return "", nil - case "process.ancestors.user": - return "", nil - case "process.ancestors.user_session.k8s_groups": - return "", nil - case "process.ancestors.user_session.k8s_uid": - return "", nil - case "process.ancestors.user_session.k8s_username": - return "", nil - case "process.args": - return "", nil - case "process.args_flags": - return "", nil - case "process.args_options": - return "", nil - case "process.args_truncated": - return "", nil - case "process.argv": - return "", nil - case "process.argv0": - return "", nil - case "process.auid": - return "", nil - case "process.cap_effective": - return "", nil - case "process.cap_permitted": - return "", nil - case "process.cgroup.file.inode": - return "", nil - case "process.cgroup.file.mount_id": - return "", nil - case "process.cgroup.id": - return "", nil - case "process.cgroup.manager": - return "", nil - case "process.cgroup.version": - return "", nil - case "process.comm": - return "", nil - case "process.container.id": - return "", nil - case "process.created_at": - return "", nil - case "process.egid": - return "", nil - case "process.egroup": - return "", nil - case "process.envp": - return "", nil - case "process.envs": - return "", nil - case "process.envs_truncated": - return "", nil - case "process.euid": - return "", nil - case "process.euser": - return "", nil - case "process.file.change_time": - return "", nil - case "process.file.filesystem": - return "", nil - case "process.file.gid": - return "", nil - case "process.file.group": - return "", nil - case "process.file.hashes": - return "", nil - case "process.file.in_upper_layer": - return "", nil - case "process.file.inode": - return "", nil - case "process.file.mode": - return "", nil - case "process.file.modification_time": - return "", nil - case "process.file.mount_id": - return "", nil - case "process.file.name": - return "", nil - case "process.file.name.length": - return "", 
nil - case "process.file.package.name": - return "", nil - case "process.file.package.source_version": - return "", nil - case "process.file.package.version": - return "", nil - case "process.file.path": - return "", nil - case "process.file.path.length": - return "", nil - case "process.file.rights": - return "", nil - case "process.file.uid": - return "", nil - case "process.file.user": - return "", nil - case "process.fsgid": - return "", nil - case "process.fsgroup": - return "", nil - case "process.fsuid": - return "", nil - case "process.fsuser": - return "", nil - case "process.gid": - return "", nil - case "process.group": - return "", nil - case "process.interpreter.file.change_time": - return "", nil - case "process.interpreter.file.filesystem": - return "", nil - case "process.interpreter.file.gid": - return "", nil - case "process.interpreter.file.group": - return "", nil - case "process.interpreter.file.hashes": - return "", nil - case "process.interpreter.file.in_upper_layer": - return "", nil - case "process.interpreter.file.inode": - return "", nil - case "process.interpreter.file.mode": - return "", nil - case "process.interpreter.file.modification_time": - return "", nil - case "process.interpreter.file.mount_id": - return "", nil - case "process.interpreter.file.name": - return "", nil - case "process.interpreter.file.name.length": - return "", nil - case "process.interpreter.file.package.name": - return "", nil - case "process.interpreter.file.package.source_version": - return "", nil - case "process.interpreter.file.package.version": - return "", nil - case "process.interpreter.file.path": - return "", nil - case "process.interpreter.file.path.length": - return "", nil - case "process.interpreter.file.rights": - return "", nil - case "process.interpreter.file.uid": - return "", nil - case "process.interpreter.file.user": - return "", nil - case "process.is_exec": - return "", nil - case "process.is_kworker": - return "", nil - case "process.is_thread": - return "", nil - case "process.parent.args": - return "", nil - case "process.parent.args_flags": - return "", nil - case "process.parent.args_options": - return "", nil - case "process.parent.args_truncated": - return "", nil - case "process.parent.argv": - return "", nil - case "process.parent.argv0": - return "", nil - case "process.parent.auid": - return "", nil - case "process.parent.cap_effective": - return "", nil - case "process.parent.cap_permitted": - return "", nil - case "process.parent.cgroup.file.inode": - return "", nil - case "process.parent.cgroup.file.mount_id": - return "", nil - case "process.parent.cgroup.id": - return "", nil - case "process.parent.cgroup.manager": - return "", nil - case "process.parent.cgroup.version": - return "", nil - case "process.parent.comm": - return "", nil - case "process.parent.container.id": - return "", nil - case "process.parent.created_at": - return "", nil - case "process.parent.egid": - return "", nil - case "process.parent.egroup": - return "", nil - case "process.parent.envp": - return "", nil - case "process.parent.envs": - return "", nil - case "process.parent.envs_truncated": - return "", nil - case "process.parent.euid": - return "", nil - case "process.parent.euser": - return "", nil - case "process.parent.file.change_time": - return "", nil - case "process.parent.file.filesystem": - return "", nil - case "process.parent.file.gid": - return "", nil - case "process.parent.file.group": - return "", nil - case "process.parent.file.hashes": - return "", nil - 
case "process.parent.file.in_upper_layer": - return "", nil - case "process.parent.file.inode": - return "", nil - case "process.parent.file.mode": - return "", nil - case "process.parent.file.modification_time": - return "", nil - case "process.parent.file.mount_id": - return "", nil - case "process.parent.file.name": - return "", nil - case "process.parent.file.name.length": - return "", nil - case "process.parent.file.package.name": - return "", nil - case "process.parent.file.package.source_version": - return "", nil - case "process.parent.file.package.version": - return "", nil - case "process.parent.file.path": - return "", nil - case "process.parent.file.path.length": - return "", nil - case "process.parent.file.rights": - return "", nil - case "process.parent.file.uid": - return "", nil - case "process.parent.file.user": - return "", nil - case "process.parent.fsgid": - return "", nil - case "process.parent.fsgroup": - return "", nil - case "process.parent.fsuid": - return "", nil - case "process.parent.fsuser": - return "", nil - case "process.parent.gid": - return "", nil - case "process.parent.group": - return "", nil - case "process.parent.interpreter.file.change_time": - return "", nil - case "process.parent.interpreter.file.filesystem": - return "", nil - case "process.parent.interpreter.file.gid": - return "", nil - case "process.parent.interpreter.file.group": - return "", nil - case "process.parent.interpreter.file.hashes": - return "", nil - case "process.parent.interpreter.file.in_upper_layer": - return "", nil - case "process.parent.interpreter.file.inode": - return "", nil - case "process.parent.interpreter.file.mode": - return "", nil - case "process.parent.interpreter.file.modification_time": - return "", nil - case "process.parent.interpreter.file.mount_id": - return "", nil - case "process.parent.interpreter.file.name": - return "", nil - case "process.parent.interpreter.file.name.length": - return "", nil - case "process.parent.interpreter.file.package.name": - return "", nil - case "process.parent.interpreter.file.package.source_version": - return "", nil - case "process.parent.interpreter.file.package.version": - return "", nil - case "process.parent.interpreter.file.path": - return "", nil - case "process.parent.interpreter.file.path.length": - return "", nil - case "process.parent.interpreter.file.rights": - return "", nil - case "process.parent.interpreter.file.uid": - return "", nil - case "process.parent.interpreter.file.user": - return "", nil - case "process.parent.is_exec": - return "", nil - case "process.parent.is_kworker": - return "", nil - case "process.parent.is_thread": - return "", nil - case "process.parent.pid": - return "", nil - case "process.parent.ppid": - return "", nil - case "process.parent.tid": - return "", nil - case "process.parent.tty_name": - return "", nil - case "process.parent.uid": - return "", nil - case "process.parent.user": - return "", nil - case "process.parent.user_session.k8s_groups": - return "", nil - case "process.parent.user_session.k8s_uid": - return "", nil - case "process.parent.user_session.k8s_username": - return "", nil - case "process.pid": - return "", nil - case "process.ppid": - return "", nil - case "process.tid": - return "", nil - case "process.tty_name": - return "", nil - case "process.uid": - return "", nil - case "process.user": - return "", nil - case "process.user_session.k8s_groups": - return "", nil - case "process.user_session.k8s_uid": - return "", nil - case 
"process.user_session.k8s_username": - return "", nil - case "ptrace.request": - return "ptrace", nil - case "ptrace.retval": - return "ptrace", nil - case "ptrace.tracee.ancestors.args": - return "ptrace", nil - case "ptrace.tracee.ancestors.args_flags": - return "ptrace", nil - case "ptrace.tracee.ancestors.args_options": - return "ptrace", nil - case "ptrace.tracee.ancestors.args_truncated": - return "ptrace", nil - case "ptrace.tracee.ancestors.argv": - return "ptrace", nil - case "ptrace.tracee.ancestors.argv0": - return "ptrace", nil - case "ptrace.tracee.ancestors.auid": - return "ptrace", nil - case "ptrace.tracee.ancestors.cap_effective": - return "ptrace", nil - case "ptrace.tracee.ancestors.cap_permitted": - return "ptrace", nil - case "ptrace.tracee.ancestors.cgroup.file.inode": - return "ptrace", nil - case "ptrace.tracee.ancestors.cgroup.file.mount_id": - return "ptrace", nil - case "ptrace.tracee.ancestors.cgroup.id": - return "ptrace", nil - case "ptrace.tracee.ancestors.cgroup.manager": - return "ptrace", nil - case "ptrace.tracee.ancestors.cgroup.version": - return "ptrace", nil - case "ptrace.tracee.ancestors.comm": - return "ptrace", nil - case "ptrace.tracee.ancestors.container.id": - return "ptrace", nil - case "ptrace.tracee.ancestors.created_at": - return "ptrace", nil - case "ptrace.tracee.ancestors.egid": - return "ptrace", nil - case "ptrace.tracee.ancestors.egroup": - return "ptrace", nil - case "ptrace.tracee.ancestors.envp": - return "ptrace", nil - case "ptrace.tracee.ancestors.envs": - return "ptrace", nil - case "ptrace.tracee.ancestors.envs_truncated": - return "ptrace", nil - case "ptrace.tracee.ancestors.euid": - return "ptrace", nil - case "ptrace.tracee.ancestors.euser": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.change_time": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.filesystem": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.gid": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.group": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.hashes": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.in_upper_layer": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.inode": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.mode": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.modification_time": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.mount_id": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.name": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.name.length": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.package.name": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.package.source_version": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.package.version": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.path": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.path.length": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.rights": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.uid": - return "ptrace", nil - case "ptrace.tracee.ancestors.file.user": - return "ptrace", nil - case "ptrace.tracee.ancestors.fsgid": - return "ptrace", nil - case "ptrace.tracee.ancestors.fsgroup": - return "ptrace", nil - case "ptrace.tracee.ancestors.fsuid": - return "ptrace", nil - case "ptrace.tracee.ancestors.fsuser": - return "ptrace", nil - case "ptrace.tracee.ancestors.gid": - return "ptrace", nil - case "ptrace.tracee.ancestors.group": - 
return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.change_time": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.filesystem": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.gid": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.group": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.hashes": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.in_upper_layer": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.inode": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.mode": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.modification_time": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.mount_id": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.name": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.name.length": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.package.name": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.package.source_version": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.package.version": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.path": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.path.length": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.rights": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.uid": - return "ptrace", nil - case "ptrace.tracee.ancestors.interpreter.file.user": - return "ptrace", nil - case "ptrace.tracee.ancestors.is_exec": - return "ptrace", nil - case "ptrace.tracee.ancestors.is_kworker": - return "ptrace", nil - case "ptrace.tracee.ancestors.is_thread": - return "ptrace", nil - case "ptrace.tracee.ancestors.length": - return "ptrace", nil - case "ptrace.tracee.ancestors.pid": - return "ptrace", nil - case "ptrace.tracee.ancestors.ppid": - return "ptrace", nil - case "ptrace.tracee.ancestors.tid": - return "ptrace", nil - case "ptrace.tracee.ancestors.tty_name": - return "ptrace", nil - case "ptrace.tracee.ancestors.uid": - return "ptrace", nil - case "ptrace.tracee.ancestors.user": - return "ptrace", nil - case "ptrace.tracee.ancestors.user_session.k8s_groups": - return "ptrace", nil - case "ptrace.tracee.ancestors.user_session.k8s_uid": - return "ptrace", nil - case "ptrace.tracee.ancestors.user_session.k8s_username": - return "ptrace", nil - case "ptrace.tracee.args": - return "ptrace", nil - case "ptrace.tracee.args_flags": - return "ptrace", nil - case "ptrace.tracee.args_options": - return "ptrace", nil - case "ptrace.tracee.args_truncated": - return "ptrace", nil - case "ptrace.tracee.argv": - return "ptrace", nil - case "ptrace.tracee.argv0": - return "ptrace", nil - case "ptrace.tracee.auid": - return "ptrace", nil - case "ptrace.tracee.cap_effective": - return "ptrace", nil - case "ptrace.tracee.cap_permitted": - return "ptrace", nil - case "ptrace.tracee.cgroup.file.inode": - return "ptrace", nil - case "ptrace.tracee.cgroup.file.mount_id": - return "ptrace", nil - case "ptrace.tracee.cgroup.id": - return "ptrace", nil - case "ptrace.tracee.cgroup.manager": - return "ptrace", nil - case "ptrace.tracee.cgroup.version": - return "ptrace", nil - case "ptrace.tracee.comm": - return "ptrace", nil - case "ptrace.tracee.container.id": - return "ptrace", nil - case "ptrace.tracee.created_at": - 
return "ptrace", nil - case "ptrace.tracee.egid": - return "ptrace", nil - case "ptrace.tracee.egroup": - return "ptrace", nil - case "ptrace.tracee.envp": - return "ptrace", nil - case "ptrace.tracee.envs": - return "ptrace", nil - case "ptrace.tracee.envs_truncated": - return "ptrace", nil - case "ptrace.tracee.euid": - return "ptrace", nil - case "ptrace.tracee.euser": - return "ptrace", nil - case "ptrace.tracee.file.change_time": - return "ptrace", nil - case "ptrace.tracee.file.filesystem": - return "ptrace", nil - case "ptrace.tracee.file.gid": - return "ptrace", nil - case "ptrace.tracee.file.group": - return "ptrace", nil - case "ptrace.tracee.file.hashes": - return "ptrace", nil - case "ptrace.tracee.file.in_upper_layer": - return "ptrace", nil - case "ptrace.tracee.file.inode": - return "ptrace", nil - case "ptrace.tracee.file.mode": - return "ptrace", nil - case "ptrace.tracee.file.modification_time": - return "ptrace", nil - case "ptrace.tracee.file.mount_id": - return "ptrace", nil - case "ptrace.tracee.file.name": - return "ptrace", nil - case "ptrace.tracee.file.name.length": - return "ptrace", nil - case "ptrace.tracee.file.package.name": - return "ptrace", nil - case "ptrace.tracee.file.package.source_version": - return "ptrace", nil - case "ptrace.tracee.file.package.version": - return "ptrace", nil - case "ptrace.tracee.file.path": - return "ptrace", nil - case "ptrace.tracee.file.path.length": - return "ptrace", nil - case "ptrace.tracee.file.rights": - return "ptrace", nil - case "ptrace.tracee.file.uid": - return "ptrace", nil - case "ptrace.tracee.file.user": - return "ptrace", nil - case "ptrace.tracee.fsgid": - return "ptrace", nil - case "ptrace.tracee.fsgroup": - return "ptrace", nil - case "ptrace.tracee.fsuid": - return "ptrace", nil - case "ptrace.tracee.fsuser": - return "ptrace", nil - case "ptrace.tracee.gid": - return "ptrace", nil - case "ptrace.tracee.group": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.change_time": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.filesystem": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.gid": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.group": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.hashes": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.in_upper_layer": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.inode": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.mode": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.modification_time": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.mount_id": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.name": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.name.length": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.package.name": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.package.source_version": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.package.version": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.path": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.path.length": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.rights": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.uid": - return "ptrace", nil - case "ptrace.tracee.interpreter.file.user": - return "ptrace", nil - case "ptrace.tracee.is_exec": - return "ptrace", nil - case "ptrace.tracee.is_kworker": - return "ptrace", nil - case 
"ptrace.tracee.is_thread": - return "ptrace", nil - case "ptrace.tracee.parent.args": - return "ptrace", nil - case "ptrace.tracee.parent.args_flags": - return "ptrace", nil - case "ptrace.tracee.parent.args_options": - return "ptrace", nil - case "ptrace.tracee.parent.args_truncated": - return "ptrace", nil - case "ptrace.tracee.parent.argv": - return "ptrace", nil - case "ptrace.tracee.parent.argv0": - return "ptrace", nil - case "ptrace.tracee.parent.auid": - return "ptrace", nil - case "ptrace.tracee.parent.cap_effective": - return "ptrace", nil - case "ptrace.tracee.parent.cap_permitted": - return "ptrace", nil - case "ptrace.tracee.parent.cgroup.file.inode": - return "ptrace", nil - case "ptrace.tracee.parent.cgroup.file.mount_id": - return "ptrace", nil - case "ptrace.tracee.parent.cgroup.id": - return "ptrace", nil - case "ptrace.tracee.parent.cgroup.manager": - return "ptrace", nil - case "ptrace.tracee.parent.cgroup.version": - return "ptrace", nil - case "ptrace.tracee.parent.comm": - return "ptrace", nil - case "ptrace.tracee.parent.container.id": - return "ptrace", nil - case "ptrace.tracee.parent.created_at": - return "ptrace", nil - case "ptrace.tracee.parent.egid": - return "ptrace", nil - case "ptrace.tracee.parent.egroup": - return "ptrace", nil - case "ptrace.tracee.parent.envp": - return "ptrace", nil - case "ptrace.tracee.parent.envs": - return "ptrace", nil - case "ptrace.tracee.parent.envs_truncated": - return "ptrace", nil - case "ptrace.tracee.parent.euid": - return "ptrace", nil - case "ptrace.tracee.parent.euser": - return "ptrace", nil - case "ptrace.tracee.parent.file.change_time": - return "ptrace", nil - case "ptrace.tracee.parent.file.filesystem": - return "ptrace", nil - case "ptrace.tracee.parent.file.gid": - return "ptrace", nil - case "ptrace.tracee.parent.file.group": - return "ptrace", nil - case "ptrace.tracee.parent.file.hashes": - return "ptrace", nil - case "ptrace.tracee.parent.file.in_upper_layer": - return "ptrace", nil - case "ptrace.tracee.parent.file.inode": - return "ptrace", nil - case "ptrace.tracee.parent.file.mode": - return "ptrace", nil - case "ptrace.tracee.parent.file.modification_time": - return "ptrace", nil - case "ptrace.tracee.parent.file.mount_id": - return "ptrace", nil - case "ptrace.tracee.parent.file.name": - return "ptrace", nil - case "ptrace.tracee.parent.file.name.length": - return "ptrace", nil - case "ptrace.tracee.parent.file.package.name": - return "ptrace", nil - case "ptrace.tracee.parent.file.package.source_version": - return "ptrace", nil - case "ptrace.tracee.parent.file.package.version": - return "ptrace", nil - case "ptrace.tracee.parent.file.path": - return "ptrace", nil - case "ptrace.tracee.parent.file.path.length": - return "ptrace", nil - case "ptrace.tracee.parent.file.rights": - return "ptrace", nil - case "ptrace.tracee.parent.file.uid": - return "ptrace", nil - case "ptrace.tracee.parent.file.user": - return "ptrace", nil - case "ptrace.tracee.parent.fsgid": - return "ptrace", nil - case "ptrace.tracee.parent.fsgroup": - return "ptrace", nil - case "ptrace.tracee.parent.fsuid": - return "ptrace", nil - case "ptrace.tracee.parent.fsuser": - return "ptrace", nil - case "ptrace.tracee.parent.gid": - return "ptrace", nil - case "ptrace.tracee.parent.group": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.change_time": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.filesystem": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.gid": - return 
"ptrace", nil - case "ptrace.tracee.parent.interpreter.file.group": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.hashes": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.in_upper_layer": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.inode": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.mode": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.modification_time": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.mount_id": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.name": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.name.length": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.package.name": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.package.source_version": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.package.version": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.path": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.path.length": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.rights": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.uid": - return "ptrace", nil - case "ptrace.tracee.parent.interpreter.file.user": - return "ptrace", nil - case "ptrace.tracee.parent.is_exec": - return "ptrace", nil - case "ptrace.tracee.parent.is_kworker": - return "ptrace", nil - case "ptrace.tracee.parent.is_thread": - return "ptrace", nil - case "ptrace.tracee.parent.pid": - return "ptrace", nil - case "ptrace.tracee.parent.ppid": - return "ptrace", nil - case "ptrace.tracee.parent.tid": - return "ptrace", nil - case "ptrace.tracee.parent.tty_name": - return "ptrace", nil - case "ptrace.tracee.parent.uid": - return "ptrace", nil - case "ptrace.tracee.parent.user": - return "ptrace", nil - case "ptrace.tracee.parent.user_session.k8s_groups": - return "ptrace", nil - case "ptrace.tracee.parent.user_session.k8s_uid": - return "ptrace", nil - case "ptrace.tracee.parent.user_session.k8s_username": - return "ptrace", nil - case "ptrace.tracee.pid": - return "ptrace", nil - case "ptrace.tracee.ppid": - return "ptrace", nil - case "ptrace.tracee.tid": - return "ptrace", nil - case "ptrace.tracee.tty_name": - return "ptrace", nil - case "ptrace.tracee.uid": - return "ptrace", nil - case "ptrace.tracee.user": - return "ptrace", nil - case "ptrace.tracee.user_session.k8s_groups": - return "ptrace", nil - case "ptrace.tracee.user_session.k8s_uid": - return "ptrace", nil - case "ptrace.tracee.user_session.k8s_username": - return "ptrace", nil - case "removexattr.file.change_time": - return "removexattr", nil - case "removexattr.file.destination.name": - return "removexattr", nil - case "removexattr.file.destination.namespace": - return "removexattr", nil - case "removexattr.file.filesystem": - return "removexattr", nil - case "removexattr.file.gid": - return "removexattr", nil - case "removexattr.file.group": - return "removexattr", nil - case "removexattr.file.hashes": - return "removexattr", nil - case "removexattr.file.in_upper_layer": - return "removexattr", nil - case "removexattr.file.inode": - return "removexattr", nil - case "removexattr.file.mode": - return "removexattr", nil - case "removexattr.file.modification_time": - return "removexattr", nil - case "removexattr.file.mount_id": - return "removexattr", nil - case "removexattr.file.name": - return "removexattr", nil - case 
"removexattr.file.name.length": - return "removexattr", nil - case "removexattr.file.package.name": - return "removexattr", nil - case "removexattr.file.package.source_version": - return "removexattr", nil - case "removexattr.file.package.version": - return "removexattr", nil - case "removexattr.file.path": - return "removexattr", nil - case "removexattr.file.path.length": - return "removexattr", nil - case "removexattr.file.rights": - return "removexattr", nil - case "removexattr.file.uid": - return "removexattr", nil - case "removexattr.file.user": - return "removexattr", nil - case "removexattr.retval": - return "removexattr", nil - case "rename.file.change_time": - return "rename", nil - case "rename.file.destination.change_time": - return "rename", nil - case "rename.file.destination.filesystem": - return "rename", nil - case "rename.file.destination.gid": - return "rename", nil - case "rename.file.destination.group": - return "rename", nil - case "rename.file.destination.hashes": - return "rename", nil - case "rename.file.destination.in_upper_layer": - return "rename", nil - case "rename.file.destination.inode": - return "rename", nil - case "rename.file.destination.mode": - return "rename", nil - case "rename.file.destination.modification_time": - return "rename", nil - case "rename.file.destination.mount_id": - return "rename", nil - case "rename.file.destination.name": - return "rename", nil - case "rename.file.destination.name.length": - return "rename", nil - case "rename.file.destination.package.name": - return "rename", nil - case "rename.file.destination.package.source_version": - return "rename", nil - case "rename.file.destination.package.version": - return "rename", nil - case "rename.file.destination.path": - return "rename", nil - case "rename.file.destination.path.length": - return "rename", nil - case "rename.file.destination.rights": - return "rename", nil - case "rename.file.destination.uid": - return "rename", nil - case "rename.file.destination.user": - return "rename", nil - case "rename.file.filesystem": - return "rename", nil - case "rename.file.gid": - return "rename", nil - case "rename.file.group": - return "rename", nil - case "rename.file.hashes": - return "rename", nil - case "rename.file.in_upper_layer": - return "rename", nil - case "rename.file.inode": - return "rename", nil - case "rename.file.mode": - return "rename", nil - case "rename.file.modification_time": - return "rename", nil - case "rename.file.mount_id": - return "rename", nil - case "rename.file.name": - return "rename", nil - case "rename.file.name.length": - return "rename", nil - case "rename.file.package.name": - return "rename", nil - case "rename.file.package.source_version": - return "rename", nil - case "rename.file.package.version": - return "rename", nil - case "rename.file.path": - return "rename", nil - case "rename.file.path.length": - return "rename", nil - case "rename.file.rights": - return "rename", nil - case "rename.file.uid": - return "rename", nil - case "rename.file.user": - return "rename", nil - case "rename.retval": - return "rename", nil - case "rename.syscall.destination.path": - return "rename", nil - case "rename.syscall.path": - return "rename", nil - case "rmdir.file.change_time": - return "rmdir", nil - case "rmdir.file.filesystem": - return "rmdir", nil - case "rmdir.file.gid": - return "rmdir", nil - case "rmdir.file.group": - return "rmdir", nil - case "rmdir.file.hashes": - return "rmdir", nil - case "rmdir.file.in_upper_layer": - return "rmdir", nil - 
case "rmdir.file.inode": - return "rmdir", nil - case "rmdir.file.mode": - return "rmdir", nil - case "rmdir.file.modification_time": - return "rmdir", nil - case "rmdir.file.mount_id": - return "rmdir", nil - case "rmdir.file.name": - return "rmdir", nil - case "rmdir.file.name.length": - return "rmdir", nil - case "rmdir.file.package.name": - return "rmdir", nil - case "rmdir.file.package.source_version": - return "rmdir", nil - case "rmdir.file.package.version": - return "rmdir", nil - case "rmdir.file.path": - return "rmdir", nil - case "rmdir.file.path.length": - return "rmdir", nil - case "rmdir.file.rights": - return "rmdir", nil - case "rmdir.file.uid": - return "rmdir", nil - case "rmdir.file.user": - return "rmdir", nil - case "rmdir.retval": - return "rmdir", nil - case "selinux.bool.name": - return "selinux", nil - case "selinux.bool.state": - return "selinux", nil - case "selinux.bool_commit.state": - return "selinux", nil - case "selinux.enforce.status": - return "selinux", nil - case "setgid.egid": - return "setgid", nil - case "setgid.egroup": - return "setgid", nil - case "setgid.fsgid": - return "setgid", nil - case "setgid.fsgroup": - return "setgid", nil - case "setgid.gid": - return "setgid", nil - case "setgid.group": - return "setgid", nil - case "setuid.euid": - return "setuid", nil - case "setuid.euser": - return "setuid", nil - case "setuid.fsuid": - return "setuid", nil - case "setuid.fsuser": - return "setuid", nil - case "setuid.uid": - return "setuid", nil - case "setuid.user": - return "setuid", nil - case "setxattr.file.change_time": - return "setxattr", nil - case "setxattr.file.destination.name": - return "setxattr", nil - case "setxattr.file.destination.namespace": - return "setxattr", nil - case "setxattr.file.filesystem": - return "setxattr", nil - case "setxattr.file.gid": - return "setxattr", nil - case "setxattr.file.group": - return "setxattr", nil - case "setxattr.file.hashes": - return "setxattr", nil - case "setxattr.file.in_upper_layer": - return "setxattr", nil - case "setxattr.file.inode": - return "setxattr", nil - case "setxattr.file.mode": - return "setxattr", nil - case "setxattr.file.modification_time": - return "setxattr", nil - case "setxattr.file.mount_id": - return "setxattr", nil - case "setxattr.file.name": - return "setxattr", nil - case "setxattr.file.name.length": - return "setxattr", nil - case "setxattr.file.package.name": - return "setxattr", nil - case "setxattr.file.package.source_version": - return "setxattr", nil - case "setxattr.file.package.version": - return "setxattr", nil - case "setxattr.file.path": - return "setxattr", nil - case "setxattr.file.path.length": - return "setxattr", nil - case "setxattr.file.rights": - return "setxattr", nil - case "setxattr.file.uid": - return "setxattr", nil - case "setxattr.file.user": - return "setxattr", nil - case "setxattr.retval": - return "setxattr", nil - case "signal.pid": - return "signal", nil - case "signal.retval": - return "signal", nil - case "signal.target.ancestors.args": - return "signal", nil - case "signal.target.ancestors.args_flags": - return "signal", nil - case "signal.target.ancestors.args_options": - return "signal", nil - case "signal.target.ancestors.args_truncated": - return "signal", nil - case "signal.target.ancestors.argv": - return "signal", nil - case "signal.target.ancestors.argv0": - return "signal", nil - case "signal.target.ancestors.auid": - return "signal", nil - case "signal.target.ancestors.cap_effective": - return "signal", nil - case 
"signal.target.ancestors.cap_permitted": - return "signal", nil - case "signal.target.ancestors.cgroup.file.inode": - return "signal", nil - case "signal.target.ancestors.cgroup.file.mount_id": - return "signal", nil - case "signal.target.ancestors.cgroup.id": - return "signal", nil - case "signal.target.ancestors.cgroup.manager": - return "signal", nil - case "signal.target.ancestors.cgroup.version": - return "signal", nil - case "signal.target.ancestors.comm": - return "signal", nil - case "signal.target.ancestors.container.id": - return "signal", nil - case "signal.target.ancestors.created_at": - return "signal", nil - case "signal.target.ancestors.egid": - return "signal", nil - case "signal.target.ancestors.egroup": - return "signal", nil - case "signal.target.ancestors.envp": - return "signal", nil - case "signal.target.ancestors.envs": - return "signal", nil - case "signal.target.ancestors.envs_truncated": - return "signal", nil - case "signal.target.ancestors.euid": - return "signal", nil - case "signal.target.ancestors.euser": - return "signal", nil - case "signal.target.ancestors.file.change_time": - return "signal", nil - case "signal.target.ancestors.file.filesystem": - return "signal", nil - case "signal.target.ancestors.file.gid": - return "signal", nil - case "signal.target.ancestors.file.group": - return "signal", nil - case "signal.target.ancestors.file.hashes": - return "signal", nil - case "signal.target.ancestors.file.in_upper_layer": - return "signal", nil - case "signal.target.ancestors.file.inode": - return "signal", nil - case "signal.target.ancestors.file.mode": - return "signal", nil - case "signal.target.ancestors.file.modification_time": - return "signal", nil - case "signal.target.ancestors.file.mount_id": - return "signal", nil - case "signal.target.ancestors.file.name": - return "signal", nil - case "signal.target.ancestors.file.name.length": - return "signal", nil - case "signal.target.ancestors.file.package.name": - return "signal", nil - case "signal.target.ancestors.file.package.source_version": - return "signal", nil - case "signal.target.ancestors.file.package.version": - return "signal", nil - case "signal.target.ancestors.file.path": - return "signal", nil - case "signal.target.ancestors.file.path.length": - return "signal", nil - case "signal.target.ancestors.file.rights": - return "signal", nil - case "signal.target.ancestors.file.uid": - return "signal", nil - case "signal.target.ancestors.file.user": - return "signal", nil - case "signal.target.ancestors.fsgid": - return "signal", nil - case "signal.target.ancestors.fsgroup": - return "signal", nil - case "signal.target.ancestors.fsuid": - return "signal", nil - case "signal.target.ancestors.fsuser": - return "signal", nil - case "signal.target.ancestors.gid": - return "signal", nil - case "signal.target.ancestors.group": - return "signal", nil - case "signal.target.ancestors.interpreter.file.change_time": - return "signal", nil - case "signal.target.ancestors.interpreter.file.filesystem": - return "signal", nil - case "signal.target.ancestors.interpreter.file.gid": - return "signal", nil - case "signal.target.ancestors.interpreter.file.group": - return "signal", nil - case "signal.target.ancestors.interpreter.file.hashes": - return "signal", nil - case "signal.target.ancestors.interpreter.file.in_upper_layer": - return "signal", nil - case "signal.target.ancestors.interpreter.file.inode": - return "signal", nil - case "signal.target.ancestors.interpreter.file.mode": - return "signal", nil - case 
"signal.target.ancestors.interpreter.file.modification_time": - return "signal", nil - case "signal.target.ancestors.interpreter.file.mount_id": - return "signal", nil - case "signal.target.ancestors.interpreter.file.name": - return "signal", nil - case "signal.target.ancestors.interpreter.file.name.length": - return "signal", nil - case "signal.target.ancestors.interpreter.file.package.name": - return "signal", nil - case "signal.target.ancestors.interpreter.file.package.source_version": - return "signal", nil - case "signal.target.ancestors.interpreter.file.package.version": - return "signal", nil - case "signal.target.ancestors.interpreter.file.path": - return "signal", nil - case "signal.target.ancestors.interpreter.file.path.length": - return "signal", nil - case "signal.target.ancestors.interpreter.file.rights": - return "signal", nil - case "signal.target.ancestors.interpreter.file.uid": - return "signal", nil - case "signal.target.ancestors.interpreter.file.user": - return "signal", nil - case "signal.target.ancestors.is_exec": - return "signal", nil - case "signal.target.ancestors.is_kworker": - return "signal", nil - case "signal.target.ancestors.is_thread": - return "signal", nil - case "signal.target.ancestors.length": - return "signal", nil - case "signal.target.ancestors.pid": - return "signal", nil - case "signal.target.ancestors.ppid": - return "signal", nil - case "signal.target.ancestors.tid": - return "signal", nil - case "signal.target.ancestors.tty_name": - return "signal", nil - case "signal.target.ancestors.uid": - return "signal", nil - case "signal.target.ancestors.user": - return "signal", nil - case "signal.target.ancestors.user_session.k8s_groups": - return "signal", nil - case "signal.target.ancestors.user_session.k8s_uid": - return "signal", nil - case "signal.target.ancestors.user_session.k8s_username": - return "signal", nil - case "signal.target.args": - return "signal", nil - case "signal.target.args_flags": - return "signal", nil - case "signal.target.args_options": - return "signal", nil - case "signal.target.args_truncated": - return "signal", nil - case "signal.target.argv": - return "signal", nil - case "signal.target.argv0": - return "signal", nil - case "signal.target.auid": - return "signal", nil - case "signal.target.cap_effective": - return "signal", nil - case "signal.target.cap_permitted": - return "signal", nil - case "signal.target.cgroup.file.inode": - return "signal", nil - case "signal.target.cgroup.file.mount_id": - return "signal", nil - case "signal.target.cgroup.id": - return "signal", nil - case "signal.target.cgroup.manager": - return "signal", nil - case "signal.target.cgroup.version": - return "signal", nil - case "signal.target.comm": - return "signal", nil - case "signal.target.container.id": - return "signal", nil - case "signal.target.created_at": - return "signal", nil - case "signal.target.egid": - return "signal", nil - case "signal.target.egroup": - return "signal", nil - case "signal.target.envp": - return "signal", nil - case "signal.target.envs": - return "signal", nil - case "signal.target.envs_truncated": - return "signal", nil - case "signal.target.euid": - return "signal", nil - case "signal.target.euser": - return "signal", nil - case "signal.target.file.change_time": - return "signal", nil - case "signal.target.file.filesystem": - return "signal", nil - case "signal.target.file.gid": - return "signal", nil - case "signal.target.file.group": - return "signal", nil - case "signal.target.file.hashes": - return 
"signal", nil - case "signal.target.file.in_upper_layer": - return "signal", nil - case "signal.target.file.inode": - return "signal", nil - case "signal.target.file.mode": - return "signal", nil - case "signal.target.file.modification_time": - return "signal", nil - case "signal.target.file.mount_id": - return "signal", nil - case "signal.target.file.name": - return "signal", nil - case "signal.target.file.name.length": - return "signal", nil - case "signal.target.file.package.name": - return "signal", nil - case "signal.target.file.package.source_version": - return "signal", nil - case "signal.target.file.package.version": - return "signal", nil - case "signal.target.file.path": - return "signal", nil - case "signal.target.file.path.length": - return "signal", nil - case "signal.target.file.rights": - return "signal", nil - case "signal.target.file.uid": - return "signal", nil - case "signal.target.file.user": - return "signal", nil - case "signal.target.fsgid": - return "signal", nil - case "signal.target.fsgroup": - return "signal", nil - case "signal.target.fsuid": - return "signal", nil - case "signal.target.fsuser": - return "signal", nil - case "signal.target.gid": - return "signal", nil - case "signal.target.group": - return "signal", nil - case "signal.target.interpreter.file.change_time": - return "signal", nil - case "signal.target.interpreter.file.filesystem": - return "signal", nil - case "signal.target.interpreter.file.gid": - return "signal", nil - case "signal.target.interpreter.file.group": - return "signal", nil - case "signal.target.interpreter.file.hashes": - return "signal", nil - case "signal.target.interpreter.file.in_upper_layer": - return "signal", nil - case "signal.target.interpreter.file.inode": - return "signal", nil - case "signal.target.interpreter.file.mode": - return "signal", nil - case "signal.target.interpreter.file.modification_time": - return "signal", nil - case "signal.target.interpreter.file.mount_id": - return "signal", nil - case "signal.target.interpreter.file.name": - return "signal", nil - case "signal.target.interpreter.file.name.length": - return "signal", nil - case "signal.target.interpreter.file.package.name": - return "signal", nil - case "signal.target.interpreter.file.package.source_version": - return "signal", nil - case "signal.target.interpreter.file.package.version": - return "signal", nil - case "signal.target.interpreter.file.path": - return "signal", nil - case "signal.target.interpreter.file.path.length": - return "signal", nil - case "signal.target.interpreter.file.rights": - return "signal", nil - case "signal.target.interpreter.file.uid": - return "signal", nil - case "signal.target.interpreter.file.user": - return "signal", nil - case "signal.target.is_exec": - return "signal", nil - case "signal.target.is_kworker": - return "signal", nil - case "signal.target.is_thread": - return "signal", nil - case "signal.target.parent.args": - return "signal", nil - case "signal.target.parent.args_flags": - return "signal", nil - case "signal.target.parent.args_options": - return "signal", nil - case "signal.target.parent.args_truncated": - return "signal", nil - case "signal.target.parent.argv": - return "signal", nil - case "signal.target.parent.argv0": - return "signal", nil - case "signal.target.parent.auid": - return "signal", nil - case "signal.target.parent.cap_effective": - return "signal", nil - case "signal.target.parent.cap_permitted": - return "signal", nil - case "signal.target.parent.cgroup.file.inode": - return "signal", 
nil - case "signal.target.parent.cgroup.file.mount_id": - return "signal", nil - case "signal.target.parent.cgroup.id": - return "signal", nil - case "signal.target.parent.cgroup.manager": - return "signal", nil - case "signal.target.parent.cgroup.version": - return "signal", nil - case "signal.target.parent.comm": - return "signal", nil - case "signal.target.parent.container.id": - return "signal", nil - case "signal.target.parent.created_at": - return "signal", nil - case "signal.target.parent.egid": - return "signal", nil - case "signal.target.parent.egroup": - return "signal", nil - case "signal.target.parent.envp": - return "signal", nil - case "signal.target.parent.envs": - return "signal", nil - case "signal.target.parent.envs_truncated": - return "signal", nil - case "signal.target.parent.euid": - return "signal", nil - case "signal.target.parent.euser": - return "signal", nil - case "signal.target.parent.file.change_time": - return "signal", nil - case "signal.target.parent.file.filesystem": - return "signal", nil - case "signal.target.parent.file.gid": - return "signal", nil - case "signal.target.parent.file.group": - return "signal", nil - case "signal.target.parent.file.hashes": - return "signal", nil - case "signal.target.parent.file.in_upper_layer": - return "signal", nil - case "signal.target.parent.file.inode": - return "signal", nil - case "signal.target.parent.file.mode": - return "signal", nil - case "signal.target.parent.file.modification_time": - return "signal", nil - case "signal.target.parent.file.mount_id": - return "signal", nil - case "signal.target.parent.file.name": - return "signal", nil - case "signal.target.parent.file.name.length": - return "signal", nil - case "signal.target.parent.file.package.name": - return "signal", nil - case "signal.target.parent.file.package.source_version": - return "signal", nil - case "signal.target.parent.file.package.version": - return "signal", nil - case "signal.target.parent.file.path": - return "signal", nil - case "signal.target.parent.file.path.length": - return "signal", nil - case "signal.target.parent.file.rights": - return "signal", nil - case "signal.target.parent.file.uid": - return "signal", nil - case "signal.target.parent.file.user": - return "signal", nil - case "signal.target.parent.fsgid": - return "signal", nil - case "signal.target.parent.fsgroup": - return "signal", nil - case "signal.target.parent.fsuid": - return "signal", nil - case "signal.target.parent.fsuser": - return "signal", nil - case "signal.target.parent.gid": - return "signal", nil - case "signal.target.parent.group": - return "signal", nil - case "signal.target.parent.interpreter.file.change_time": - return "signal", nil - case "signal.target.parent.interpreter.file.filesystem": - return "signal", nil - case "signal.target.parent.interpreter.file.gid": - return "signal", nil - case "signal.target.parent.interpreter.file.group": - return "signal", nil - case "signal.target.parent.interpreter.file.hashes": - return "signal", nil - case "signal.target.parent.interpreter.file.in_upper_layer": - return "signal", nil - case "signal.target.parent.interpreter.file.inode": - return "signal", nil - case "signal.target.parent.interpreter.file.mode": - return "signal", nil - case "signal.target.parent.interpreter.file.modification_time": - return "signal", nil - case "signal.target.parent.interpreter.file.mount_id": - return "signal", nil - case "signal.target.parent.interpreter.file.name": - return "signal", nil - case 
"signal.target.parent.interpreter.file.name.length": - return "signal", nil - case "signal.target.parent.interpreter.file.package.name": - return "signal", nil - case "signal.target.parent.interpreter.file.package.source_version": - return "signal", nil - case "signal.target.parent.interpreter.file.package.version": - return "signal", nil - case "signal.target.parent.interpreter.file.path": - return "signal", nil - case "signal.target.parent.interpreter.file.path.length": - return "signal", nil - case "signal.target.parent.interpreter.file.rights": - return "signal", nil - case "signal.target.parent.interpreter.file.uid": - return "signal", nil - case "signal.target.parent.interpreter.file.user": - return "signal", nil - case "signal.target.parent.is_exec": - return "signal", nil - case "signal.target.parent.is_kworker": - return "signal", nil - case "signal.target.parent.is_thread": - return "signal", nil - case "signal.target.parent.pid": - return "signal", nil - case "signal.target.parent.ppid": - return "signal", nil - case "signal.target.parent.tid": - return "signal", nil - case "signal.target.parent.tty_name": - return "signal", nil - case "signal.target.parent.uid": - return "signal", nil - case "signal.target.parent.user": - return "signal", nil - case "signal.target.parent.user_session.k8s_groups": - return "signal", nil - case "signal.target.parent.user_session.k8s_uid": - return "signal", nil - case "signal.target.parent.user_session.k8s_username": - return "signal", nil - case "signal.target.pid": - return "signal", nil - case "signal.target.ppid": - return "signal", nil - case "signal.target.tid": - return "signal", nil - case "signal.target.tty_name": - return "signal", nil - case "signal.target.uid": - return "signal", nil - case "signal.target.user": - return "signal", nil - case "signal.target.user_session.k8s_groups": - return "signal", nil - case "signal.target.user_session.k8s_uid": - return "signal", nil - case "signal.target.user_session.k8s_username": - return "signal", nil - case "signal.type": - return "signal", nil - case "splice.file.change_time": - return "splice", nil - case "splice.file.filesystem": - return "splice", nil - case "splice.file.gid": - return "splice", nil - case "splice.file.group": - return "splice", nil - case "splice.file.hashes": - return "splice", nil - case "splice.file.in_upper_layer": - return "splice", nil - case "splice.file.inode": - return "splice", nil - case "splice.file.mode": - return "splice", nil - case "splice.file.modification_time": - return "splice", nil - case "splice.file.mount_id": - return "splice", nil - case "splice.file.name": - return "splice", nil - case "splice.file.name.length": - return "splice", nil - case "splice.file.package.name": - return "splice", nil - case "splice.file.package.source_version": - return "splice", nil - case "splice.file.package.version": - return "splice", nil - case "splice.file.path": - return "splice", nil - case "splice.file.path.length": - return "splice", nil - case "splice.file.rights": - return "splice", nil - case "splice.file.uid": - return "splice", nil - case "splice.file.user": - return "splice", nil - case "splice.pipe_entry_flag": - return "splice", nil - case "splice.pipe_exit_flag": - return "splice", nil - case "splice.retval": - return "splice", nil - case "unlink.file.change_time": - return "unlink", nil - case "unlink.file.filesystem": - return "unlink", nil - case "unlink.file.gid": - return "unlink", nil - case "unlink.file.group": - return "unlink", nil - case 
"unlink.file.hashes": - return "unlink", nil - case "unlink.file.in_upper_layer": - return "unlink", nil - case "unlink.file.inode": - return "unlink", nil - case "unlink.file.mode": - return "unlink", nil - case "unlink.file.modification_time": - return "unlink", nil - case "unlink.file.mount_id": - return "unlink", nil - case "unlink.file.name": - return "unlink", nil - case "unlink.file.name.length": - return "unlink", nil - case "unlink.file.package.name": - return "unlink", nil - case "unlink.file.package.source_version": - return "unlink", nil - case "unlink.file.package.version": - return "unlink", nil - case "unlink.file.path": - return "unlink", nil - case "unlink.file.path.length": - return "unlink", nil - case "unlink.file.rights": - return "unlink", nil - case "unlink.file.uid": - return "unlink", nil - case "unlink.file.user": - return "unlink", nil - case "unlink.flags": - return "unlink", nil - case "unlink.retval": - return "unlink", nil - case "unlink.syscall.dirfd": - return "unlink", nil - case "unlink.syscall.flags": - return "unlink", nil - case "unlink.syscall.path": - return "unlink", nil - case "unload_module.name": - return "unload_module", nil - case "unload_module.retval": - return "unload_module", nil - case "utimes.file.change_time": - return "utimes", nil - case "utimes.file.filesystem": - return "utimes", nil - case "utimes.file.gid": - return "utimes", nil - case "utimes.file.group": - return "utimes", nil - case "utimes.file.hashes": - return "utimes", nil - case "utimes.file.in_upper_layer": - return "utimes", nil - case "utimes.file.inode": - return "utimes", nil - case "utimes.file.mode": - return "utimes", nil - case "utimes.file.modification_time": - return "utimes", nil - case "utimes.file.mount_id": - return "utimes", nil - case "utimes.file.name": - return "utimes", nil - case "utimes.file.name.length": - return "utimes", nil - case "utimes.file.package.name": - return "utimes", nil - case "utimes.file.package.source_version": - return "utimes", nil - case "utimes.file.package.version": - return "utimes", nil - case "utimes.file.path": - return "utimes", nil - case "utimes.file.path.length": - return "utimes", nil - case "utimes.file.rights": - return "utimes", nil - case "utimes.file.uid": - return "utimes", nil - case "utimes.file.user": - return "utimes", nil - case "utimes.retval": - return "utimes", nil - case "utimes.syscall.path": - return "utimes", nil + "rename.file.package.version", + "rename.file.path", + "rename.file.path.length", + "rename.file.rights", + "rename.file.uid", + "rename.file.user", + "rename.retval", + "rename.syscall.destination.path", + "rename.syscall.path", + "rmdir.file.change_time", + "rmdir.file.filesystem", + "rmdir.file.gid", + "rmdir.file.group", + "rmdir.file.hashes", + "rmdir.file.in_upper_layer", + "rmdir.file.inode", + "rmdir.file.mode", + "rmdir.file.modification_time", + "rmdir.file.mount_id", + "rmdir.file.name", + "rmdir.file.name.length", + "rmdir.file.package.name", + "rmdir.file.package.source_version", + "rmdir.file.package.version", + "rmdir.file.path", + "rmdir.file.path.length", + "rmdir.file.rights", + "rmdir.file.uid", + "rmdir.file.user", + "rmdir.retval", + "rmdir.syscall.path", + "selinux.bool.name", + "selinux.bool.state", + "selinux.bool_commit.state", + "selinux.enforce.status", + "setgid.egid", + "setgid.egroup", + "setgid.fsgid", + "setgid.fsgroup", + "setgid.gid", + "setgid.group", + "setuid.euid", + "setuid.euser", + "setuid.fsuid", + "setuid.fsuser", + "setuid.uid", + "setuid.user", + 
"setxattr.file.change_time", + "setxattr.file.destination.name", + "setxattr.file.destination.namespace", + "setxattr.file.filesystem", + "setxattr.file.gid", + "setxattr.file.group", + "setxattr.file.hashes", + "setxattr.file.in_upper_layer", + "setxattr.file.inode", + "setxattr.file.mode", + "setxattr.file.modification_time", + "setxattr.file.mount_id", + "setxattr.file.name", + "setxattr.file.name.length", + "setxattr.file.package.name", + "setxattr.file.package.source_version", + "setxattr.file.package.version", + "setxattr.file.path", + "setxattr.file.path.length", + "setxattr.file.rights", + "setxattr.file.uid", + "setxattr.file.user", + "setxattr.retval", + "signal.pid", + "signal.retval", + "signal.target.ancestors.args", + "signal.target.ancestors.args_flags", + "signal.target.ancestors.args_options", + "signal.target.ancestors.args_truncated", + "signal.target.ancestors.argv", + "signal.target.ancestors.argv0", + "signal.target.ancestors.auid", + "signal.target.ancestors.cap_effective", + "signal.target.ancestors.cap_permitted", + "signal.target.ancestors.cgroup.file.inode", + "signal.target.ancestors.cgroup.file.mount_id", + "signal.target.ancestors.cgroup.id", + "signal.target.ancestors.cgroup.manager", + "signal.target.ancestors.cgroup.version", + "signal.target.ancestors.comm", + "signal.target.ancestors.container.id", + "signal.target.ancestors.created_at", + "signal.target.ancestors.egid", + "signal.target.ancestors.egroup", + "signal.target.ancestors.envp", + "signal.target.ancestors.envs", + "signal.target.ancestors.envs_truncated", + "signal.target.ancestors.euid", + "signal.target.ancestors.euser", + "signal.target.ancestors.file.change_time", + "signal.target.ancestors.file.filesystem", + "signal.target.ancestors.file.gid", + "signal.target.ancestors.file.group", + "signal.target.ancestors.file.hashes", + "signal.target.ancestors.file.in_upper_layer", + "signal.target.ancestors.file.inode", + "signal.target.ancestors.file.mode", + "signal.target.ancestors.file.modification_time", + "signal.target.ancestors.file.mount_id", + "signal.target.ancestors.file.name", + "signal.target.ancestors.file.name.length", + "signal.target.ancestors.file.package.name", + "signal.target.ancestors.file.package.source_version", + "signal.target.ancestors.file.package.version", + "signal.target.ancestors.file.path", + "signal.target.ancestors.file.path.length", + "signal.target.ancestors.file.rights", + "signal.target.ancestors.file.uid", + "signal.target.ancestors.file.user", + "signal.target.ancestors.fsgid", + "signal.target.ancestors.fsgroup", + "signal.target.ancestors.fsuid", + "signal.target.ancestors.fsuser", + "signal.target.ancestors.gid", + "signal.target.ancestors.group", + "signal.target.ancestors.interpreter.file.change_time", + "signal.target.ancestors.interpreter.file.filesystem", + "signal.target.ancestors.interpreter.file.gid", + "signal.target.ancestors.interpreter.file.group", + "signal.target.ancestors.interpreter.file.hashes", + "signal.target.ancestors.interpreter.file.in_upper_layer", + "signal.target.ancestors.interpreter.file.inode", + "signal.target.ancestors.interpreter.file.mode", + "signal.target.ancestors.interpreter.file.modification_time", + "signal.target.ancestors.interpreter.file.mount_id", + "signal.target.ancestors.interpreter.file.name", + "signal.target.ancestors.interpreter.file.name.length", + "signal.target.ancestors.interpreter.file.package.name", + "signal.target.ancestors.interpreter.file.package.source_version", + 
"signal.target.ancestors.interpreter.file.package.version", + "signal.target.ancestors.interpreter.file.path", + "signal.target.ancestors.interpreter.file.path.length", + "signal.target.ancestors.interpreter.file.rights", + "signal.target.ancestors.interpreter.file.uid", + "signal.target.ancestors.interpreter.file.user", + "signal.target.ancestors.is_exec", + "signal.target.ancestors.is_kworker", + "signal.target.ancestors.is_thread", + "signal.target.ancestors.length", + "signal.target.ancestors.pid", + "signal.target.ancestors.ppid", + "signal.target.ancestors.tid", + "signal.target.ancestors.tty_name", + "signal.target.ancestors.uid", + "signal.target.ancestors.user", + "signal.target.ancestors.user_session.k8s_groups", + "signal.target.ancestors.user_session.k8s_uid", + "signal.target.ancestors.user_session.k8s_username", + "signal.target.args", + "signal.target.args_flags", + "signal.target.args_options", + "signal.target.args_truncated", + "signal.target.argv", + "signal.target.argv0", + "signal.target.auid", + "signal.target.cap_effective", + "signal.target.cap_permitted", + "signal.target.cgroup.file.inode", + "signal.target.cgroup.file.mount_id", + "signal.target.cgroup.id", + "signal.target.cgroup.manager", + "signal.target.cgroup.version", + "signal.target.comm", + "signal.target.container.id", + "signal.target.created_at", + "signal.target.egid", + "signal.target.egroup", + "signal.target.envp", + "signal.target.envs", + "signal.target.envs_truncated", + "signal.target.euid", + "signal.target.euser", + "signal.target.file.change_time", + "signal.target.file.filesystem", + "signal.target.file.gid", + "signal.target.file.group", + "signal.target.file.hashes", + "signal.target.file.in_upper_layer", + "signal.target.file.inode", + "signal.target.file.mode", + "signal.target.file.modification_time", + "signal.target.file.mount_id", + "signal.target.file.name", + "signal.target.file.name.length", + "signal.target.file.package.name", + "signal.target.file.package.source_version", + "signal.target.file.package.version", + "signal.target.file.path", + "signal.target.file.path.length", + "signal.target.file.rights", + "signal.target.file.uid", + "signal.target.file.user", + "signal.target.fsgid", + "signal.target.fsgroup", + "signal.target.fsuid", + "signal.target.fsuser", + "signal.target.gid", + "signal.target.group", + "signal.target.interpreter.file.change_time", + "signal.target.interpreter.file.filesystem", + "signal.target.interpreter.file.gid", + "signal.target.interpreter.file.group", + "signal.target.interpreter.file.hashes", + "signal.target.interpreter.file.in_upper_layer", + "signal.target.interpreter.file.inode", + "signal.target.interpreter.file.mode", + "signal.target.interpreter.file.modification_time", + "signal.target.interpreter.file.mount_id", + "signal.target.interpreter.file.name", + "signal.target.interpreter.file.name.length", + "signal.target.interpreter.file.package.name", + "signal.target.interpreter.file.package.source_version", + "signal.target.interpreter.file.package.version", + "signal.target.interpreter.file.path", + "signal.target.interpreter.file.path.length", + "signal.target.interpreter.file.rights", + "signal.target.interpreter.file.uid", + "signal.target.interpreter.file.user", + "signal.target.is_exec", + "signal.target.is_kworker", + "signal.target.is_thread", + "signal.target.parent.args", + "signal.target.parent.args_flags", + "signal.target.parent.args_options", + "signal.target.parent.args_truncated", + "signal.target.parent.argv", + 
"signal.target.parent.argv0", + "signal.target.parent.auid", + "signal.target.parent.cap_effective", + "signal.target.parent.cap_permitted", + "signal.target.parent.cgroup.file.inode", + "signal.target.parent.cgroup.file.mount_id", + "signal.target.parent.cgroup.id", + "signal.target.parent.cgroup.manager", + "signal.target.parent.cgroup.version", + "signal.target.parent.comm", + "signal.target.parent.container.id", + "signal.target.parent.created_at", + "signal.target.parent.egid", + "signal.target.parent.egroup", + "signal.target.parent.envp", + "signal.target.parent.envs", + "signal.target.parent.envs_truncated", + "signal.target.parent.euid", + "signal.target.parent.euser", + "signal.target.parent.file.change_time", + "signal.target.parent.file.filesystem", + "signal.target.parent.file.gid", + "signal.target.parent.file.group", + "signal.target.parent.file.hashes", + "signal.target.parent.file.in_upper_layer", + "signal.target.parent.file.inode", + "signal.target.parent.file.mode", + "signal.target.parent.file.modification_time", + "signal.target.parent.file.mount_id", + "signal.target.parent.file.name", + "signal.target.parent.file.name.length", + "signal.target.parent.file.package.name", + "signal.target.parent.file.package.source_version", + "signal.target.parent.file.package.version", + "signal.target.parent.file.path", + "signal.target.parent.file.path.length", + "signal.target.parent.file.rights", + "signal.target.parent.file.uid", + "signal.target.parent.file.user", + "signal.target.parent.fsgid", + "signal.target.parent.fsgroup", + "signal.target.parent.fsuid", + "signal.target.parent.fsuser", + "signal.target.parent.gid", + "signal.target.parent.group", + "signal.target.parent.interpreter.file.change_time", + "signal.target.parent.interpreter.file.filesystem", + "signal.target.parent.interpreter.file.gid", + "signal.target.parent.interpreter.file.group", + "signal.target.parent.interpreter.file.hashes", + "signal.target.parent.interpreter.file.in_upper_layer", + "signal.target.parent.interpreter.file.inode", + "signal.target.parent.interpreter.file.mode", + "signal.target.parent.interpreter.file.modification_time", + "signal.target.parent.interpreter.file.mount_id", + "signal.target.parent.interpreter.file.name", + "signal.target.parent.interpreter.file.name.length", + "signal.target.parent.interpreter.file.package.name", + "signal.target.parent.interpreter.file.package.source_version", + "signal.target.parent.interpreter.file.package.version", + "signal.target.parent.interpreter.file.path", + "signal.target.parent.interpreter.file.path.length", + "signal.target.parent.interpreter.file.rights", + "signal.target.parent.interpreter.file.uid", + "signal.target.parent.interpreter.file.user", + "signal.target.parent.is_exec", + "signal.target.parent.is_kworker", + "signal.target.parent.is_thread", + "signal.target.parent.pid", + "signal.target.parent.ppid", + "signal.target.parent.tid", + "signal.target.parent.tty_name", + "signal.target.parent.uid", + "signal.target.parent.user", + "signal.target.parent.user_session.k8s_groups", + "signal.target.parent.user_session.k8s_uid", + "signal.target.parent.user_session.k8s_username", + "signal.target.pid", + "signal.target.ppid", + "signal.target.tid", + "signal.target.tty_name", + "signal.target.uid", + "signal.target.user", + "signal.target.user_session.k8s_groups", + "signal.target.user_session.k8s_uid", + "signal.target.user_session.k8s_username", + "signal.type", + "splice.file.change_time", + "splice.file.filesystem", + 
"splice.file.gid", + "splice.file.group", + "splice.file.hashes", + "splice.file.in_upper_layer", + "splice.file.inode", + "splice.file.mode", + "splice.file.modification_time", + "splice.file.mount_id", + "splice.file.name", + "splice.file.name.length", + "splice.file.package.name", + "splice.file.package.source_version", + "splice.file.package.version", + "splice.file.path", + "splice.file.path.length", + "splice.file.rights", + "splice.file.uid", + "splice.file.user", + "splice.pipe_entry_flag", + "splice.pipe_exit_flag", + "splice.retval", + "unlink.file.change_time", + "unlink.file.filesystem", + "unlink.file.gid", + "unlink.file.group", + "unlink.file.hashes", + "unlink.file.in_upper_layer", + "unlink.file.inode", + "unlink.file.mode", + "unlink.file.modification_time", + "unlink.file.mount_id", + "unlink.file.name", + "unlink.file.name.length", + "unlink.file.package.name", + "unlink.file.package.source_version", + "unlink.file.package.version", + "unlink.file.path", + "unlink.file.path.length", + "unlink.file.rights", + "unlink.file.uid", + "unlink.file.user", + "unlink.flags", + "unlink.retval", + "unlink.syscall.dirfd", + "unlink.syscall.flags", + "unlink.syscall.path", + "unload_module.name", + "unload_module.retval", + "utimes.file.change_time", + "utimes.file.filesystem", + "utimes.file.gid", + "utimes.file.group", + "utimes.file.hashes", + "utimes.file.in_upper_layer", + "utimes.file.inode", + "utimes.file.mode", + "utimes.file.modification_time", + "utimes.file.mount_id", + "utimes.file.name", + "utimes.file.name.length", + "utimes.file.package.name", + "utimes.file.package.source_version", + "utimes.file.package.version", + "utimes.file.path", + "utimes.file.path.length", + "utimes.file.rights", + "utimes.file.uid", + "utimes.file.user", + "utimes.retval", + "utimes.syscall.path", + } +} +func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { + m := &Model{} + evaluator, err := m.GetEvaluator(field, "") + if err != nil { + return nil, err } - return "", &eval.ErrFieldNotFound{Field: field} + ctx := eval.NewContext(ev) + value := evaluator.Eval(ctx) + if ctx.Error != nil { + return nil, ctx.Error + } + return value, nil } -func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { +func (ev *Event) GetFieldMetadata(field eval.Field) (eval.EventType, reflect.Kind, error) { switch field { + case "accept.addr.family": + return "accept", reflect.Int, nil + case "accept.addr.ip": + return "accept", reflect.Struct, nil + case "accept.addr.is_public": + return "accept", reflect.Bool, nil + case "accept.addr.port": + return "accept", reflect.Int, nil + case "accept.retval": + return "accept", reflect.Int, nil case "bind.addr.family": - return reflect.Int, nil + return "bind", reflect.Int, nil case "bind.addr.ip": - return reflect.Struct, nil + return "bind", reflect.Struct, nil case "bind.addr.is_public": - return reflect.Bool, nil + return "bind", reflect.Bool, nil case "bind.addr.port": - return reflect.Int, nil + return "bind", reflect.Int, nil case "bind.protocol": - return reflect.Int, nil + return "bind", reflect.Int, nil case "bind.retval": - return reflect.Int, nil + return "bind", reflect.Int, nil case "bpf.cmd": - return reflect.Int, nil + return "bpf", reflect.Int, nil case "bpf.map.name": - return reflect.String, nil + return "bpf", reflect.String, nil case "bpf.map.type": - return reflect.Int, nil + return "bpf", reflect.Int, nil case "bpf.prog.attach_type": - return reflect.Int, nil + return "bpf", reflect.Int, nil case "bpf.prog.helpers": 
- return reflect.Int, nil + return "bpf", reflect.Int, nil case "bpf.prog.name": - return reflect.String, nil + return "bpf", reflect.String, nil case "bpf.prog.tag": - return reflect.String, nil + return "bpf", reflect.String, nil case "bpf.prog.type": - return reflect.Int, nil + return "bpf", reflect.Int, nil case "bpf.retval": - return reflect.Int, nil + return "bpf", reflect.Int, nil case "capset.cap_effective": - return reflect.Int, nil + return "capset", reflect.Int, nil case "capset.cap_permitted": - return reflect.Int, nil + return "capset", reflect.Int, nil case "cgroup.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "cgroup.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "cgroup.id": - return reflect.String, nil + return "", reflect.String, nil case "cgroup.manager": - return reflect.String, nil + return "", reflect.String, nil case "cgroup.version": - return reflect.Int, nil + return "", reflect.Int, nil case "chdir.file.change_time": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.filesystem": - return reflect.String, nil + return "chdir", reflect.String, nil case "chdir.file.gid": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.group": - return reflect.String, nil + return "chdir", reflect.String, nil case "chdir.file.hashes": - return reflect.String, nil + return "chdir", reflect.String, nil case "chdir.file.in_upper_layer": - return reflect.Bool, nil + return "chdir", reflect.Bool, nil case "chdir.file.inode": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.mode": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.modification_time": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.mount_id": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.name": - return reflect.String, nil + return "chdir", reflect.String, nil case "chdir.file.name.length": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.package.name": - return reflect.String, nil + return "chdir", reflect.String, nil case "chdir.file.package.source_version": - return reflect.String, nil + return "chdir", reflect.String, nil case "chdir.file.package.version": - return reflect.String, nil + return "chdir", reflect.String, nil case "chdir.file.path": - return reflect.String, nil + return "chdir", reflect.String, nil case "chdir.file.path.length": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.rights": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.uid": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.file.user": - return reflect.String, nil + return "chdir", reflect.String, nil case "chdir.retval": - return reflect.Int, nil + return "chdir", reflect.Int, nil case "chdir.syscall.path": - return reflect.String, nil + return "chdir", reflect.String, nil case "chmod.file.change_time": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.destination.mode": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.destination.rights": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.filesystem": - return reflect.String, nil + return "chmod", reflect.String, nil case "chmod.file.gid": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.group": - return reflect.String, nil + return "chmod", 
reflect.String, nil case "chmod.file.hashes": - return reflect.String, nil + return "chmod", reflect.String, nil case "chmod.file.in_upper_layer": - return reflect.Bool, nil + return "chmod", reflect.Bool, nil case "chmod.file.inode": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.mode": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.modification_time": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.mount_id": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.name": - return reflect.String, nil + return "chmod", reflect.String, nil case "chmod.file.name.length": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.package.name": - return reflect.String, nil + return "chmod", reflect.String, nil case "chmod.file.package.source_version": - return reflect.String, nil + return "chmod", reflect.String, nil case "chmod.file.package.version": - return reflect.String, nil + return "chmod", reflect.String, nil case "chmod.file.path": - return reflect.String, nil + return "chmod", reflect.String, nil case "chmod.file.path.length": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.rights": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.uid": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.file.user": - return reflect.String, nil + return "chmod", reflect.String, nil case "chmod.retval": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.syscall.mode": - return reflect.Int, nil + return "chmod", reflect.Int, nil case "chmod.syscall.path": - return reflect.String, nil + return "chmod", reflect.String, nil case "chown.file.change_time": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.destination.gid": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.destination.group": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.file.destination.uid": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.destination.user": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.file.filesystem": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.file.gid": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.group": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.file.hashes": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.file.in_upper_layer": - return reflect.Bool, nil + return "chown", reflect.Bool, nil case "chown.file.inode": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.mode": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.modification_time": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.mount_id": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.name": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.file.name.length": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.package.name": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.file.package.source_version": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.file.package.version": - return reflect.String, nil + return "chown", 
reflect.String, nil case "chown.file.path": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.file.path.length": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.rights": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.uid": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.file.user": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.retval": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.syscall.gid": - return reflect.Int, nil + return "chown", reflect.Int, nil case "chown.syscall.path": - return reflect.String, nil + return "chown", reflect.String, nil case "chown.syscall.uid": - return reflect.Int, nil + return "chown", reflect.Int, nil case "connect.addr.family": - return reflect.Int, nil + return "connect", reflect.Int, nil case "connect.addr.ip": - return reflect.Struct, nil + return "connect", reflect.Struct, nil case "connect.addr.is_public": - return reflect.Bool, nil + return "connect", reflect.Bool, nil case "connect.addr.port": - return reflect.Int, nil + return "connect", reflect.Int, nil case "connect.protocol": - return reflect.Int, nil + return "connect", reflect.Int, nil case "connect.retval": - return reflect.Int, nil + return "connect", reflect.Int, nil case "container.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "container.id": - return reflect.String, nil + return "", reflect.String, nil case "container.runtime": - return reflect.String, nil + return "", reflect.String, nil case "container.tags": - return reflect.String, nil + return "", reflect.String, nil case "dns.id": - return reflect.Int, nil + return "dns", reflect.Int, nil case "dns.question.class": - return reflect.Int, nil + return "dns", reflect.Int, nil case "dns.question.count": - return reflect.Int, nil + return "dns", reflect.Int, nil case "dns.question.length": - return reflect.Int, nil + return "dns", reflect.Int, nil case "dns.question.name": - return reflect.String, nil + return "dns", reflect.String, nil case "dns.question.name.length": - return reflect.Int, nil + return "dns", reflect.Int, nil case "dns.question.type": - return reflect.Int, nil + return "dns", reflect.Int, nil case "event.async": - return reflect.Bool, nil + return "", reflect.Bool, nil case "event.hostname": - return reflect.String, nil + return "", reflect.String, nil case "event.origin": - return reflect.String, nil + return "", reflect.String, nil case "event.os": - return reflect.String, nil + return "", reflect.String, nil case "event.service": - return reflect.String, nil + return "", reflect.String, nil case "event.timestamp": - return reflect.Int, nil + return "", reflect.Int, nil case "exec.args": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.args_flags": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.args_options": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.args_truncated": - return reflect.Bool, nil + return "exec", reflect.Bool, nil case "exec.argv": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.argv0": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.auid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.cap_effective": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.cap_permitted": - return reflect.Int, nil + return "exec", 
reflect.Int, nil case "exec.cgroup.file.inode": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.cgroup.file.mount_id": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.cgroup.id": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.cgroup.manager": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.cgroup.version": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.comm": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.container.id": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.created_at": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.egid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.egroup": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.envp": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.envs": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.envs_truncated": - return reflect.Bool, nil + return "exec", reflect.Bool, nil case "exec.euid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.euser": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.change_time": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.filesystem": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.gid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.group": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.hashes": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.in_upper_layer": - return reflect.Bool, nil + return "exec", reflect.Bool, nil case "exec.file.inode": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.mode": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.modification_time": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.mount_id": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.name": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.name.length": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.package.name": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.package.source_version": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.package.version": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.path": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.path.length": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.rights": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.uid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.user": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.fsgid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.fsgroup": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.fsuid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.fsuser": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.gid": - return reflect.Int, nil + return "exec", reflect.Int, nil case 
"exec.group": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.interpreter.file.change_time": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.filesystem": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.interpreter.file.gid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.group": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.interpreter.file.hashes": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "exec", reflect.Bool, nil case "exec.interpreter.file.inode": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.mode": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.modification_time": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.mount_id": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.name": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.interpreter.file.name.length": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.package.name": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.interpreter.file.package.source_version": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.interpreter.file.package.version": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.interpreter.file.path": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.interpreter.file.path.length": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.rights": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.uid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.interpreter.file.user": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.is_exec": - return reflect.Bool, nil + return "exec", reflect.Bool, nil case "exec.is_kworker": - return reflect.Bool, nil + return "exec", reflect.Bool, nil case "exec.is_thread": - return reflect.Bool, nil + return "exec", reflect.Bool, nil case "exec.pid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.ppid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.syscall.path": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.tid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.tty_name": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.uid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.user": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.user_session.k8s_groups": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.user_session.k8s_uid": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.user_session.k8s_username": - return reflect.String, nil + return "exec", reflect.String, nil case "exit.args": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.args_flags": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.args_options": - return reflect.String, nil + return "exit", reflect.String, nil case 
"exit.args_truncated": - return reflect.Bool, nil + return "exit", reflect.Bool, nil case "exit.argv": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.argv0": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.auid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.cap_effective": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.cap_permitted": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.cause": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.cgroup.file.inode": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.cgroup.file.mount_id": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.cgroup.id": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.cgroup.manager": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.cgroup.version": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.code": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.comm": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.container.id": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.created_at": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.egid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.egroup": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.envp": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.envs": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.envs_truncated": - return reflect.Bool, nil + return "exit", reflect.Bool, nil case "exit.euid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.euser": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.change_time": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.filesystem": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.gid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.group": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.hashes": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.in_upper_layer": - return reflect.Bool, nil + return "exit", reflect.Bool, nil case "exit.file.inode": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.mode": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.modification_time": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.mount_id": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.name": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.name.length": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.package.name": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.package.source_version": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.package.version": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.path": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.path.length": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.rights": - return 
reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.uid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.user": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.fsgid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.fsgroup": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.fsuid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.fsuser": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.gid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.group": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.interpreter.file.change_time": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.filesystem": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.interpreter.file.gid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.group": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.interpreter.file.hashes": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "exit", reflect.Bool, nil case "exit.interpreter.file.inode": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.mode": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.modification_time": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.mount_id": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.name": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.interpreter.file.name.length": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.package.name": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.interpreter.file.package.source_version": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.interpreter.file.package.version": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.interpreter.file.path": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.interpreter.file.path.length": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.rights": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.uid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.interpreter.file.user": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.is_exec": - return reflect.Bool, nil + return "exit", reflect.Bool, nil case "exit.is_kworker": - return reflect.Bool, nil + return "exit", reflect.Bool, nil case "exit.is_thread": - return reflect.Bool, nil + return "exit", reflect.Bool, nil case "exit.pid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.ppid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.tid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.tty_name": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.uid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.user": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.user_session.k8s_groups": - return 
reflect.String, nil + return "exit", reflect.String, nil case "exit.user_session.k8s_uid": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.user_session.k8s_username": - return reflect.String, nil + return "exit", reflect.String, nil case "imds.aws.is_imds_v2": - return reflect.Bool, nil + return "imds", reflect.Bool, nil case "imds.aws.security_credentials.type": - return reflect.String, nil + return "imds", reflect.String, nil case "imds.cloud_provider": - return reflect.String, nil + return "imds", reflect.String, nil case "imds.host": - return reflect.String, nil + return "imds", reflect.String, nil case "imds.server": - return reflect.String, nil + return "imds", reflect.String, nil case "imds.type": - return reflect.String, nil + return "imds", reflect.String, nil case "imds.url": - return reflect.String, nil + return "imds", reflect.String, nil case "imds.user_agent": - return reflect.String, nil + return "imds", reflect.String, nil case "link.file.change_time": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.change_time": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.filesystem": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.destination.gid": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.group": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.destination.hashes": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.destination.in_upper_layer": - return reflect.Bool, nil + return "link", reflect.Bool, nil case "link.file.destination.inode": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.mode": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.modification_time": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.mount_id": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.name": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.destination.name.length": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.package.name": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.destination.package.source_version": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.destination.package.version": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.destination.path": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.destination.path.length": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.rights": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.uid": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.destination.user": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.filesystem": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.gid": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.group": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.hashes": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.in_upper_layer": - return reflect.Bool, nil + return 
"link", reflect.Bool, nil case "link.file.inode": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.mode": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.modification_time": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.mount_id": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.name": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.name.length": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.package.name": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.package.source_version": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.package.version": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.path": - return reflect.String, nil + return "link", reflect.String, nil case "link.file.path.length": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.rights": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.uid": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.file.user": - return reflect.String, nil + return "link", reflect.String, nil case "link.retval": - return reflect.Int, nil + return "link", reflect.Int, nil case "link.syscall.destination.path": - return reflect.String, nil + return "link", reflect.String, nil case "link.syscall.path": - return reflect.String, nil + return "link", reflect.String, nil case "load_module.args": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.args_truncated": - return reflect.Bool, nil + return "load_module", reflect.Bool, nil case "load_module.argv": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.file.change_time": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.filesystem": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.file.gid": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.group": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.file.hashes": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.file.in_upper_layer": - return reflect.Bool, nil + return "load_module", reflect.Bool, nil case "load_module.file.inode": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.mode": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.modification_time": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.mount_id": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.name": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.file.name.length": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.package.name": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.file.package.source_version": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.file.package.version": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.file.path": - return reflect.String, nil + return "load_module", reflect.String, 
nil case "load_module.file.path.length": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.rights": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.uid": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "load_module.file.user": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.loaded_from_memory": - return reflect.Bool, nil + return "load_module", reflect.Bool, nil case "load_module.name": - return reflect.String, nil + return "load_module", reflect.String, nil case "load_module.retval": - return reflect.Int, nil + return "load_module", reflect.Int, nil case "mkdir.file.change_time": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.destination.mode": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.destination.rights": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.filesystem": - return reflect.String, nil + return "mkdir", reflect.String, nil case "mkdir.file.gid": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.group": - return reflect.String, nil + return "mkdir", reflect.String, nil case "mkdir.file.hashes": - return reflect.String, nil + return "mkdir", reflect.String, nil case "mkdir.file.in_upper_layer": - return reflect.Bool, nil + return "mkdir", reflect.Bool, nil case "mkdir.file.inode": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.mode": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.modification_time": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.mount_id": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.name": - return reflect.String, nil + return "mkdir", reflect.String, nil case "mkdir.file.name.length": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.package.name": - return reflect.String, nil + return "mkdir", reflect.String, nil case "mkdir.file.package.source_version": - return reflect.String, nil + return "mkdir", reflect.String, nil case "mkdir.file.package.version": - return reflect.String, nil + return "mkdir", reflect.String, nil case "mkdir.file.path": - return reflect.String, nil + return "mkdir", reflect.String, nil case "mkdir.file.path.length": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.rights": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.uid": - return reflect.Int, nil + return "mkdir", reflect.Int, nil case "mkdir.file.user": - return reflect.String, nil + return "mkdir", reflect.String, nil case "mkdir.retval": - return reflect.Int, nil + return "mkdir", reflect.Int, nil + case "mkdir.syscall.mode": + return "mkdir", reflect.Int, nil + case "mkdir.syscall.path": + return "mkdir", reflect.String, nil case "mmap.file.change_time": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.file.filesystem": - return reflect.String, nil + return "mmap", reflect.String, nil case "mmap.file.gid": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.file.group": - return reflect.String, nil + return "mmap", reflect.String, nil case "mmap.file.hashes": - return reflect.String, nil + return "mmap", reflect.String, nil case "mmap.file.in_upper_layer": - return reflect.Bool, nil + return "mmap", reflect.Bool, nil case "mmap.file.inode": - return reflect.Int, 
nil + return "mmap", reflect.Int, nil case "mmap.file.mode": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.file.modification_time": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.file.mount_id": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.file.name": - return reflect.String, nil + return "mmap", reflect.String, nil case "mmap.file.name.length": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.file.package.name": - return reflect.String, nil + return "mmap", reflect.String, nil case "mmap.file.package.source_version": - return reflect.String, nil + return "mmap", reflect.String, nil case "mmap.file.package.version": - return reflect.String, nil + return "mmap", reflect.String, nil case "mmap.file.path": - return reflect.String, nil + return "mmap", reflect.String, nil case "mmap.file.path.length": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.file.rights": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.file.uid": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.file.user": - return reflect.String, nil + return "mmap", reflect.String, nil case "mmap.flags": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.protection": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mmap.retval": - return reflect.Int, nil + return "mmap", reflect.Int, nil case "mount.fs_type": - return reflect.String, nil + return "mount", reflect.String, nil case "mount.mountpoint.path": - return reflect.String, nil + return "mount", reflect.String, nil case "mount.retval": - return reflect.Int, nil + return "mount", reflect.Int, nil case "mount.root.path": - return reflect.String, nil + return "mount", reflect.String, nil case "mount.source.path": - return reflect.String, nil + return "mount", reflect.String, nil case "mount.syscall.fs_type": - return reflect.String, nil + return "mount", reflect.String, nil case "mount.syscall.mountpoint.path": - return reflect.String, nil + return "mount", reflect.String, nil case "mount.syscall.source.path": - return reflect.String, nil + return "mount", reflect.String, nil case "mprotect.req_protection": - return reflect.Int, nil + return "mprotect", reflect.Int, nil case "mprotect.retval": - return reflect.Int, nil + return "mprotect", reflect.Int, nil case "mprotect.vm_protection": - return reflect.Int, nil + return "mprotect", reflect.Int, nil case "network.destination.ip": - return reflect.Struct, nil + return "", reflect.Struct, nil case "network.destination.is_public": - return reflect.Bool, nil + return "", reflect.Bool, nil case "network.destination.port": - return reflect.Int, nil + return "", reflect.Int, nil case "network.device.ifname": - return reflect.String, nil + return "", reflect.String, nil case "network.l3_protocol": - return reflect.Int, nil + return "", reflect.Int, nil case "network.l4_protocol": - return reflect.Int, nil + return "", reflect.Int, nil + case "network.network_direction": + return "", reflect.Int, nil case "network.size": - return reflect.Int, nil + return "", reflect.Int, nil case "network.source.ip": - return reflect.Struct, nil + return "", reflect.Struct, nil case "network.source.is_public": - return reflect.Bool, nil + return "", reflect.Bool, nil case "network.source.port": - return reflect.Int, nil + return "", reflect.Int, nil + case "network_flow_monitor.device.ifname": + return "network_flow_monitor", reflect.String, nil + case 
"network_flow_monitor.flows.destination.ip": + return "network_flow_monitor", reflect.Struct, nil + case "network_flow_monitor.flows.destination.is_public": + return "network_flow_monitor", reflect.Bool, nil + case "network_flow_monitor.flows.destination.port": + return "network_flow_monitor", reflect.Int, nil + case "network_flow_monitor.flows.egress.data_size": + return "network_flow_monitor", reflect.Int, nil + case "network_flow_monitor.flows.egress.packet_count": + return "network_flow_monitor", reflect.Int, nil + case "network_flow_monitor.flows.ingress.data_size": + return "network_flow_monitor", reflect.Int, nil + case "network_flow_monitor.flows.ingress.packet_count": + return "network_flow_monitor", reflect.Int, nil + case "network_flow_monitor.flows.l3_protocol": + return "network_flow_monitor", reflect.Int, nil + case "network_flow_monitor.flows.l4_protocol": + return "network_flow_monitor", reflect.Int, nil + case "network_flow_monitor.flows.length": + return "network_flow_monitor", reflect.Int, nil + case "network_flow_monitor.flows.source.ip": + return "network_flow_monitor", reflect.Struct, nil + case "network_flow_monitor.flows.source.is_public": + return "network_flow_monitor", reflect.Bool, nil + case "network_flow_monitor.flows.source.port": + return "network_flow_monitor", reflect.Int, nil case "ondemand.arg1.str": - return reflect.String, nil + return "ondemand", reflect.String, nil case "ondemand.arg1.uint": - return reflect.Int, nil + return "ondemand", reflect.Int, nil case "ondemand.arg2.str": - return reflect.String, nil + return "ondemand", reflect.String, nil case "ondemand.arg2.uint": - return reflect.Int, nil + return "ondemand", reflect.Int, nil case "ondemand.arg3.str": - return reflect.String, nil + return "ondemand", reflect.String, nil case "ondemand.arg3.uint": - return reflect.Int, nil + return "ondemand", reflect.Int, nil case "ondemand.arg4.str": - return reflect.String, nil + return "ondemand", reflect.String, nil case "ondemand.arg4.uint": - return reflect.Int, nil + return "ondemand", reflect.Int, nil case "ondemand.name": - return reflect.String, nil + return "ondemand", reflect.String, nil case "open.file.change_time": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.destination.mode": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.filesystem": - return reflect.String, nil + return "open", reflect.String, nil case "open.file.gid": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.group": - return reflect.String, nil + return "open", reflect.String, nil case "open.file.hashes": - return reflect.String, nil + return "open", reflect.String, nil case "open.file.in_upper_layer": - return reflect.Bool, nil + return "open", reflect.Bool, nil case "open.file.inode": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.mode": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.modification_time": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.mount_id": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.name": - return reflect.String, nil + return "open", reflect.String, nil case "open.file.name.length": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.package.name": - return reflect.String, nil + return "open", reflect.String, nil case "open.file.package.source_version": - return reflect.String, nil + return "open", reflect.String, nil case 
"open.file.package.version": - return reflect.String, nil + return "open", reflect.String, nil case "open.file.path": - return reflect.String, nil + return "open", reflect.String, nil case "open.file.path.length": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.rights": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.uid": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.file.user": - return reflect.String, nil + return "open", reflect.String, nil case "open.flags": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.retval": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.syscall.flags": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.syscall.mode": - return reflect.Int, nil + return "open", reflect.Int, nil case "open.syscall.path": - return reflect.String, nil + return "open", reflect.String, nil case "packet.destination.ip": - return reflect.Struct, nil + return "packet", reflect.Struct, nil case "packet.destination.is_public": - return reflect.Bool, nil + return "packet", reflect.Bool, nil case "packet.destination.port": - return reflect.Int, nil + return "packet", reflect.Int, nil case "packet.device.ifname": - return reflect.String, nil + return "packet", reflect.String, nil case "packet.filter": - return reflect.String, nil + return "packet", reflect.String, nil case "packet.l3_protocol": - return reflect.Int, nil + return "packet", reflect.Int, nil case "packet.l4_protocol": - return reflect.Int, nil + return "packet", reflect.Int, nil + case "packet.network_direction": + return "packet", reflect.Int, nil case "packet.size": - return reflect.Int, nil + return "packet", reflect.Int, nil case "packet.source.ip": - return reflect.Struct, nil + return "packet", reflect.Struct, nil case "packet.source.is_public": - return reflect.Bool, nil + return "packet", reflect.Bool, nil case "packet.source.port": - return reflect.Int, nil + return "packet", reflect.Int, nil case "packet.tls.version": - return reflect.Int, nil + return "packet", reflect.Int, nil case "process.ancestors.args": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.args_flags": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.args_options": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.args_truncated": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.ancestors.argv": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.argv0": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.auid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.cap_effective": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.cap_permitted": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.cgroup.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.cgroup.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.cgroup.id": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.cgroup.manager": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.cgroup.version": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.comm": - return reflect.String, nil + 
return "", reflect.String, nil case "process.ancestors.container.id": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.egid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.egroup": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.envp": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.envs": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.envs_truncated": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.ancestors.euid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.euser": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.change_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.filesystem": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.gid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.group": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.hashes": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.in_upper_layer": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.ancestors.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.mode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.modification_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.package.name": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.package.source_version": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.package.version": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.rights": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.uid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.user": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.fsgid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.fsgroup": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.fsuid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.fsuser": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.gid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.group": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.interpreter.file.change_time": - return reflect.Int, nil + return "", reflect.Int, nil case 
"process.ancestors.interpreter.file.filesystem": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.interpreter.file.gid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.interpreter.file.group": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.interpreter.file.hashes": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.ancestors.interpreter.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.interpreter.file.mode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.interpreter.file.modification_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.interpreter.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.interpreter.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.interpreter.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.interpreter.file.package.name": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.interpreter.file.package.source_version": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.interpreter.file.package.version": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.interpreter.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.interpreter.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.interpreter.file.rights": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.interpreter.file.uid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.interpreter.file.user": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.is_exec": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.ancestors.is_kworker": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.ancestors.is_thread": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.ancestors.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.pid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.ppid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.tid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.tty_name": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.uid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.user": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.user_session.k8s_groups": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.user_session.k8s_uid": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.user_session.k8s_username": - return reflect.String, nil + return "", reflect.String, nil case "process.args": - return reflect.String, nil + return "", reflect.String, nil case "process.args_flags": - return reflect.String, nil + return "", reflect.String, nil case 
"process.args_options": - return reflect.String, nil + return "", reflect.String, nil case "process.args_truncated": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.argv": - return reflect.String, nil + return "", reflect.String, nil case "process.argv0": - return reflect.String, nil + return "", reflect.String, nil case "process.auid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.cap_effective": - return reflect.Int, nil + return "", reflect.Int, nil case "process.cap_permitted": - return reflect.Int, nil + return "", reflect.Int, nil case "process.cgroup.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.cgroup.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "process.cgroup.id": - return reflect.String, nil + return "", reflect.String, nil case "process.cgroup.manager": - return reflect.String, nil + return "", reflect.String, nil case "process.cgroup.version": - return reflect.Int, nil + return "", reflect.Int, nil case "process.comm": - return reflect.String, nil + return "", reflect.String, nil case "process.container.id": - return reflect.String, nil + return "", reflect.String, nil case "process.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "process.egid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.egroup": - return reflect.String, nil + return "", reflect.String, nil case "process.envp": - return reflect.String, nil + return "", reflect.String, nil case "process.envs": - return reflect.String, nil + return "", reflect.String, nil case "process.envs_truncated": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.euid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.euser": - return reflect.String, nil + return "", reflect.String, nil case "process.file.change_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.filesystem": - return reflect.String, nil + return "", reflect.String, nil case "process.file.gid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.group": - return reflect.String, nil + return "", reflect.String, nil case "process.file.hashes": - return reflect.String, nil + return "", reflect.String, nil case "process.file.in_upper_layer": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.mode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.modification_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.package.name": - return reflect.String, nil + return "", reflect.String, nil case "process.file.package.source_version": - return reflect.String, nil + return "", reflect.String, nil case "process.file.package.version": - return reflect.String, nil + return "", reflect.String, nil case "process.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.rights": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.uid": - return reflect.Int, nil + 
return "", reflect.Int, nil case "process.file.user": - return reflect.String, nil + return "", reflect.String, nil case "process.fsgid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.fsgroup": - return reflect.String, nil + return "", reflect.String, nil case "process.fsuid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.fsuser": - return reflect.String, nil + return "", reflect.String, nil case "process.gid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.group": - return reflect.String, nil + return "", reflect.String, nil case "process.interpreter.file.change_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.filesystem": - return reflect.String, nil + return "", reflect.String, nil case "process.interpreter.file.gid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.group": - return reflect.String, nil + return "", reflect.String, nil case "process.interpreter.file.hashes": - return reflect.String, nil + return "", reflect.String, nil case "process.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.interpreter.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.mode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.modification_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.interpreter.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.package.name": - return reflect.String, nil + return "", reflect.String, nil case "process.interpreter.file.package.source_version": - return reflect.String, nil + return "", reflect.String, nil case "process.interpreter.file.package.version": - return reflect.String, nil + return "", reflect.String, nil case "process.interpreter.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.interpreter.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.rights": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.uid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.interpreter.file.user": - return reflect.String, nil + return "", reflect.String, nil case "process.is_exec": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.is_kworker": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.is_thread": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.parent.args": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.args_flags": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.args_options": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.args_truncated": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.parent.argv": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.argv0": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.auid": - return reflect.Int, nil + return "", reflect.Int, nil case 
"process.parent.cap_effective": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.cap_permitted": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.cgroup.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.cgroup.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.cgroup.id": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.cgroup.manager": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.cgroup.version": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.comm": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.container.id": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.egid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.egroup": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.envp": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.envs": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.envs_truncated": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.parent.euid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.euser": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.change_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.filesystem": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.gid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.group": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.hashes": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.in_upper_layer": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.parent.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.mode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.modification_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.package.name": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.package.source_version": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.package.version": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.rights": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.uid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.user": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.fsgid": - return reflect.Int, 
nil + return "", reflect.Int, nil case "process.parent.fsgroup": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.fsuid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.fsuser": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.gid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.group": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.interpreter.file.change_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.filesystem": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.interpreter.file.gid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.group": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.interpreter.file.hashes": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.parent.interpreter.file.inode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.mode": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.modification_time": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.mount_id": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.interpreter.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.package.name": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.interpreter.file.package.source_version": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.interpreter.file.package.version": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.interpreter.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.interpreter.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.rights": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.uid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.interpreter.file.user": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.is_exec": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.parent.is_kworker": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.parent.is_thread": - return reflect.Bool, nil + return "", reflect.Bool, nil case "process.parent.pid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.ppid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.tid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.tty_name": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.uid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.user": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.user_session.k8s_groups": - return reflect.String, nil + return "", reflect.String, nil case 
"process.parent.user_session.k8s_uid": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.user_session.k8s_username": - return reflect.String, nil + return "", reflect.String, nil case "process.pid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ppid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.tid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.tty_name": - return reflect.String, nil + return "", reflect.String, nil case "process.uid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.user": - return reflect.String, nil + return "", reflect.String, nil case "process.user_session.k8s_groups": - return reflect.String, nil + return "", reflect.String, nil case "process.user_session.k8s_uid": - return reflect.String, nil + return "", reflect.String, nil case "process.user_session.k8s_username": - return reflect.String, nil + return "", reflect.String, nil case "ptrace.request": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.retval": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.args": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.args_flags": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.args_options": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.args_truncated": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.ancestors.argv": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.argv0": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.auid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.cap_effective": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.cap_permitted": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.cgroup.file.inode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.cgroup.file.mount_id": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.cgroup.id": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.cgroup.manager": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.cgroup.version": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.comm": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.container.id": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.created_at": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.egid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.egroup": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.envp": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.envs": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.envs_truncated": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case 
"ptrace.tracee.ancestors.euid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.euser": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.file.change_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.filesystem": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.file.gid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.group": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.file.hashes": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.file.in_upper_layer": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.ancestors.file.inode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.mode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.modification_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.mount_id": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.file.name.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.package.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.file.package.source_version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.file.package.version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.file.path": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.file.path.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.rights": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.uid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.file.user": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.fsgid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.fsgroup": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.fsuid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.fsuser": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.gid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.group": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.interpreter.file.change_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.filesystem": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.interpreter.file.gid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.group": - return reflect.String, nil + return "ptrace", reflect.String, nil case 
"ptrace.tracee.ancestors.interpreter.file.hashes": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.ancestors.interpreter.file.inode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.mode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.modification_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.mount_id": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.interpreter.file.name.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.package.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.interpreter.file.package.source_version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.interpreter.file.package.version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.interpreter.file.path": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.interpreter.file.path.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.rights": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.uid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.interpreter.file.user": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.is_exec": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.ancestors.is_kworker": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.ancestors.is_thread": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.ancestors.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.pid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.ppid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.tid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.tty_name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.uid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ancestors.user": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.user_session.k8s_groups": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.user_session.k8s_uid": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.ancestors.user_session.k8s_username": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.args": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.args_flags": - return reflect.String, nil + return "ptrace", 
reflect.String, nil case "ptrace.tracee.args_options": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.args_truncated": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.argv": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.argv0": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.auid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.cap_effective": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.cap_permitted": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.cgroup.file.inode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.cgroup.file.mount_id": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.cgroup.id": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.cgroup.manager": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.cgroup.version": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.comm": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.container.id": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.created_at": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.egid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.egroup": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.envp": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.envs": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.envs_truncated": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.euid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.euser": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.file.change_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.filesystem": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.file.gid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.group": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.file.hashes": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.file.in_upper_layer": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.file.inode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.mode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.modification_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.mount_id": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.file.name.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.package.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.file.package.source_version": - return reflect.String, 
nil + return "ptrace", reflect.String, nil case "ptrace.tracee.file.package.version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.file.path": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.file.path.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.rights": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.uid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.file.user": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.fsgid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.fsgroup": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.fsuid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.fsuser": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.gid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.group": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.interpreter.file.change_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.filesystem": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.interpreter.file.gid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.group": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.interpreter.file.hashes": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.interpreter.file.inode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.mode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.modification_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.mount_id": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.interpreter.file.name.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.package.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.interpreter.file.package.source_version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.interpreter.file.package.version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.interpreter.file.path": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.interpreter.file.path.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.rights": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.uid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.interpreter.file.user": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.is_exec": - return reflect.Bool, nil + return "ptrace", 
reflect.Bool, nil case "ptrace.tracee.is_kworker": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.is_thread": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.parent.args": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.args_flags": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.args_options": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.args_truncated": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.parent.argv": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.argv0": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.auid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.cap_effective": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.cap_permitted": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.cgroup.file.inode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.cgroup.file.mount_id": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.cgroup.id": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.cgroup.manager": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.cgroup.version": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.comm": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.container.id": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.created_at": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.egid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.egroup": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.envp": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.envs": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.envs_truncated": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.parent.euid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.euser": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.file.change_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.filesystem": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.file.gid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.group": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.file.hashes": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.file.in_upper_layer": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.parent.file.inode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.mode": - return reflect.Int, nil + return 
"ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.modification_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.mount_id": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.file.name.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.package.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.file.package.source_version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.file.package.version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.file.path": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.file.path.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.rights": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.uid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.file.user": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.fsgid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.fsgroup": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.fsuid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.fsuser": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.gid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.group": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.interpreter.file.change_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.filesystem": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.interpreter.file.gid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.group": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.interpreter.file.hashes": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.parent.interpreter.file.inode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.mode": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.modification_time": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.mount_id": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.interpreter.file.name.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.package.name": - return reflect.String, nil + return "ptrace", reflect.String, nil case 
"ptrace.tracee.parent.interpreter.file.package.source_version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.interpreter.file.package.version": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.interpreter.file.path": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.interpreter.file.path.length": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.rights": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.uid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.interpreter.file.user": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.is_exec": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.parent.is_kworker": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.parent.is_thread": - return reflect.Bool, nil + return "ptrace", reflect.Bool, nil case "ptrace.tracee.parent.pid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.ppid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.tid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.tty_name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.uid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.parent.user": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.user_session.k8s_groups": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.user_session.k8s_uid": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.parent.user_session.k8s_username": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.pid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.ppid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.tid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.tty_name": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.uid": - return reflect.Int, nil + return "ptrace", reflect.Int, nil case "ptrace.tracee.user": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.user_session.k8s_groups": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.user_session.k8s_uid": - return reflect.String, nil + return "ptrace", reflect.String, nil case "ptrace.tracee.user_session.k8s_username": - return reflect.String, nil + return "ptrace", reflect.String, nil case "removexattr.file.change_time": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "removexattr.file.destination.name": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.destination.namespace": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.filesystem": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.gid": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case 
"removexattr.file.group": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.hashes": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.in_upper_layer": - return reflect.Bool, nil + return "removexattr", reflect.Bool, nil case "removexattr.file.inode": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "removexattr.file.mode": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "removexattr.file.modification_time": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "removexattr.file.mount_id": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "removexattr.file.name": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.name.length": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "removexattr.file.package.name": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.package.source_version": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.package.version": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.path": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.file.path.length": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "removexattr.file.rights": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "removexattr.file.uid": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "removexattr.file.user": - return reflect.String, nil + return "removexattr", reflect.String, nil case "removexattr.retval": - return reflect.Int, nil + return "removexattr", reflect.Int, nil case "rename.file.change_time": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.change_time": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.filesystem": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.gid": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.group": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.hashes": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.in_upper_layer": - return reflect.Bool, nil + return "rename", reflect.Bool, nil case "rename.file.destination.inode": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.mode": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.modification_time": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.mount_id": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.name": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.name.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.package.name": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.package.source_version": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.package.version": - return reflect.String, nil + return 
"rename", reflect.String, nil case "rename.file.destination.path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.rights": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.uid": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.user": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.filesystem": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.gid": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.group": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.hashes": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.in_upper_layer": - return reflect.Bool, nil + return "rename", reflect.Bool, nil case "rename.file.inode": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.mode": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.modification_time": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.mount_id": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.name": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.name.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.package.name": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.package.source_version": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.package.version": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.rights": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.uid": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.user": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.retval": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.syscall.destination.path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.syscall.path": - return reflect.String, nil + return "rename", reflect.String, nil case "rmdir.file.change_time": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case "rmdir.file.filesystem": - return reflect.String, nil + return "rmdir", reflect.String, nil case "rmdir.file.gid": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case "rmdir.file.group": - return reflect.String, nil + return "rmdir", reflect.String, nil case "rmdir.file.hashes": - return reflect.String, nil + return "rmdir", reflect.String, nil case "rmdir.file.in_upper_layer": - return reflect.Bool, nil + return "rmdir", reflect.Bool, nil case "rmdir.file.inode": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case "rmdir.file.mode": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case "rmdir.file.modification_time": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case "rmdir.file.mount_id": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case 
"rmdir.file.name": - return reflect.String, nil + return "rmdir", reflect.String, nil case "rmdir.file.name.length": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case "rmdir.file.package.name": - return reflect.String, nil + return "rmdir", reflect.String, nil case "rmdir.file.package.source_version": - return reflect.String, nil + return "rmdir", reflect.String, nil case "rmdir.file.package.version": - return reflect.String, nil + return "rmdir", reflect.String, nil case "rmdir.file.path": - return reflect.String, nil + return "rmdir", reflect.String, nil case "rmdir.file.path.length": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case "rmdir.file.rights": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case "rmdir.file.uid": - return reflect.Int, nil + return "rmdir", reflect.Int, nil case "rmdir.file.user": - return reflect.String, nil + return "rmdir", reflect.String, nil case "rmdir.retval": - return reflect.Int, nil + return "rmdir", reflect.Int, nil + case "rmdir.syscall.path": + return "rmdir", reflect.String, nil case "selinux.bool.name": - return reflect.String, nil + return "selinux", reflect.String, nil case "selinux.bool.state": - return reflect.String, nil + return "selinux", reflect.String, nil case "selinux.bool_commit.state": - return reflect.Bool, nil + return "selinux", reflect.Bool, nil case "selinux.enforce.status": - return reflect.String, nil + return "selinux", reflect.String, nil case "setgid.egid": - return reflect.Int, nil + return "setgid", reflect.Int, nil case "setgid.egroup": - return reflect.String, nil + return "setgid", reflect.String, nil case "setgid.fsgid": - return reflect.Int, nil + return "setgid", reflect.Int, nil case "setgid.fsgroup": - return reflect.String, nil + return "setgid", reflect.String, nil case "setgid.gid": - return reflect.Int, nil + return "setgid", reflect.Int, nil case "setgid.group": - return reflect.String, nil + return "setgid", reflect.String, nil case "setuid.euid": - return reflect.Int, nil + return "setuid", reflect.Int, nil case "setuid.euser": - return reflect.String, nil + return "setuid", reflect.String, nil case "setuid.fsuid": - return reflect.Int, nil + return "setuid", reflect.Int, nil case "setuid.fsuser": - return reflect.String, nil + return "setuid", reflect.String, nil case "setuid.uid": - return reflect.Int, nil + return "setuid", reflect.Int, nil case "setuid.user": - return reflect.String, nil + return "setuid", reflect.String, nil case "setxattr.file.change_time": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.destination.name": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.destination.namespace": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.filesystem": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.gid": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.group": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.hashes": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.in_upper_layer": - return reflect.Bool, nil + return "setxattr", reflect.Bool, nil case "setxattr.file.inode": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.mode": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.modification_time": - return 
reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.mount_id": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.name": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.name.length": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.package.name": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.package.source_version": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.package.version": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.path": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.file.path.length": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.rights": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.uid": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "setxattr.file.user": - return reflect.String, nil + return "setxattr", reflect.String, nil case "setxattr.retval": - return reflect.Int, nil + return "setxattr", reflect.Int, nil case "signal.pid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.retval": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.args": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.args_flags": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.args_options": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.args_truncated": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.ancestors.argv": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.argv0": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.auid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.cap_effective": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.cap_permitted": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.cgroup.file.inode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.cgroup.file.mount_id": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.cgroup.id": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.cgroup.manager": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.cgroup.version": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.comm": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.container.id": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.created_at": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.egid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.egroup": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.envp": - return reflect.String, nil + return "signal", 
reflect.String, nil case "signal.target.ancestors.envs": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.envs_truncated": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.ancestors.euid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.euser": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.file.change_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.filesystem": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.file.gid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.group": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.file.hashes": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.file.in_upper_layer": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.ancestors.file.inode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.mode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.modification_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.mount_id": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.file.name.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.package.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.file.package.source_version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.file.package.version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.file.path": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.file.path.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.rights": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.uid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.file.user": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.fsgid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.fsgroup": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.fsuid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.fsuser": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.gid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.group": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.interpreter.file.change_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.filesystem": - return reflect.String, nil + return "signal", reflect.String, nil case 
"signal.target.ancestors.interpreter.file.gid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.group": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.interpreter.file.hashes": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.ancestors.interpreter.file.inode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.mode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.modification_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.mount_id": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.interpreter.file.name.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.package.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.interpreter.file.package.source_version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.interpreter.file.package.version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.interpreter.file.path": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.interpreter.file.path.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.rights": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.uid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.interpreter.file.user": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.is_exec": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.ancestors.is_kworker": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.ancestors.is_thread": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.ancestors.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.pid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.ppid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.tid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.tty_name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.uid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ancestors.user": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.user_session.k8s_groups": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.user_session.k8s_uid": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.ancestors.user_session.k8s_username": - return 
reflect.String, nil + return "signal", reflect.String, nil case "signal.target.args": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.args_flags": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.args_options": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.args_truncated": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.argv": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.argv0": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.auid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.cap_effective": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.cap_permitted": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.cgroup.file.inode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.cgroup.file.mount_id": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.cgroup.id": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.cgroup.manager": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.cgroup.version": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.comm": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.container.id": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.created_at": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.egid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.egroup": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.envp": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.envs": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.envs_truncated": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.euid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.euser": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.file.change_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.filesystem": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.file.gid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.group": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.file.hashes": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.file.in_upper_layer": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.file.inode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.mode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.modification_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.mount_id": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.file.name.length": - return 
reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.package.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.file.package.source_version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.file.package.version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.file.path": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.file.path.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.rights": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.uid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.file.user": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.fsgid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.fsgroup": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.fsuid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.fsuser": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.gid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.group": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.interpreter.file.change_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.filesystem": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.interpreter.file.gid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.group": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.interpreter.file.hashes": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.interpreter.file.inode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.mode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.modification_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.mount_id": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.interpreter.file.name.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.package.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.interpreter.file.package.source_version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.interpreter.file.package.version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.interpreter.file.path": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.interpreter.file.path.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.rights": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.uid": - return 
reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.interpreter.file.user": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.is_exec": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.is_kworker": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.is_thread": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.parent.args": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.args_flags": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.args_options": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.args_truncated": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.parent.argv": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.argv0": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.auid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.cap_effective": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.cap_permitted": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.cgroup.file.inode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.cgroup.file.mount_id": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.cgroup.id": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.cgroup.manager": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.cgroup.version": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.comm": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.container.id": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.created_at": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.egid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.egroup": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.envp": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.envs": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.envs_truncated": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.parent.euid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.euser": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.file.change_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.filesystem": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.file.gid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.group": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.file.hashes": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.file.in_upper_layer": - 
return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.parent.file.inode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.mode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.modification_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.mount_id": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.file.name.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.package.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.file.package.source_version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.file.package.version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.file.path": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.file.path.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.rights": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.uid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.file.user": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.fsgid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.fsgroup": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.fsuid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.fsuser": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.gid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.group": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.interpreter.file.change_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.filesystem": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.interpreter.file.gid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.group": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.interpreter.file.hashes": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.interpreter.file.in_upper_layer": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.parent.interpreter.file.inode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.mode": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.modification_time": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.mount_id": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.interpreter.file.name.length": - 
return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.package.name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.interpreter.file.package.source_version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.interpreter.file.package.version": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.interpreter.file.path": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.interpreter.file.path.length": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.rights": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.uid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.interpreter.file.user": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.is_exec": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.parent.is_kworker": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.parent.is_thread": - return reflect.Bool, nil + return "signal", reflect.Bool, nil case "signal.target.parent.pid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.ppid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.tid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.tty_name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.uid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.parent.user": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.user_session.k8s_groups": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.user_session.k8s_uid": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.parent.user_session.k8s_username": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.pid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.ppid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.tid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.tty_name": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.uid": - return reflect.Int, nil + return "signal", reflect.Int, nil case "signal.target.user": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.user_session.k8s_groups": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.user_session.k8s_uid": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.target.user_session.k8s_username": - return reflect.String, nil + return "signal", reflect.String, nil case "signal.type": - return reflect.Int, nil + return "signal", reflect.Int, nil case "splice.file.change_time": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.file.filesystem": - return reflect.String, nil + return "splice", reflect.String, nil case "splice.file.gid": - return reflect.Int, nil + return "splice", reflect.Int, nil case 
"splice.file.group": - return reflect.String, nil + return "splice", reflect.String, nil case "splice.file.hashes": - return reflect.String, nil + return "splice", reflect.String, nil case "splice.file.in_upper_layer": - return reflect.Bool, nil + return "splice", reflect.Bool, nil case "splice.file.inode": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.file.mode": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.file.modification_time": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.file.mount_id": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.file.name": - return reflect.String, nil + return "splice", reflect.String, nil case "splice.file.name.length": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.file.package.name": - return reflect.String, nil + return "splice", reflect.String, nil case "splice.file.package.source_version": - return reflect.String, nil + return "splice", reflect.String, nil case "splice.file.package.version": - return reflect.String, nil + return "splice", reflect.String, nil case "splice.file.path": - return reflect.String, nil + return "splice", reflect.String, nil case "splice.file.path.length": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.file.rights": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.file.uid": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.file.user": - return reflect.String, nil + return "splice", reflect.String, nil case "splice.pipe_entry_flag": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.pipe_exit_flag": - return reflect.Int, nil + return "splice", reflect.Int, nil case "splice.retval": - return reflect.Int, nil + return "splice", reflect.Int, nil case "unlink.file.change_time": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.file.filesystem": - return reflect.String, nil + return "unlink", reflect.String, nil case "unlink.file.gid": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.file.group": - return reflect.String, nil + return "unlink", reflect.String, nil case "unlink.file.hashes": - return reflect.String, nil + return "unlink", reflect.String, nil case "unlink.file.in_upper_layer": - return reflect.Bool, nil + return "unlink", reflect.Bool, nil case "unlink.file.inode": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.file.mode": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.file.modification_time": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.file.mount_id": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.file.name": - return reflect.String, nil + return "unlink", reflect.String, nil case "unlink.file.name.length": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.file.package.name": - return reflect.String, nil + return "unlink", reflect.String, nil case "unlink.file.package.source_version": - return reflect.String, nil + return "unlink", reflect.String, nil case "unlink.file.package.version": - return reflect.String, nil + return "unlink", reflect.String, nil case "unlink.file.path": - return reflect.String, nil + return "unlink", reflect.String, nil case "unlink.file.path.length": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.file.rights": - return reflect.Int, nil + 
return "unlink", reflect.Int, nil case "unlink.file.uid": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.file.user": - return reflect.String, nil + return "unlink", reflect.String, nil case "unlink.flags": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.retval": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.syscall.dirfd": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.syscall.flags": - return reflect.Int, nil + return "unlink", reflect.Int, nil case "unlink.syscall.path": - return reflect.String, nil + return "unlink", reflect.String, nil case "unload_module.name": - return reflect.String, nil + return "unload_module", reflect.String, nil case "unload_module.retval": - return reflect.Int, nil + return "unload_module", reflect.Int, nil case "utimes.file.change_time": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.filesystem": - return reflect.String, nil + return "utimes", reflect.String, nil case "utimes.file.gid": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.group": - return reflect.String, nil + return "utimes", reflect.String, nil case "utimes.file.hashes": - return reflect.String, nil + return "utimes", reflect.String, nil case "utimes.file.in_upper_layer": - return reflect.Bool, nil + return "utimes", reflect.Bool, nil case "utimes.file.inode": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.mode": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.modification_time": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.mount_id": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.name": - return reflect.String, nil + return "utimes", reflect.String, nil case "utimes.file.name.length": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.package.name": - return reflect.String, nil + return "utimes", reflect.String, nil case "utimes.file.package.source_version": - return reflect.String, nil + return "utimes", reflect.String, nil case "utimes.file.package.version": - return reflect.String, nil + return "utimes", reflect.String, nil case "utimes.file.path": - return reflect.String, nil + return "utimes", reflect.String, nil case "utimes.file.path.length": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.rights": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.uid": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.file.user": - return reflect.String, nil + return "utimes", reflect.String, nil case "utimes.retval": - return reflect.Int, nil + return "utimes", reflect.Int, nil case "utimes.syscall.path": - return reflect.String, nil + return "utimes", reflect.String, nil } - return reflect.Invalid, &eval.ErrFieldNotFound{Field: field} + return "", reflect.Invalid, &eval.ErrFieldNotFound{Field: field} } func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { switch field { + case "accept.addr.family": + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "accept.addr.family"} + } + if rv < 0 || rv > math.MaxUint16 { + return &eval.ErrValueOutOfRange{Field: "accept.addr.family"} + } + ev.Accept.AddrFamily = uint16(rv) + return nil + case "accept.addr.ip": + rv, ok := value.(net.IPNet) + if !ok { + return &eval.ErrValueTypeMismatch{Field: 
"accept.addr.ip"} + } + ev.Accept.Addr.IPNet = rv + return nil + case "accept.addr.is_public": + rv, ok := value.(bool) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "accept.addr.is_public"} + } + ev.Accept.Addr.IsPublic = rv + return nil + case "accept.addr.port": + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "accept.addr.port"} + } + if rv < 0 || rv > math.MaxUint16 { + return &eval.ErrValueOutOfRange{Field: "accept.addr.port"} + } + ev.Accept.Addr.Port = uint16(rv) + return nil + case "accept.retval": + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "accept.retval"} + } + ev.Accept.SyscallEvent.Retval = int64(rv) + return nil case "bind.addr.family": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Bind.AddrFamily"} + return &eval.ErrValueTypeMismatch{Field: "bind.addr.family"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Bind.AddrFamily"} + return &eval.ErrValueOutOfRange{Field: "bind.addr.family"} } ev.Bind.AddrFamily = uint16(rv) return nil case "bind.addr.ip": rv, ok := value.(net.IPNet) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Bind.Addr.IPNet"} + return &eval.ErrValueTypeMismatch{Field: "bind.addr.ip"} } ev.Bind.Addr.IPNet = rv return nil case "bind.addr.is_public": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Bind.Addr.IsPublic"} + return &eval.ErrValueTypeMismatch{Field: "bind.addr.is_public"} } ev.Bind.Addr.IsPublic = rv return nil case "bind.addr.port": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Bind.Addr.Port"} + return &eval.ErrValueTypeMismatch{Field: "bind.addr.port"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Bind.Addr.Port"} + return &eval.ErrValueOutOfRange{Field: "bind.addr.port"} } ev.Bind.Addr.Port = uint16(rv) return nil case "bind.protocol": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Bind.Protocol"} + return &eval.ErrValueTypeMismatch{Field: "bind.protocol"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Bind.Protocol"} + return &eval.ErrValueOutOfRange{Field: "bind.protocol"} } ev.Bind.Protocol = uint16(rv) return nil case "bind.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Bind.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "bind.retval"} } ev.Bind.SyscallEvent.Retval = int64(rv) return nil case "bpf.cmd": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BPF.Cmd"} + return &eval.ErrValueTypeMismatch{Field: "bpf.cmd"} } ev.BPF.Cmd = uint32(rv) return nil case "bpf.map.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BPF.Map.Name"} + return &eval.ErrValueTypeMismatch{Field: "bpf.map.name"} } ev.BPF.Map.Name = rv return nil case "bpf.map.type": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BPF.Map.Type"} + return &eval.ErrValueTypeMismatch{Field: "bpf.map.type"} } ev.BPF.Map.Type = uint32(rv) return nil case "bpf.prog.attach_type": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BPF.Program.AttachType"} + return &eval.ErrValueTypeMismatch{Field: "bpf.prog.attach_type"} } ev.BPF.Program.AttachType = uint32(rv) return nil @@ -34976,111 +25915,111 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { ev.BPF.Program.Helpers = append(ev.BPF.Program.Helpers, uint32(i)) } default: 
- return &eval.ErrValueTypeMismatch{Field: "BPF.Program.Helpers"} + return &eval.ErrValueTypeMismatch{Field: "bpf.prog.helpers"} } return nil case "bpf.prog.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BPF.Program.Name"} + return &eval.ErrValueTypeMismatch{Field: "bpf.prog.name"} } ev.BPF.Program.Name = rv return nil case "bpf.prog.tag": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BPF.Program.Tag"} + return &eval.ErrValueTypeMismatch{Field: "bpf.prog.tag"} } ev.BPF.Program.Tag = rv return nil case "bpf.prog.type": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BPF.Program.Type"} + return &eval.ErrValueTypeMismatch{Field: "bpf.prog.type"} } ev.BPF.Program.Type = uint32(rv) return nil case "bpf.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BPF.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "bpf.retval"} } ev.BPF.SyscallEvent.Retval = int64(rv) return nil case "capset.cap_effective": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Capset.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "capset.cap_effective"} } ev.Capset.CapEffective = uint64(rv) return nil case "capset.cap_permitted": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Capset.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "capset.cap_permitted"} } ev.Capset.CapPermitted = uint64(rv) return nil case "cgroup.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CGroupContext.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "cgroup.file.inode"} } ev.CGroupContext.CGroupFile.Inode = uint64(rv) return nil case "cgroup.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CGroupContext.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "cgroup.file.mount_id"} } ev.CGroupContext.CGroupFile.MountID = uint32(rv) return nil case "cgroup.id": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CGroupContext.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "cgroup.id"} } ev.CGroupContext.CGroupID = containerutils.CGroupID(rv) return nil case "cgroup.manager": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CGroupContext.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "cgroup.manager"} } ev.CGroupContext.CGroupManager = rv return nil case "cgroup.version": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CGroupContext.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "cgroup.version"} } ev.CGroupContext.CGroupVersion = int(rv) return nil case "chdir.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.change_time"} } ev.Chdir.File.FileFields.CTime = uint64(rv) return nil case "chdir.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.filesystem"} } ev.Chdir.File.Filesystem = rv return nil case "chdir.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.gid"} } ev.Chdir.File.FileFields.GID = uint32(rv) return nil case "chdir.file.group": rv, ok := 
value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.group"} } ev.Chdir.File.FileFields.Group = rv return nil @@ -35091,51 +26030,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Chdir.File.Hashes = append(ev.Chdir.File.Hashes, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.hashes"} } return nil case "chdir.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.in_upper_layer"} } ev.Chdir.File.FileFields.InUpperLayer = rv return nil case "chdir.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.inode"} } ev.Chdir.File.FileFields.PathKey.Inode = uint64(rv) return nil case "chdir.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Chdir.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "chdir.file.mode"} } ev.Chdir.File.FileFields.Mode = uint16(rv) return nil case "chdir.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.modification_time"} } ev.Chdir.File.FileFields.MTime = uint64(rv) return nil case "chdir.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.mount_id"} } ev.Chdir.File.FileFields.PathKey.MountID = uint32(rv) return nil case "chdir.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.name"} } ev.Chdir.File.BasenameStr = rv return nil @@ -35144,28 +26083,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "chdir.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.package.name"} } ev.Chdir.File.PkgName = rv return nil case "chdir.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.package.source_version"} } ev.Chdir.File.PkgSrcVersion = rv return nil case "chdir.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.package.version"} } ev.Chdir.File.PkgVersion = rv return nil case "chdir.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.path"} } ev.Chdir.File.PathnameStr = rv return nil @@ -35174,80 +26113,80 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "chdir.file.rights": rv, ok := value.(int) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Chdir.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "chdir.file.rights"} } ev.Chdir.File.FileFields.Mode = uint16(rv) return nil case "chdir.file.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.uid"} } ev.Chdir.File.FileFields.UID = uint32(rv) return nil case "chdir.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "chdir.file.user"} } ev.Chdir.File.FileFields.User = rv return nil case "chdir.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "chdir.retval"} } ev.Chdir.SyscallEvent.Retval = int64(rv) return nil case "chdir.syscall.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chdir.SyscallContext.StrArg1"} + return &eval.ErrValueTypeMismatch{Field: "chdir.syscall.path"} } ev.Chdir.SyscallContext.StrArg1 = rv return nil case "chmod.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.change_time"} } ev.Chmod.File.FileFields.CTime = uint64(rv) return nil case "chmod.file.destination.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.Mode"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.destination.mode"} } ev.Chmod.Mode = uint32(rv) return nil case "chmod.file.destination.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.Mode"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.destination.rights"} } ev.Chmod.Mode = uint32(rv) return nil case "chmod.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.filesystem"} } ev.Chmod.File.Filesystem = rv return nil case "chmod.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.gid"} } ev.Chmod.File.FileFields.GID = uint32(rv) return nil case "chmod.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.group"} } ev.Chmod.File.FileFields.Group = rv return nil @@ -35258,51 +26197,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Chmod.File.Hashes = append(ev.Chmod.File.Hashes, rv...) 
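The *.file.hashes setter cases in this hunk share one pattern: the incoming value may be either a single hash string or a []string, anything else is rejected with a type-mismatch error that now carries the SECL field name (for example "chmod.file.hashes") instead of the Go struct path. A minimal, self-contained sketch of that pattern follows; the event and error types here are stand-ins for illustration, not the generated SECL model.

package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the generated event model and the eval error values; the real
// types in the SECL packages are richer than this.
type fileEvent struct{ Hashes []string }

type event struct {
	Chmod struct{ File fileEvent }
}

var errTypeMismatch = errors.New("value type mismatch")

// setHashes mirrors the shape of the generated "chmod.file.hashes" case:
// accept either one hash string or a []string, append to the existing slice,
// and report the SECL field name (not the Go struct path) on a bad type.
func (ev *event) setHashes(field string, value interface{}) error {
	switch rv := value.(type) {
	case string:
		ev.Chmod.File.Hashes = append(ev.Chmod.File.Hashes, rv)
	case []string:
		ev.Chmod.File.Hashes = append(ev.Chmod.File.Hashes, rv...)
	default:
		return fmt.Errorf("%w: %s", errTypeMismatch, field)
	}
	return nil
}

func main() {
	ev := &event{}
	_ = ev.setHashes("chmod.file.hashes", "sha256:abc")
	_ = ev.setHashes("chmod.file.hashes", []string{"sha1:def"})
	err := ev.setHashes("chmod.file.hashes", 42) // wrong type on purpose
	fmt.Println(ev.Chmod.File.Hashes, err)
}

The type switch keeps rule authors from having to wrap a single hash in a slice, while still surfacing a field-level error for anything that is not a string value.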
default: - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.hashes"} } return nil case "chmod.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.in_upper_layer"} } ev.Chmod.File.FileFields.InUpperLayer = rv return nil case "chmod.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.inode"} } ev.Chmod.File.FileFields.PathKey.Inode = uint64(rv) return nil case "chmod.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Chmod.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "chmod.file.mode"} } ev.Chmod.File.FileFields.Mode = uint16(rv) return nil case "chmod.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.modification_time"} } ev.Chmod.File.FileFields.MTime = uint64(rv) return nil case "chmod.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.mount_id"} } ev.Chmod.File.FileFields.PathKey.MountID = uint32(rv) return nil case "chmod.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.name"} } ev.Chmod.File.BasenameStr = rv return nil @@ -35311,28 +26250,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "chmod.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.package.name"} } ev.Chmod.File.PkgName = rv return nil case "chmod.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.package.source_version"} } ev.Chmod.File.PkgSrcVersion = rv return nil case "chmod.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.package.version"} } ev.Chmod.File.PkgVersion = rv return nil case "chmod.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.path"} } ev.Chmod.File.PathnameStr = rv return nil @@ -35341,101 +26280,101 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "chmod.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Chmod.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "chmod.file.rights"} } ev.Chmod.File.FileFields.Mode = uint16(rv) return nil case "chmod.file.uid": rv, ok := 
value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.uid"} } ev.Chmod.File.FileFields.UID = uint32(rv) return nil case "chmod.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "chmod.file.user"} } ev.Chmod.File.FileFields.User = rv return nil case "chmod.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "chmod.retval"} } ev.Chmod.SyscallEvent.Retval = int64(rv) return nil case "chmod.syscall.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.SyscallContext.IntArg2"} + return &eval.ErrValueTypeMismatch{Field: "chmod.syscall.mode"} } ev.Chmod.SyscallContext.IntArg2 = int64(rv) return nil case "chmod.syscall.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chmod.SyscallContext.StrArg1"} + return &eval.ErrValueTypeMismatch{Field: "chmod.syscall.path"} } ev.Chmod.SyscallContext.StrArg1 = rv return nil case "chown.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.change_time"} } ev.Chown.File.FileFields.CTime = uint64(rv) return nil case "chown.file.destination.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.GID"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.destination.gid"} } ev.Chown.GID = int64(rv) return nil case "chown.file.destination.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.Group"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.destination.group"} } ev.Chown.Group = rv return nil case "chown.file.destination.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.UID"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.destination.uid"} } ev.Chown.UID = int64(rv) return nil case "chown.file.destination.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.User"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.destination.user"} } ev.Chown.User = rv return nil case "chown.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.filesystem"} } ev.Chown.File.Filesystem = rv return nil case "chown.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.gid"} } ev.Chown.File.FileFields.GID = uint32(rv) return nil case "chown.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.group"} } ev.Chown.File.FileFields.Group = rv return nil @@ -35446,51 +26385,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Chown.File.Hashes = append(ev.Chown.File.Hashes, rv...) 
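Most scalar setters in this hunk follow one shape: assert the interface{} value to int, bounds-check it before narrowing to a fixed-width integer (uint16 for ports, address families, and file modes), and return ErrValueTypeMismatch or ErrValueOutOfRange keyed on the SECL field name rather than the previous struct path. A hedged, standalone sketch of that shape, with illustrative stand-in error types rather than the real eval package:

package main

import (
	"fmt"
	"math"
)

// Illustrative stand-ins for eval.ErrValueTypeMismatch / eval.ErrValueOutOfRange.
type errValueTypeMismatch struct{ Field string }

func (e *errValueTypeMismatch) Error() string { return "wrong value type for field " + e.Field }

type errValueOutOfRange struct{ Field string }

func (e *errValueOutOfRange) Error() string { return "value out of range for field " + e.Field }

// setUint16Field follows the shape of cases such as "chown.file.mode" or
// "connect.addr.port": the incoming value must be an int that fits in a
// uint16, and both errors carry the SECL field name, e.g. "chown.file.mode",
// rather than the struct path ("Chown.File.FileFields.Mode") used before.
func setUint16Field(field string, value interface{}, dst *uint16) error {
	rv, ok := value.(int)
	if !ok {
		return &errValueTypeMismatch{Field: field}
	}
	if rv < 0 || rv > math.MaxUint16 {
		return &errValueOutOfRange{Field: field}
	}
	*dst = uint16(rv)
	return nil
}

func main() {
	var mode uint16
	fmt.Println(setUint16Field("chown.file.mode", 0o644, &mode), mode) // <nil> 420
	fmt.Println(setUint16Field("chown.file.mode", 1<<20, &mode))       // out of range
	fmt.Println(setUint16Field("chown.file.mode", "0644", &mode))      // type mismatch
}

Reporting the SECL field name in both error paths keeps the message consistent with what a rule author actually wrote, which is the point of the mechanical string replacements running through the rest of this hunk.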
default: - return &eval.ErrValueTypeMismatch{Field: "Chown.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.hashes"} } return nil case "chown.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.in_upper_layer"} } ev.Chown.File.FileFields.InUpperLayer = rv return nil case "chown.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.inode"} } ev.Chown.File.FileFields.PathKey.Inode = uint64(rv) return nil case "chown.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Chown.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "chown.file.mode"} } ev.Chown.File.FileFields.Mode = uint16(rv) return nil case "chown.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.modification_time"} } ev.Chown.File.FileFields.MTime = uint64(rv) return nil case "chown.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.mount_id"} } ev.Chown.File.FileFields.PathKey.MountID = uint32(rv) return nil case "chown.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.name"} } ev.Chown.File.BasenameStr = rv return nil @@ -35499,28 +26438,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "chown.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.package.name"} } ev.Chown.File.PkgName = rv return nil case "chown.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.package.source_version"} } ev.Chown.File.PkgSrcVersion = rv return nil case "chown.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.package.version"} } ev.Chown.File.PkgVersion = rv return nil case "chown.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.path"} } ev.Chown.File.PathnameStr = rv return nil @@ -35529,103 +26468,103 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "chown.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Chown.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "chown.file.rights"} } ev.Chown.File.FileFields.Mode = uint16(rv) return nil case "chown.file.uid": rv, ok := 
value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.uid"} } ev.Chown.File.FileFields.UID = uint32(rv) return nil case "chown.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "chown.file.user"} } ev.Chown.File.FileFields.User = rv return nil case "chown.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "chown.retval"} } ev.Chown.SyscallEvent.Retval = int64(rv) return nil case "chown.syscall.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.SyscallContext.IntArg3"} + return &eval.ErrValueTypeMismatch{Field: "chown.syscall.gid"} } ev.Chown.SyscallContext.IntArg3 = int64(rv) return nil case "chown.syscall.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.SyscallContext.StrArg1"} + return &eval.ErrValueTypeMismatch{Field: "chown.syscall.path"} } ev.Chown.SyscallContext.StrArg1 = rv return nil case "chown.syscall.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Chown.SyscallContext.IntArg2"} + return &eval.ErrValueTypeMismatch{Field: "chown.syscall.uid"} } ev.Chown.SyscallContext.IntArg2 = int64(rv) return nil case "connect.addr.family": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.AddrFamily"} + return &eval.ErrValueTypeMismatch{Field: "connect.addr.family"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Connect.AddrFamily"} + return &eval.ErrValueOutOfRange{Field: "connect.addr.family"} } ev.Connect.AddrFamily = uint16(rv) return nil case "connect.addr.ip": rv, ok := value.(net.IPNet) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.Addr.IPNet"} + return &eval.ErrValueTypeMismatch{Field: "connect.addr.ip"} } ev.Connect.Addr.IPNet = rv return nil case "connect.addr.is_public": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.Addr.IsPublic"} + return &eval.ErrValueTypeMismatch{Field: "connect.addr.is_public"} } ev.Connect.Addr.IsPublic = rv return nil case "connect.addr.port": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.Addr.Port"} + return &eval.ErrValueTypeMismatch{Field: "connect.addr.port"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Connect.Addr.Port"} + return &eval.ErrValueOutOfRange{Field: "connect.addr.port"} } ev.Connect.Addr.Port = uint16(rv) return nil case "connect.protocol": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.Protocol"} + return &eval.ErrValueTypeMismatch{Field: "connect.protocol"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Connect.Protocol"} + return &eval.ErrValueOutOfRange{Field: "connect.protocol"} } ev.Connect.Protocol = uint16(rv) return nil case "connect.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Connect.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "connect.retval"} } ev.Connect.SyscallEvent.Retval = int64(rv) return nil @@ -35635,7 +26574,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"BaseEvent.ContainerContext.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "container.created_at"} } ev.BaseEvent.ContainerContext.CreatedAt = uint64(rv) return nil @@ -35645,7 +26584,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "container.id"} } ev.BaseEvent.ContainerContext.ContainerID = containerutils.ContainerID(rv) return nil @@ -35655,7 +26594,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.Runtime"} + return &eval.ErrValueTypeMismatch{Field: "container.runtime"} } ev.BaseEvent.ContainerContext.Runtime = rv return nil @@ -35669,53 +26608,53 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ContainerContext.Tags = append(ev.BaseEvent.ContainerContext.Tags, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.Tags"} + return &eval.ErrValueTypeMismatch{Field: "container.tags"} } return nil case "dns.id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DNS.ID"} + return &eval.ErrValueTypeMismatch{Field: "dns.id"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "DNS.ID"} + return &eval.ErrValueOutOfRange{Field: "dns.id"} } ev.DNS.ID = uint16(rv) return nil case "dns.question.class": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DNS.Class"} + return &eval.ErrValueTypeMismatch{Field: "dns.question.class"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "DNS.Class"} + return &eval.ErrValueOutOfRange{Field: "dns.question.class"} } ev.DNS.Class = uint16(rv) return nil case "dns.question.count": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DNS.Count"} + return &eval.ErrValueTypeMismatch{Field: "dns.question.count"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "DNS.Count"} + return &eval.ErrValueOutOfRange{Field: "dns.question.count"} } ev.DNS.Count = uint16(rv) return nil case "dns.question.length": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DNS.Size"} + return &eval.ErrValueTypeMismatch{Field: "dns.question.length"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "DNS.Size"} + return &eval.ErrValueOutOfRange{Field: "dns.question.length"} } ev.DNS.Size = uint16(rv) return nil case "dns.question.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DNS.Name"} + return &eval.ErrValueTypeMismatch{Field: "dns.question.name"} } ev.DNS.Name = rv return nil @@ -35724,52 +26663,52 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "dns.question.type": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DNS.Type"} + return &eval.ErrValueTypeMismatch{Field: "dns.question.type"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "DNS.Type"} + return &eval.ErrValueOutOfRange{Field: "dns.question.type"} } ev.DNS.Type = uint16(rv) return nil case "event.async": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Async"} + return &eval.ErrValueTypeMismatch{Field: "event.async"} } ev.Async = rv return nil 
case "event.hostname": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Hostname"} + return &eval.ErrValueTypeMismatch{Field: "event.hostname"} } ev.BaseEvent.Hostname = rv return nil case "event.origin": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Origin"} + return &eval.ErrValueTypeMismatch{Field: "event.origin"} } ev.BaseEvent.Origin = rv return nil case "event.os": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Os"} + return &eval.ErrValueTypeMismatch{Field: "event.os"} } ev.BaseEvent.Os = rv return nil case "event.service": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Service"} + return &eval.ErrValueTypeMismatch{Field: "event.service"} } ev.BaseEvent.Service = rv return nil case "event.timestamp": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.TimestampRaw"} + return &eval.ErrValueTypeMismatch{Field: "event.timestamp"} } ev.BaseEvent.TimestampRaw = uint64(rv) return nil @@ -35779,7 +26718,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Args"} + return &eval.ErrValueTypeMismatch{Field: "exec.args"} } ev.Exec.Process.Args = rv return nil @@ -35793,7 +26732,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.Argv = append(ev.Exec.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "exec.args_flags"} } return nil case "exec.args_options": @@ -35806,7 +26745,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.Argv = append(ev.Exec.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "exec.args_options"} } return nil case "exec.args_truncated": @@ -35815,7 +26754,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "exec.args_truncated"} } ev.Exec.Process.ArgsTruncated = rv return nil @@ -35829,7 +26768,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.Argv = append(ev.Exec.Process.Argv, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "exec.argv"} } return nil case "exec.argv0": @@ -35838,7 +26777,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "exec.argv0"} } ev.Exec.Process.Argv0 = rv return nil @@ -35848,7 +26787,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "exec.auid"} } ev.Exec.Process.Credentials.AUID = uint32(rv) return nil @@ -35858,7 +26797,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "exec.cap_effective"} } ev.Exec.Process.Credentials.CapEffective = uint64(rv) return nil @@ -35868,7 +26807,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "exec.cap_permitted"} } ev.Exec.Process.Credentials.CapPermitted = uint64(rv) return nil @@ -35878,7 +26817,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CGroup.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "exec.cgroup.file.inode"} } ev.Exec.Process.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -35888,7 +26827,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "exec.cgroup.file.mount_id"} } ev.Exec.Process.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -35898,7 +26837,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "exec.cgroup.id"} } ev.Exec.Process.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -35908,7 +26847,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "exec.cgroup.manager"} } ev.Exec.Process.CGroup.CGroupManager = rv return nil @@ -35918,7 +26857,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "exec.cgroup.version"} } ev.Exec.Process.CGroup.CGroupVersion = int(rv) return nil @@ -35928,7 +26867,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Comm"} + return &eval.ErrValueTypeMismatch{Field: "exec.comm"} } ev.Exec.Process.Comm = rv return nil @@ -35938,7 +26877,7 @@ func (ev *Event) 
SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "exec.container.id"} } ev.Exec.Process.ContainerID = containerutils.ContainerID(rv) return nil @@ -35948,7 +26887,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "exec.created_at"} } ev.Exec.Process.CreatedAt = uint64(rv) return nil @@ -35958,7 +26897,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "exec.egid"} } ev.Exec.Process.Credentials.EGID = uint32(rv) return nil @@ -35968,7 +26907,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "exec.egroup"} } ev.Exec.Process.Credentials.EGroup = rv return nil @@ -35982,7 +26921,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.Envp = append(ev.Exec.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "exec.envp"} } return nil case "exec.envs": @@ -35995,7 +26934,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.Envs = append(ev.Exec.Process.Envs, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "exec.envs"} } return nil case "exec.envs_truncated": @@ -36004,7 +26943,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "exec.envs_truncated"} } ev.Exec.Process.EnvsTruncated = rv return nil @@ -36014,7 +26953,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "exec.euid"} } ev.Exec.Process.Credentials.EUID = uint32(rv) return nil @@ -36024,7 +26963,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "exec.euser"} } ev.Exec.Process.Credentials.EUser = rv return nil @@ -36034,7 +26973,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.change_time"} } ev.Exec.Process.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -36044,7 +26983,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.filesystem"} } 
ev.Exec.Process.FileEvent.Filesystem = rv return nil @@ -36054,7 +26993,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.gid"} } ev.Exec.Process.FileEvent.FileFields.GID = uint32(rv) return nil @@ -36064,7 +27003,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.group"} } ev.Exec.Process.FileEvent.FileFields.Group = rv return nil @@ -36078,7 +27017,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.FileEvent.Hashes = append(ev.Exec.Process.FileEvent.Hashes, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.hashes"} } return nil case "exec.file.in_upper_layer": @@ -36087,7 +27026,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.in_upper_layer"} } ev.Exec.Process.FileEvent.FileFields.InUpperLayer = rv return nil @@ -36097,7 +27036,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.inode"} } ev.Exec.Process.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -36107,10 +27046,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Exec.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "exec.file.mode"} } ev.Exec.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -36120,7 +27059,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.modification_time"} } ev.Exec.Process.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -36130,7 +27069,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.mount_id"} } ev.Exec.Process.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -36140,7 +27079,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.name"} } ev.Exec.Process.FileEvent.BasenameStr = rv return nil @@ -36155,7 +27094,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) 
error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.package.name"} } ev.Exec.Process.FileEvent.PkgName = rv return nil @@ -36165,7 +27104,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.package.source_version"} } ev.Exec.Process.FileEvent.PkgSrcVersion = rv return nil @@ -36175,7 +27114,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.package.version"} } ev.Exec.Process.FileEvent.PkgVersion = rv return nil @@ -36185,7 +27124,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.path"} } ev.Exec.Process.FileEvent.PathnameStr = rv return nil @@ -36200,10 +27139,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Exec.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "exec.file.rights"} } ev.Exec.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -36213,7 +27152,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.uid"} } ev.Exec.Process.FileEvent.FileFields.UID = uint32(rv) return nil @@ -36223,7 +27162,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.user"} } ev.Exec.Process.FileEvent.FileFields.User = rv return nil @@ -36233,7 +27172,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "exec.fsgid"} } ev.Exec.Process.Credentials.FSGID = uint32(rv) return nil @@ -36243,7 +27182,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "exec.fsgroup"} } ev.Exec.Process.Credentials.FSGroup = rv return nil @@ -36253,7 +27192,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "exec.fsuid"} } ev.Exec.Process.Credentials.FSUID = uint32(rv) return nil @@ -36263,7 +27202,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value 
interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "exec.fsuser"} } ev.Exec.Process.Credentials.FSUser = rv return nil @@ -36273,7 +27212,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "exec.gid"} } ev.Exec.Process.Credentials.GID = uint32(rv) return nil @@ -36283,7 +27222,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "exec.group"} } ev.Exec.Process.Credentials.Group = rv return nil @@ -36293,7 +27232,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.change_time"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -36303,7 +27242,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.filesystem"} } ev.Exec.Process.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -36313,7 +27252,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.gid"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -36323,7 +27262,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.group"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -36337,7 +27276,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.LinuxBinprm.FileEvent.Hashes = append(ev.Exec.Process.LinuxBinprm.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.hashes"} } return nil case "exec.interpreter.file.in_upper_layer": @@ -36346,7 +27285,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.in_upper_layer"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -36356,7 +27295,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.inode"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -36366,10 +27305,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "exec.interpreter.file.mode"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -36379,7 +27318,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.modification_time"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -36389,7 +27328,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.mount_id"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -36399,7 +27338,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.name"} } ev.Exec.Process.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -36414,7 +27353,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.package.name"} } ev.Exec.Process.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -36424,7 +27363,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.package.source_version"} } ev.Exec.Process.LinuxBinprm.FileEvent.PkgSrcVersion = rv 
return nil @@ -36434,7 +27373,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.package.version"} } ev.Exec.Process.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -36444,7 +27383,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.path"} } ev.Exec.Process.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -36459,10 +27398,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "exec.interpreter.file.rights"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -36472,7 +27411,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.uid"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -36482,7 +27421,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "exec.interpreter.file.user"} } ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -36492,7 +27431,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "exec.is_exec"} } ev.Exec.Process.IsExec = rv return nil @@ -36502,7 +27441,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "exec.is_kworker"} } ev.Exec.Process.PIDContext.IsKworker = rv return nil @@ -36512,7 +27451,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.IsThread"} + return &eval.ErrValueTypeMismatch{Field: "exec.is_thread"} } ev.Exec.Process.IsThread = rv return nil @@ -36522,7 +27461,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "exec.pid"} } ev.Exec.Process.PIDContext.Pid = uint32(rv) return nil @@ -36532,14 +27471,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"Exec.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "exec.ppid"} } ev.Exec.Process.PPid = uint32(rv) return nil case "exec.syscall.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.SyscallContext.StrArg1"} + return &eval.ErrValueTypeMismatch{Field: "exec.syscall.path"} } ev.Exec.SyscallContext.StrArg1 = rv return nil @@ -36549,7 +27488,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "exec.tid"} } ev.Exec.Process.PIDContext.Tid = uint32(rv) return nil @@ -36559,7 +27498,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "exec.tty_name"} } ev.Exec.Process.TTYName = rv return nil @@ -36569,7 +27508,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "exec.uid"} } ev.Exec.Process.Credentials.UID = uint32(rv) return nil @@ -36579,7 +27518,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "exec.user"} } ev.Exec.Process.Credentials.User = rv return nil @@ -36593,7 +27532,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.UserSession.K8SGroups = append(ev.Exec.Process.UserSession.K8SGroups, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "exec.user_session.k8s_groups"} } return nil case "exec.user_session.k8s_uid": @@ -36602,7 +27541,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "exec.user_session.k8s_uid"} } ev.Exec.Process.UserSession.K8SUID = rv return nil @@ -36612,7 +27551,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "exec.user_session.k8s_username"} } ev.Exec.Process.UserSession.K8SUsername = rv return nil @@ -36622,7 +27561,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Args"} + return &eval.ErrValueTypeMismatch{Field: "exit.args"} } ev.Exit.Process.Args = rv return nil @@ -36636,7 +27575,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.Argv = append(ev.Exit.Process.Argv, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "exit.args_flags"} } return nil case "exit.args_options": @@ -36649,7 +27588,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.Argv = append(ev.Exit.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "exit.args_options"} } return nil case "exit.args_truncated": @@ -36658,7 +27597,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "exit.args_truncated"} } ev.Exit.Process.ArgsTruncated = rv return nil @@ -36672,7 +27611,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.Argv = append(ev.Exit.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "exit.argv"} } return nil case "exit.argv0": @@ -36681,7 +27620,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "exit.argv0"} } ev.Exit.Process.Argv0 = rv return nil @@ -36691,7 +27630,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "exit.auid"} } ev.Exit.Process.Credentials.AUID = uint32(rv) return nil @@ -36701,7 +27640,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "exit.cap_effective"} } ev.Exit.Process.Credentials.CapEffective = uint64(rv) return nil @@ -36711,14 +27650,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "exit.cap_permitted"} } ev.Exit.Process.Credentials.CapPermitted = uint64(rv) return nil case "exit.cause": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Cause"} + return &eval.ErrValueTypeMismatch{Field: "exit.cause"} } ev.Exit.Cause = uint32(rv) return nil @@ -36728,7 +27667,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CGroup.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "exit.cgroup.file.inode"} } ev.Exit.Process.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -36738,7 +27677,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "exit.cgroup.file.mount_id"} } ev.Exit.Process.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -36748,7 +27687,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - 
return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "exit.cgroup.id"} } ev.Exit.Process.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -36758,7 +27697,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "exit.cgroup.manager"} } ev.Exit.Process.CGroup.CGroupManager = rv return nil @@ -36768,14 +27707,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "exit.cgroup.version"} } ev.Exit.Process.CGroup.CGroupVersion = int(rv) return nil case "exit.code": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Code"} + return &eval.ErrValueTypeMismatch{Field: "exit.code"} } ev.Exit.Code = uint32(rv) return nil @@ -36785,7 +27724,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Comm"} + return &eval.ErrValueTypeMismatch{Field: "exit.comm"} } ev.Exit.Process.Comm = rv return nil @@ -36795,7 +27734,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "exit.container.id"} } ev.Exit.Process.ContainerID = containerutils.ContainerID(rv) return nil @@ -36805,7 +27744,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "exit.created_at"} } ev.Exit.Process.CreatedAt = uint64(rv) return nil @@ -36815,7 +27754,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "exit.egid"} } ev.Exit.Process.Credentials.EGID = uint32(rv) return nil @@ -36825,7 +27764,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "exit.egroup"} } ev.Exit.Process.Credentials.EGroup = rv return nil @@ -36839,7 +27778,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.Envp = append(ev.Exit.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "exit.envp"} } return nil case "exit.envs": @@ -36852,7 +27791,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.Envs = append(ev.Exit.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "exit.envs"} } return nil case "exit.envs_truncated": @@ -36861,7 +27800,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "exit.envs_truncated"} } ev.Exit.Process.EnvsTruncated = rv return nil @@ -36871,7 +27810,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "exit.euid"} } ev.Exit.Process.Credentials.EUID = uint32(rv) return nil @@ -36881,7 +27820,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "exit.euser"} } ev.Exit.Process.Credentials.EUser = rv return nil @@ -36891,7 +27830,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.change_time"} } ev.Exit.Process.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -36901,7 +27840,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.filesystem"} } ev.Exit.Process.FileEvent.Filesystem = rv return nil @@ -36911,7 +27850,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.gid"} } ev.Exit.Process.FileEvent.FileFields.GID = uint32(rv) return nil @@ -36921,7 +27860,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.group"} } ev.Exit.Process.FileEvent.FileFields.Group = rv return nil @@ -36935,7 +27874,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.FileEvent.Hashes = append(ev.Exit.Process.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.hashes"} } return nil case "exit.file.in_upper_layer": @@ -36944,7 +27883,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.in_upper_layer"} } ev.Exit.Process.FileEvent.FileFields.InUpperLayer = rv return nil @@ -36954,7 +27893,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.inode"} } ev.Exit.Process.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -36964,10 +27903,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Exit.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "exit.file.mode"} } ev.Exit.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -36977,7 +27916,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.modification_time"} } ev.Exit.Process.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -36987,7 +27926,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.mount_id"} } ev.Exit.Process.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -36997,7 +27936,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.name"} } ev.Exit.Process.FileEvent.BasenameStr = rv return nil @@ -37012,7 +27951,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.package.name"} } ev.Exit.Process.FileEvent.PkgName = rv return nil @@ -37022,7 +27961,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.package.source_version"} } ev.Exit.Process.FileEvent.PkgSrcVersion = rv return nil @@ -37032,7 +27971,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.package.version"} } ev.Exit.Process.FileEvent.PkgVersion = rv return 
nil @@ -37042,7 +27981,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.path"} } ev.Exit.Process.FileEvent.PathnameStr = rv return nil @@ -37057,10 +27996,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Exit.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "exit.file.rights"} } ev.Exit.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -37070,7 +28009,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.uid"} } ev.Exit.Process.FileEvent.FileFields.UID = uint32(rv) return nil @@ -37080,7 +28019,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.user"} } ev.Exit.Process.FileEvent.FileFields.User = rv return nil @@ -37090,7 +28029,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "exit.fsgid"} } ev.Exit.Process.Credentials.FSGID = uint32(rv) return nil @@ -37100,7 +28039,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "exit.fsgroup"} } ev.Exit.Process.Credentials.FSGroup = rv return nil @@ -37110,7 +28049,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "exit.fsuid"} } ev.Exit.Process.Credentials.FSUID = uint32(rv) return nil @@ -37120,7 +28059,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "exit.fsuser"} } ev.Exit.Process.Credentials.FSUser = rv return nil @@ -37130,7 +28069,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "exit.gid"} } ev.Exit.Process.Credentials.GID = uint32(rv) return nil @@ -37140,7 +28079,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "exit.group"} } ev.Exit.Process.Credentials.Group = rv return nil @@ -37150,7 +28089,7 @@ func (ev *Event) 
SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.change_time"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -37160,7 +28099,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.filesystem"} } ev.Exit.Process.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -37170,7 +28109,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.gid"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -37180,7 +28119,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.group"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -37194,7 +28133,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.LinuxBinprm.FileEvent.Hashes = append(ev.Exit.Process.LinuxBinprm.FileEvent.Hashes, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.hashes"} } return nil case "exit.interpreter.file.in_upper_layer": @@ -37203,7 +28142,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.in_upper_layer"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -37213,7 +28152,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.inode"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -37223,10 +28162,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "exit.interpreter.file.mode"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -37236,7 +28175,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"Exit.Process.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.modification_time"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -37246,7 +28185,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.mount_id"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -37256,7 +28195,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.name"} } ev.Exit.Process.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -37271,7 +28210,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.package.name"} } ev.Exit.Process.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -37281,7 +28220,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.package.source_version"} } ev.Exit.Process.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -37291,7 +28230,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.package.version"} } ev.Exit.Process.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -37301,7 +28240,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.path"} } ev.Exit.Process.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -37316,10 +28255,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "exit.interpreter.file.rights"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -37329,7 +28268,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.uid"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -37339,7 +28278,7 @@ func (ev *Event) SetFieldValue(field 
eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "exit.interpreter.file.user"} } ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -37349,7 +28288,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "exit.is_exec"} } ev.Exit.Process.IsExec = rv return nil @@ -37359,7 +28298,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "exit.is_kworker"} } ev.Exit.Process.PIDContext.IsKworker = rv return nil @@ -37369,7 +28308,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.IsThread"} + return &eval.ErrValueTypeMismatch{Field: "exit.is_thread"} } ev.Exit.Process.IsThread = rv return nil @@ -37379,7 +28318,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "exit.pid"} } ev.Exit.Process.PIDContext.Pid = uint32(rv) return nil @@ -37389,7 +28328,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "exit.ppid"} } ev.Exit.Process.PPid = uint32(rv) return nil @@ -37399,7 +28338,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "exit.tid"} } ev.Exit.Process.PIDContext.Tid = uint32(rv) return nil @@ -37409,7 +28348,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "exit.tty_name"} } ev.Exit.Process.TTYName = rv return nil @@ -37419,7 +28358,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "exit.uid"} } ev.Exit.Process.Credentials.UID = uint32(rv) return nil @@ -37429,7 +28368,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "exit.user"} } ev.Exit.Process.Credentials.User = rv return nil @@ -37443,7 +28382,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.UserSession.K8SGroups = append(ev.Exit.Process.UserSession.K8SGroups, rv...) 
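// Illustrative note, not part of the patch: throughout these hunks the generated
// SetFieldValue bodies are unchanged except for the Field string carried by the
// returned errors, which now names the SECL field ("exit.user_session.k8s_groups")
// instead of the internal Go accessor path ("Exit.Process.UserSession.K8SGroups").
// A minimal sketch of that pattern, using hypothetical stand-ins for the real
// eval error and Event types:
package main

import "fmt"

// errValueTypeMismatch mirrors the shape of eval.ErrValueTypeMismatch for
// illustration only.
type errValueTypeMismatch struct{ Field string }

func (e *errValueTypeMismatch) Error() string {
	return fmt.Sprintf("incorrect value type for field `%s`", e.Field)
}

type event struct{ TTYName string }

// setFieldValue shows the generated setter shape: assert the dynamic type and,
// on mismatch, return an error naming the SECL field rather than the Go path.
func (ev *event) setFieldValue(field string, value interface{}) error {
	switch field {
	case "exit.tty_name":
		rv, ok := value.(string)
		if !ok {
			return &errValueTypeMismatch{Field: "exit.tty_name"} // was "Exit.Process.TTYName"
		}
		ev.TTYName = rv
		return nil
	}
	return fmt.Errorf("unknown field %q", field)
}

func main() {
	ev := &event{}
	fmt.Println(ev.setFieldValue("exit.tty_name", 42)) // error now mentions exit.tty_name
}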
default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "exit.user_session.k8s_groups"} } return nil case "exit.user_session.k8s_uid": @@ -37452,7 +28391,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "exit.user_session.k8s_uid"} } ev.Exit.Process.UserSession.K8SUID = rv return nil @@ -37462,98 +28401,98 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "exit.user_session.k8s_username"} } ev.Exit.Process.UserSession.K8SUsername = rv return nil case "imds.aws.is_imds_v2": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "IMDS.AWS.IsIMDSv2"} + return &eval.ErrValueTypeMismatch{Field: "imds.aws.is_imds_v2"} } ev.IMDS.AWS.IsIMDSv2 = rv return nil case "imds.aws.security_credentials.type": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "IMDS.AWS.SecurityCredentials.Type"} + return &eval.ErrValueTypeMismatch{Field: "imds.aws.security_credentials.type"} } ev.IMDS.AWS.SecurityCredentials.Type = rv return nil case "imds.cloud_provider": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "IMDS.CloudProvider"} + return &eval.ErrValueTypeMismatch{Field: "imds.cloud_provider"} } ev.IMDS.CloudProvider = rv return nil case "imds.host": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "IMDS.Host"} + return &eval.ErrValueTypeMismatch{Field: "imds.host"} } ev.IMDS.Host = rv return nil case "imds.server": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "IMDS.Server"} + return &eval.ErrValueTypeMismatch{Field: "imds.server"} } ev.IMDS.Server = rv return nil case "imds.type": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "IMDS.Type"} + return &eval.ErrValueTypeMismatch{Field: "imds.type"} } ev.IMDS.Type = rv return nil case "imds.url": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "IMDS.URL"} + return &eval.ErrValueTypeMismatch{Field: "imds.url"} } ev.IMDS.URL = rv return nil case "imds.user_agent": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "IMDS.UserAgent"} + return &eval.ErrValueTypeMismatch{Field: "imds.user_agent"} } ev.IMDS.UserAgent = rv return nil case "link.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "link.file.change_time"} } ev.Link.Source.FileFields.CTime = uint64(rv) return nil case "link.file.destination.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.change_time"} } ev.Link.Target.FileFields.CTime = uint64(rv) return nil case "link.file.destination.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.filesystem"} } ev.Link.Target.Filesystem = rv return nil case "link.file.destination.gid": rv, ok := value.(int) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.gid"} } ev.Link.Target.FileFields.GID = uint32(rv) return nil case "link.file.destination.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.group"} } ev.Link.Target.FileFields.Group = rv return nil @@ -37564,51 +28503,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Link.Target.Hashes = append(ev.Link.Target.Hashes, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Link.Target.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.hashes"} } return nil case "link.file.destination.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.in_upper_layer"} } ev.Link.Target.FileFields.InUpperLayer = rv return nil case "link.file.destination.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.inode"} } ev.Link.Target.FileFields.PathKey.Inode = uint64(rv) return nil case "link.file.destination.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Link.Target.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "link.file.destination.mode"} } ev.Link.Target.FileFields.Mode = uint16(rv) return nil case "link.file.destination.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.modification_time"} } ev.Link.Target.FileFields.MTime = uint64(rv) return nil case "link.file.destination.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.mount_id"} } ev.Link.Target.FileFields.PathKey.MountID = uint32(rv) return nil case "link.file.destination.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.name"} } ev.Link.Target.BasenameStr = rv return nil @@ -37617,28 +28556,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "link.file.destination.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.package.name"} } ev.Link.Target.PkgName = rv return nil case "link.file.destination.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.package.source_version"} } ev.Link.Target.PkgSrcVersion = rv return nil case "link.file.destination.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: 
"link.file.destination.package.version"} } ev.Link.Target.PkgVersion = rv return nil case "link.file.destination.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.path"} } ev.Link.Target.PathnameStr = rv return nil @@ -37647,45 +28586,45 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "link.file.destination.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Link.Target.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "link.file.destination.rights"} } ev.Link.Target.FileFields.Mode = uint16(rv) return nil case "link.file.destination.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.uid"} } ev.Link.Target.FileFields.UID = uint32(rv) return nil case "link.file.destination.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Target.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "link.file.destination.user"} } ev.Link.Target.FileFields.User = rv return nil case "link.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "link.file.filesystem"} } ev.Link.Source.Filesystem = rv return nil case "link.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "link.file.gid"} } ev.Link.Source.FileFields.GID = uint32(rv) return nil case "link.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "link.file.group"} } ev.Link.Source.FileFields.Group = rv return nil @@ -37696,51 +28635,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Link.Source.Hashes = append(ev.Link.Source.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Link.Source.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "link.file.hashes"} } return nil case "link.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "link.file.in_upper_layer"} } ev.Link.Source.FileFields.InUpperLayer = rv return nil case "link.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "link.file.inode"} } ev.Link.Source.FileFields.PathKey.Inode = uint64(rv) return nil case "link.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "link.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Link.Source.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "link.file.mode"} } ev.Link.Source.FileFields.Mode = uint16(rv) return nil case "link.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "link.file.modification_time"} } ev.Link.Source.FileFields.MTime = uint64(rv) return nil case "link.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "link.file.mount_id"} } ev.Link.Source.FileFields.PathKey.MountID = uint32(rv) return nil case "link.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "link.file.name"} } ev.Link.Source.BasenameStr = rv return nil @@ -37749,28 +28688,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "link.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "link.file.package.name"} } ev.Link.Source.PkgName = rv return nil case "link.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "link.file.package.source_version"} } ev.Link.Source.PkgSrcVersion = rv return nil case "link.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "link.file.package.version"} } ev.Link.Source.PkgVersion = rv return nil case "link.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "link.file.path"} } ev.Link.Source.PathnameStr = rv return nil @@ -37779,59 +28718,59 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "link.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "link.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Link.Source.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "link.file.rights"} } ev.Link.Source.FileFields.Mode = uint16(rv) return nil case "link.file.uid": rv, ok := 
value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "link.file.uid"} } ev.Link.Source.FileFields.UID = uint32(rv) return nil case "link.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.Source.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "link.file.user"} } ev.Link.Source.FileFields.User = rv return nil case "link.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "link.retval"} } ev.Link.SyscallEvent.Retval = int64(rv) return nil case "link.syscall.destination.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.SyscallContext.StrArg2"} + return &eval.ErrValueTypeMismatch{Field: "link.syscall.destination.path"} } ev.Link.SyscallContext.StrArg2 = rv return nil case "link.syscall.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Link.SyscallContext.StrArg1"} + return &eval.ErrValueTypeMismatch{Field: "link.syscall.path"} } ev.Link.SyscallContext.StrArg1 = rv return nil case "load_module.args": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.Args"} + return &eval.ErrValueTypeMismatch{Field: "load_module.args"} } ev.LoadModule.Args = rv return nil case "load_module.args_truncated": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "load_module.args_truncated"} } ev.LoadModule.ArgsTruncated = rv return nil @@ -37842,34 +28781,34 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.LoadModule.Argv = append(ev.LoadModule.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "LoadModule.Argv"} + return &eval.ErrValueTypeMismatch{Field: "load_module.argv"} } return nil case "load_module.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.change_time"} } ev.LoadModule.File.FileFields.CTime = uint64(rv) return nil case "load_module.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.filesystem"} } ev.LoadModule.File.Filesystem = rv return nil case "load_module.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.gid"} } ev.LoadModule.File.FileFields.GID = uint32(rv) return nil case "load_module.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.group"} } ev.LoadModule.File.FileFields.Group = rv return nil @@ -37880,51 +28819,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.LoadModule.File.Hashes = append(ev.LoadModule.File.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.hashes"} } return nil case "load_module.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.in_upper_layer"} } ev.LoadModule.File.FileFields.InUpperLayer = rv return nil case "load_module.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.inode"} } ev.LoadModule.File.FileFields.PathKey.Inode = uint64(rv) return nil case "load_module.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "LoadModule.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "load_module.file.mode"} } ev.LoadModule.File.FileFields.Mode = uint16(rv) return nil case "load_module.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.modification_time"} } ev.LoadModule.File.FileFields.MTime = uint64(rv) return nil case "load_module.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.mount_id"} } ev.LoadModule.File.FileFields.PathKey.MountID = uint32(rv) return nil case "load_module.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.name"} } ev.LoadModule.File.BasenameStr = rv return nil @@ -37933,28 +28872,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "load_module.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.package.name"} } ev.LoadModule.File.PkgName = rv return nil case "load_module.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.package.source_version"} } ev.LoadModule.File.PkgSrcVersion = rv return nil case "load_module.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.package.version"} } ev.LoadModule.File.PkgVersion = rv return nil case "load_module.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.path"} } ev.LoadModule.File.PathnameStr = rv return nil @@ -37963,87 +28902,87 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "load_module.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.rights"} } if rv < 
0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "LoadModule.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "load_module.file.rights"} } ev.LoadModule.File.FileFields.Mode = uint16(rv) return nil case "load_module.file.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.uid"} } ev.LoadModule.File.FileFields.UID = uint32(rv) return nil case "load_module.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "load_module.file.user"} } ev.LoadModule.File.FileFields.User = rv return nil case "load_module.loaded_from_memory": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.LoadedFromMemory"} + return &eval.ErrValueTypeMismatch{Field: "load_module.loaded_from_memory"} } ev.LoadModule.LoadedFromMemory = rv return nil case "load_module.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.Name"} + return &eval.ErrValueTypeMismatch{Field: "load_module.name"} } ev.LoadModule.Name = rv return nil case "load_module.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "LoadModule.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "load_module.retval"} } ev.LoadModule.SyscallEvent.Retval = int64(rv) return nil case "mkdir.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.change_time"} } ev.Mkdir.File.FileFields.CTime = uint64(rv) return nil case "mkdir.file.destination.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.Mode"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.destination.mode"} } ev.Mkdir.Mode = uint32(rv) return nil case "mkdir.file.destination.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.Mode"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.destination.rights"} } ev.Mkdir.Mode = uint32(rv) return nil case "mkdir.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.filesystem"} } ev.Mkdir.File.Filesystem = rv return nil case "mkdir.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.gid"} } ev.Mkdir.File.FileFields.GID = uint32(rv) return nil case "mkdir.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.group"} } ev.Mkdir.File.FileFields.Group = rv return nil @@ -38054,51 +28993,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Mkdir.File.Hashes = append(ev.Mkdir.File.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.hashes"} } return nil case "mkdir.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.in_upper_layer"} } ev.Mkdir.File.FileFields.InUpperLayer = rv return nil case "mkdir.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.inode"} } ev.Mkdir.File.FileFields.PathKey.Inode = uint64(rv) return nil case "mkdir.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Mkdir.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "mkdir.file.mode"} } ev.Mkdir.File.FileFields.Mode = uint16(rv) return nil case "mkdir.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.modification_time"} } ev.Mkdir.File.FileFields.MTime = uint64(rv) return nil case "mkdir.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.mount_id"} } ev.Mkdir.File.FileFields.PathKey.MountID = uint32(rv) return nil case "mkdir.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.name"} } ev.Mkdir.File.BasenameStr = rv return nil @@ -38107,28 +29046,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "mkdir.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.package.name"} } ev.Mkdir.File.PkgName = rv return nil case "mkdir.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.package.source_version"} } ev.Mkdir.File.PkgSrcVersion = rv return nil case "mkdir.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.package.version"} } ev.Mkdir.File.PkgVersion = rv return nil case "mkdir.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.path"} } ev.Mkdir.File.PathnameStr = rv return nil @@ -38137,59 +29076,73 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "mkdir.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Mkdir.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "mkdir.file.rights"} } ev.Mkdir.File.FileFields.Mode = uint16(rv) return nil case "mkdir.file.uid": rv, ok := 
value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.uid"} } ev.Mkdir.File.FileFields.UID = uint32(rv) return nil case "mkdir.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.file.user"} } ev.Mkdir.File.FileFields.User = rv return nil case "mkdir.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mkdir.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "mkdir.retval"} } ev.Mkdir.SyscallEvent.Retval = int64(rv) return nil + case "mkdir.syscall.mode": + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "mkdir.syscall.mode"} + } + ev.Mkdir.SyscallContext.IntArg2 = int64(rv) + return nil + case "mkdir.syscall.path": + rv, ok := value.(string) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "mkdir.syscall.path"} + } + ev.Mkdir.SyscallContext.StrArg1 = rv + return nil case "mmap.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.change_time"} } ev.MMap.File.FileFields.CTime = uint64(rv) return nil case "mmap.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.filesystem"} } ev.MMap.File.Filesystem = rv return nil case "mmap.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.gid"} } ev.MMap.File.FileFields.GID = uint32(rv) return nil case "mmap.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.group"} } ev.MMap.File.FileFields.Group = rv return nil @@ -38200,51 +29153,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.MMap.File.Hashes = append(ev.MMap.File.Hashes, rv...) 
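// Illustrative note, not part of the patch: besides the renamed error fields,
// the hunk above adds two new cases, "mkdir.syscall.mode" and
// "mkdir.syscall.path", which store the raw syscall arguments in
// Mkdir.SyscallContext (IntArg2 / StrArg1), matching the argument-slot layout
// already used for open.syscall.* and link.syscall.*. A sketch of that slot
// mapping with stand-in types, not the real model package:
package main

import "fmt"

// syscallContext mirrors the generic argument slots seen in the diff.
type syscallContext struct {
	StrArg1 string
	IntArg2 int64
}

type mkdirEvent struct{ SyscallContext syscallContext }

func setMkdirSyscallField(ev *mkdirEvent, field string, value interface{}) error {
	switch field {
	case "mkdir.syscall.path":
		rv, ok := value.(string)
		if !ok {
			return fmt.Errorf("incorrect value type for %s", field)
		}
		ev.SyscallContext.StrArg1 = rv
	case "mkdir.syscall.mode":
		rv, ok := value.(int)
		if !ok {
			return fmt.Errorf("incorrect value type for %s", field)
		}
		ev.SyscallContext.IntArg2 = int64(rv)
	default:
		return fmt.Errorf("unknown field %q", field)
	}
	return nil
}

func main() {
	ev := &mkdirEvent{}
	_ = setMkdirSyscallField(ev, "mkdir.syscall.path", "/tmp/x")
	_ = setMkdirSyscallField(ev, "mkdir.syscall.mode", 0o755)
	fmt.Printf("%+v\n", ev.SyscallContext)
}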
default: - return &eval.ErrValueTypeMismatch{Field: "MMap.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.hashes"} } return nil case "mmap.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.in_upper_layer"} } ev.MMap.File.FileFields.InUpperLayer = rv return nil case "mmap.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.inode"} } ev.MMap.File.FileFields.PathKey.Inode = uint64(rv) return nil case "mmap.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "MMap.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "mmap.file.mode"} } ev.MMap.File.FileFields.Mode = uint16(rv) return nil case "mmap.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.modification_time"} } ev.MMap.File.FileFields.MTime = uint64(rv) return nil case "mmap.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.mount_id"} } ev.MMap.File.FileFields.PathKey.MountID = uint32(rv) return nil case "mmap.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.name"} } ev.MMap.File.BasenameStr = rv return nil @@ -38253,28 +29206,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "mmap.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.package.name"} } ev.MMap.File.PkgName = rv return nil case "mmap.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.package.source_version"} } ev.MMap.File.PkgSrcVersion = rv return nil case "mmap.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.package.version"} } ev.MMap.File.PkgVersion = rv return nil case "mmap.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.path"} } ev.MMap.File.PathnameStr = rv return nil @@ -38283,302 +29236,453 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "mmap.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "MMap.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "mmap.file.rights"} } ev.MMap.File.FileFields.Mode = uint16(rv) return nil case "mmap.file.uid": rv, ok := value.(int) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.uid"} } ev.MMap.File.FileFields.UID = uint32(rv) return nil case "mmap.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "mmap.file.user"} } ev.MMap.File.FileFields.User = rv return nil case "mmap.flags": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.Flags"} + return &eval.ErrValueTypeMismatch{Field: "mmap.flags"} } ev.MMap.Flags = uint64(rv) return nil case "mmap.protection": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.Protection"} + return &eval.ErrValueTypeMismatch{Field: "mmap.protection"} } ev.MMap.Protection = uint64(rv) return nil case "mmap.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MMap.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "mmap.retval"} } ev.MMap.SyscallEvent.Retval = int64(rv) return nil case "mount.fs_type": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mount.Mount.FSType"} + return &eval.ErrValueTypeMismatch{Field: "mount.fs_type"} } ev.Mount.Mount.FSType = rv return nil case "mount.mountpoint.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mount.MountPointPath"} + return &eval.ErrValueTypeMismatch{Field: "mount.mountpoint.path"} } ev.Mount.MountPointPath = rv return nil case "mount.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mount.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "mount.retval"} } ev.Mount.SyscallEvent.Retval = int64(rv) return nil case "mount.root.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mount.MountRootPath"} + return &eval.ErrValueTypeMismatch{Field: "mount.root.path"} } ev.Mount.MountRootPath = rv return nil case "mount.source.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mount.MountSourcePath"} + return &eval.ErrValueTypeMismatch{Field: "mount.source.path"} } ev.Mount.MountSourcePath = rv return nil case "mount.syscall.fs_type": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mount.SyscallContext.StrArg3"} + return &eval.ErrValueTypeMismatch{Field: "mount.syscall.fs_type"} } ev.Mount.SyscallContext.StrArg3 = rv return nil case "mount.syscall.mountpoint.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mount.SyscallContext.StrArg2"} + return &eval.ErrValueTypeMismatch{Field: "mount.syscall.mountpoint.path"} } ev.Mount.SyscallContext.StrArg2 = rv return nil case "mount.syscall.source.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Mount.SyscallContext.StrArg1"} + return &eval.ErrValueTypeMismatch{Field: "mount.syscall.source.path"} } ev.Mount.SyscallContext.StrArg1 = rv return nil case "mprotect.req_protection": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MProtect.ReqProtection"} + return &eval.ErrValueTypeMismatch{Field: "mprotect.req_protection"} } ev.MProtect.ReqProtection = int(rv) return nil case "mprotect.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MProtect.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "mprotect.retval"} } ev.MProtect.SyscallEvent.Retval = int64(rv) return nil case 
"mprotect.vm_protection": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "MProtect.VMProtection"} + return &eval.ErrValueTypeMismatch{Field: "mprotect.vm_protection"} } ev.MProtect.VMProtection = int(rv) return nil case "network.destination.ip": rv, ok := value.(net.IPNet) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Destination.IPNet"} + return &eval.ErrValueTypeMismatch{Field: "network.destination.ip"} } ev.NetworkContext.Destination.IPNet = rv return nil case "network.destination.is_public": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Destination.IsPublic"} + return &eval.ErrValueTypeMismatch{Field: "network.destination.is_public"} } ev.NetworkContext.Destination.IsPublic = rv return nil case "network.destination.port": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Destination.Port"} + return &eval.ErrValueTypeMismatch{Field: "network.destination.port"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "NetworkContext.Destination.Port"} + return &eval.ErrValueOutOfRange{Field: "network.destination.port"} } ev.NetworkContext.Destination.Port = uint16(rv) return nil case "network.device.ifname": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Device.IfName"} + return &eval.ErrValueTypeMismatch{Field: "network.device.ifname"} } ev.NetworkContext.Device.IfName = rv return nil case "network.l3_protocol": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.L3Protocol"} + return &eval.ErrValueTypeMismatch{Field: "network.l3_protocol"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "NetworkContext.L3Protocol"} + return &eval.ErrValueOutOfRange{Field: "network.l3_protocol"} } ev.NetworkContext.L3Protocol = uint16(rv) return nil case "network.l4_protocol": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.L4Protocol"} + return &eval.ErrValueTypeMismatch{Field: "network.l4_protocol"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "NetworkContext.L4Protocol"} + return &eval.ErrValueOutOfRange{Field: "network.l4_protocol"} } ev.NetworkContext.L4Protocol = uint16(rv) return nil + case "network.network_direction": + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network.network_direction"} + } + ev.NetworkContext.NetworkDirection = uint32(rv) + return nil case "network.size": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Size"} + return &eval.ErrValueTypeMismatch{Field: "network.size"} } ev.NetworkContext.Size = uint32(rv) return nil case "network.source.ip": rv, ok := value.(net.IPNet) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Source.IPNet"} + return &eval.ErrValueTypeMismatch{Field: "network.source.ip"} } ev.NetworkContext.Source.IPNet = rv return nil case "network.source.is_public": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Source.IsPublic"} + return &eval.ErrValueTypeMismatch{Field: "network.source.is_public"} } ev.NetworkContext.Source.IsPublic = rv return nil case "network.source.port": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "NetworkContext.Source.Port"} + return &eval.ErrValueTypeMismatch{Field: "network.source.port"} } if rv < 0 || rv > math.MaxUint16 { - return 
&eval.ErrValueOutOfRange{Field: "NetworkContext.Source.Port"} + return &eval.ErrValueOutOfRange{Field: "network.source.port"} } ev.NetworkContext.Source.Port = uint16(rv) return nil + case "network_flow_monitor.device.ifname": + rv, ok := value.(string) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.device.ifname"} + } + ev.NetworkFlowMonitor.Device.IfName = rv + return nil + case "network_flow_monitor.flows.destination.ip": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(net.IPNet) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.destination.ip"} + } + ev.NetworkFlowMonitor.Flows[0].Destination.IPNet = rv + return nil + case "network_flow_monitor.flows.destination.is_public": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(bool) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.destination.is_public"} + } + ev.NetworkFlowMonitor.Flows[0].Destination.IsPublic = rv + return nil + case "network_flow_monitor.flows.destination.port": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.destination.port"} + } + if rv < 0 || rv > math.MaxUint16 { + return &eval.ErrValueOutOfRange{Field: "network_flow_monitor.flows.destination.port"} + } + ev.NetworkFlowMonitor.Flows[0].Destination.Port = uint16(rv) + return nil + case "network_flow_monitor.flows.egress.data_size": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.egress.data_size"} + } + ev.NetworkFlowMonitor.Flows[0].Egress.DataSize = uint64(rv) + return nil + case "network_flow_monitor.flows.egress.packet_count": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.egress.packet_count"} + } + ev.NetworkFlowMonitor.Flows[0].Egress.PacketCount = uint64(rv) + return nil + case "network_flow_monitor.flows.ingress.data_size": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.ingress.data_size"} + } + ev.NetworkFlowMonitor.Flows[0].Ingress.DataSize = uint64(rv) + return nil + case "network_flow_monitor.flows.ingress.packet_count": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.ingress.packet_count"} + } + ev.NetworkFlowMonitor.Flows[0].Ingress.PacketCount = uint64(rv) + return nil + case "network_flow_monitor.flows.l3_protocol": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.l3_protocol"} + } + if rv < 0 
|| rv > math.MaxUint16 { + return &eval.ErrValueOutOfRange{Field: "network_flow_monitor.flows.l3_protocol"} + } + ev.NetworkFlowMonitor.Flows[0].L3Protocol = uint16(rv) + return nil + case "network_flow_monitor.flows.l4_protocol": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.l4_protocol"} + } + if rv < 0 || rv > math.MaxUint16 { + return &eval.ErrValueOutOfRange{Field: "network_flow_monitor.flows.l4_protocol"} + } + ev.NetworkFlowMonitor.Flows[0].L4Protocol = uint16(rv) + return nil + case "network_flow_monitor.flows.length": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + return &eval.ErrFieldReadOnly{Field: "network_flow_monitor.flows.length"} + case "network_flow_monitor.flows.source.ip": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(net.IPNet) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.source.ip"} + } + ev.NetworkFlowMonitor.Flows[0].Source.IPNet = rv + return nil + case "network_flow_monitor.flows.source.is_public": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(bool) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.source.is_public"} + } + ev.NetworkFlowMonitor.Flows[0].Source.IsPublic = rv + return nil + case "network_flow_monitor.flows.source.port": + if len(ev.NetworkFlowMonitor.Flows) == 0 { + ev.NetworkFlowMonitor.Flows = append(ev.NetworkFlowMonitor.Flows, Flow{}) + } + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "network_flow_monitor.flows.source.port"} + } + if rv < 0 || rv > math.MaxUint16 { + return &eval.ErrValueOutOfRange{Field: "network_flow_monitor.flows.source.port"} + } + ev.NetworkFlowMonitor.Flows[0].Source.Port = uint16(rv) + return nil case "ondemand.arg1.str": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OnDemand.Arg1Str"} + return &eval.ErrValueTypeMismatch{Field: "ondemand.arg1.str"} } ev.OnDemand.Arg1Str = rv return nil case "ondemand.arg1.uint": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OnDemand.Arg1Uint"} + return &eval.ErrValueTypeMismatch{Field: "ondemand.arg1.uint"} } ev.OnDemand.Arg1Uint = uint64(rv) return nil case "ondemand.arg2.str": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OnDemand.Arg2Str"} + return &eval.ErrValueTypeMismatch{Field: "ondemand.arg2.str"} } ev.OnDemand.Arg2Str = rv return nil case "ondemand.arg2.uint": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OnDemand.Arg2Uint"} + return &eval.ErrValueTypeMismatch{Field: "ondemand.arg2.uint"} } ev.OnDemand.Arg2Uint = uint64(rv) return nil case "ondemand.arg3.str": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OnDemand.Arg3Str"} + return &eval.ErrValueTypeMismatch{Field: "ondemand.arg3.str"} } ev.OnDemand.Arg3Str = rv return nil case "ondemand.arg3.uint": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OnDemand.Arg3Uint"} + return &eval.ErrValueTypeMismatch{Field: "ondemand.arg3.uint"} } ev.OnDemand.Arg3Uint = uint64(rv) return nil case "ondemand.arg4.str": 
rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OnDemand.Arg4Str"} + return &eval.ErrValueTypeMismatch{Field: "ondemand.arg4.str"} } ev.OnDemand.Arg4Str = rv return nil case "ondemand.arg4.uint": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OnDemand.Arg4Uint"} + return &eval.ErrValueTypeMismatch{Field: "ondemand.arg4.uint"} } ev.OnDemand.Arg4Uint = uint64(rv) return nil case "ondemand.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OnDemand.Name"} + return &eval.ErrValueTypeMismatch{Field: "ondemand.name"} } ev.OnDemand.Name = rv return nil case "open.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "open.file.change_time"} } ev.Open.File.FileFields.CTime = uint64(rv) return nil case "open.file.destination.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.Mode"} + return &eval.ErrValueTypeMismatch{Field: "open.file.destination.mode"} } ev.Open.Mode = uint32(rv) return nil case "open.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "open.file.filesystem"} } ev.Open.File.Filesystem = rv return nil case "open.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "open.file.gid"} } ev.Open.File.FileFields.GID = uint32(rv) return nil case "open.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "open.file.group"} } ev.Open.File.FileFields.Group = rv return nil @@ -38589,51 +29693,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Open.File.Hashes = append(ev.Open.File.Hashes, rv...) 
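// Illustrative note, not part of the patch: the network_flow_monitor.flows.*
// cases added a few hunks above follow a slice pattern: if the Flows slice is
// still empty the setter appends a zero Flow{} first, then writes into
// Flows[0]; the derived field "network_flow_monitor.flows.length" is rejected
// with eval.ErrFieldReadOnly. A sketch of that lazily-initialised slice setter
// with stand-in types, not the real model package:
package main

import (
	"errors"
	"fmt"
	"net"
)

type flow struct {
	Destination net.IPNet
	L4Protocol  uint16
}

type networkFlowMonitor struct{ Flows []flow }

func setFlowField(m *networkFlowMonitor, field string, value interface{}) error {
	// Ensure there is at least one element to write into.
	if len(m.Flows) == 0 {
		m.Flows = append(m.Flows, flow{})
	}
	switch field {
	case "network_flow_monitor.flows.destination.ip":
		rv, ok := value.(net.IPNet)
		if !ok {
			return fmt.Errorf("incorrect value type for %s", field)
		}
		m.Flows[0].Destination = rv
	case "network_flow_monitor.flows.length":
		// Length is derived from the slice itself and cannot be set directly.
		return errors.New("network_flow_monitor.flows.length is read-only")
	default:
		return fmt.Errorf("unknown field %q", field)
	}
	return nil
}

func main() {
	m := &networkFlowMonitor{}
	_, ipnet, _ := net.ParseCIDR("10.0.0.0/24")
	fmt.Println(setFlowField(m, "network_flow_monitor.flows.destination.ip", *ipnet))
	fmt.Println(len(m.Flows), m.Flows[0].Destination.String())
}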
default: - return &eval.ErrValueTypeMismatch{Field: "Open.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "open.file.hashes"} } return nil case "open.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "open.file.in_upper_layer"} } ev.Open.File.FileFields.InUpperLayer = rv return nil case "open.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "open.file.inode"} } ev.Open.File.FileFields.PathKey.Inode = uint64(rv) return nil case "open.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "open.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Open.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "open.file.mode"} } ev.Open.File.FileFields.Mode = uint16(rv) return nil case "open.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "open.file.modification_time"} } ev.Open.File.FileFields.MTime = uint64(rv) return nil case "open.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "open.file.mount_id"} } ev.Open.File.FileFields.PathKey.MountID = uint32(rv) return nil case "open.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "open.file.name"} } ev.Open.File.BasenameStr = rv return nil @@ -38642,28 +29746,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "open.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "open.file.package.name"} } ev.Open.File.PkgName = rv return nil case "open.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "open.file.package.source_version"} } ev.Open.File.PkgSrcVersion = rv return nil case "open.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "open.file.package.version"} } ev.Open.File.PkgVersion = rv return nil case "open.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "open.file.path"} } ev.Open.File.PathnameStr = rv return nil @@ -38672,158 +29776,165 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "open.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "open.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Open.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "open.file.rights"} } ev.Open.File.FileFields.Mode = uint16(rv) return nil case "open.file.uid": rv, ok := value.(int) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "open.file.uid"} } ev.Open.File.FileFields.UID = uint32(rv) return nil case "open.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "open.file.user"} } ev.Open.File.FileFields.User = rv return nil case "open.flags": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.Flags"} + return &eval.ErrValueTypeMismatch{Field: "open.flags"} } ev.Open.Flags = uint32(rv) return nil case "open.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "open.retval"} } ev.Open.SyscallEvent.Retval = int64(rv) return nil case "open.syscall.flags": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.SyscallContext.IntArg2"} + return &eval.ErrValueTypeMismatch{Field: "open.syscall.flags"} } ev.Open.SyscallContext.IntArg2 = int64(rv) return nil case "open.syscall.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.SyscallContext.IntArg3"} + return &eval.ErrValueTypeMismatch{Field: "open.syscall.mode"} } ev.Open.SyscallContext.IntArg3 = int64(rv) return nil case "open.syscall.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Open.SyscallContext.StrArg1"} + return &eval.ErrValueTypeMismatch{Field: "open.syscall.path"} } ev.Open.SyscallContext.StrArg1 = rv return nil case "packet.destination.ip": rv, ok := value.(net.IPNet) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Destination.IPNet"} + return &eval.ErrValueTypeMismatch{Field: "packet.destination.ip"} } ev.RawPacket.NetworkContext.Destination.IPNet = rv return nil case "packet.destination.is_public": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Destination.IsPublic"} + return &eval.ErrValueTypeMismatch{Field: "packet.destination.is_public"} } ev.RawPacket.NetworkContext.Destination.IsPublic = rv return nil case "packet.destination.port": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Destination.Port"} + return &eval.ErrValueTypeMismatch{Field: "packet.destination.port"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "RawPacket.NetworkContext.Destination.Port"} + return &eval.ErrValueOutOfRange{Field: "packet.destination.port"} } ev.RawPacket.NetworkContext.Destination.Port = uint16(rv) return nil case "packet.device.ifname": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Device.IfName"} + return &eval.ErrValueTypeMismatch{Field: "packet.device.ifname"} } ev.RawPacket.NetworkContext.Device.IfName = rv return nil case "packet.filter": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.Filter"} + return &eval.ErrValueTypeMismatch{Field: "packet.filter"} } ev.RawPacket.Filter = rv return nil case "packet.l3_protocol": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.L3Protocol"} + return &eval.ErrValueTypeMismatch{Field: "packet.l3_protocol"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "RawPacket.NetworkContext.L3Protocol"} + return &eval.ErrValueOutOfRange{Field: "packet.l3_protocol"} } 
ev.RawPacket.NetworkContext.L3Protocol = uint16(rv) return nil case "packet.l4_protocol": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.L4Protocol"} + return &eval.ErrValueTypeMismatch{Field: "packet.l4_protocol"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "RawPacket.NetworkContext.L4Protocol"} + return &eval.ErrValueOutOfRange{Field: "packet.l4_protocol"} } ev.RawPacket.NetworkContext.L4Protocol = uint16(rv) return nil + case "packet.network_direction": + rv, ok := value.(int) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "packet.network_direction"} + } + ev.RawPacket.NetworkContext.NetworkDirection = uint32(rv) + return nil case "packet.size": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Size"} + return &eval.ErrValueTypeMismatch{Field: "packet.size"} } ev.RawPacket.NetworkContext.Size = uint32(rv) return nil case "packet.source.ip": rv, ok := value.(net.IPNet) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Source.IPNet"} + return &eval.ErrValueTypeMismatch{Field: "packet.source.ip"} } ev.RawPacket.NetworkContext.Source.IPNet = rv return nil case "packet.source.is_public": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Source.IsPublic"} + return &eval.ErrValueTypeMismatch{Field: "packet.source.is_public"} } ev.RawPacket.NetworkContext.Source.IsPublic = rv return nil case "packet.source.port": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.NetworkContext.Source.Port"} + return &eval.ErrValueTypeMismatch{Field: "packet.source.port"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "RawPacket.NetworkContext.Source.Port"} + return &eval.ErrValueOutOfRange{Field: "packet.source.port"} } ev.RawPacket.NetworkContext.Source.Port = uint16(rv) return nil case "packet.tls.version": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RawPacket.TLSContext.Version"} + return &eval.ErrValueTypeMismatch{Field: "packet.tls.version"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "RawPacket.TLSContext.Version"} + return &eval.ErrValueOutOfRange{Field: "packet.tls.version"} } ev.RawPacket.TLSContext.Version = uint16(rv) return nil @@ -38836,7 +29947,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Args"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.args"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Args = rv return nil @@ -38853,7 +29964,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.args_flags"} } return nil case "process.ancestors.args_options": @@ -38869,7 +29980,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.args_options"} } return nil case "process.ancestors.args_truncated": @@ -38881,7 +29992,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.args_truncated"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.ArgsTruncated = rv return nil @@ -38898,7 +30009,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.argv"} } return nil case "process.ancestors.argv0": @@ -38910,7 +30021,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.argv0"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Argv0 = rv return nil @@ -38923,7 +30034,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.auid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.AUID = uint32(rv) return nil @@ -38936,7 +30047,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.cap_effective"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.CapEffective = uint64(rv) return nil @@ -38949,7 +30060,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.cap_permitted"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.CapPermitted = uint64(rv) return nil @@ -38962,7 +30073,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.cgroup.file.inode"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -38975,7 +30086,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.cgroup.file.mount_id"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -38988,7 +30099,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.cgroup.id"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -39001,7 +30112,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.cgroup.manager"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupManager = rv return nil @@ -39014,7 +30125,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.cgroup.version"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CGroup.CGroupVersion = int(rv) return nil @@ -39027,7 +30138,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Comm"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.comm"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Comm = rv return nil @@ -39040,7 +30151,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.container.id"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.ContainerID = containerutils.ContainerID(rv) return nil @@ -39053,7 +30164,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.created_at"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CreatedAt = uint64(rv) return nil @@ -39066,7 +30177,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.egid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.EGID = uint32(rv) return nil @@ -39079,7 +30190,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.egroup"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.EGroup = rv return nil @@ -39096,7 +30207,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envp = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.envp"} } return nil case "process.ancestors.envs": @@ -39112,7 +30223,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envs = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envs, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.envs"} } return nil case "process.ancestors.envs_truncated": @@ -39124,7 +30235,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.envs_truncated"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.EnvsTruncated = rv return nil @@ -39137,7 +30248,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.euid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.EUID = uint32(rv) return nil @@ -39150,7 +30261,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.euser"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.EUser = rv return nil @@ -39163,7 +30274,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.change_time"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -39176,7 +30287,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.filesystem"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.Filesystem = rv return nil @@ -39189,7 +30300,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.gid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.GID = uint32(rv) return nil @@ -39202,7 +30313,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.group"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.Group = rv return nil @@ -39219,7 +30330,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.Hashes = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.Hashes, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.hashes"} } return nil case "process.ancestors.file.in_upper_layer": @@ -39231,7 +30342,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.in_upper_layer"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.InUpperLayer = rv return nil @@ -39244,7 +30355,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.inode"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -39257,10 +30368,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.ancestors.file.mode"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -39273,7 +30384,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.modification_time"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -39286,7 +30397,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.mount_id"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -39299,7 +30410,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.name"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.BasenameStr = rv return nil @@ -39320,7 +30431,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.package.name"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PkgName = rv return nil @@ -39333,7 +30444,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.package.source_version"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PkgSrcVersion = rv return nil @@ -39346,7 +30457,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.package.version"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PkgVersion = rv return nil @@ -39359,7 +30470,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.path"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PathnameStr = rv return nil @@ -39380,10 +30491,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.ancestors.file.rights"} } 
ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -39396,7 +30507,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.uid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.UID = uint32(rv) return nil @@ -39409,7 +30520,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.user"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.FileFields.User = rv return nil @@ -39422,7 +30533,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.fsgid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.FSGID = uint32(rv) return nil @@ -39435,7 +30546,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.fsgroup"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.FSGroup = rv return nil @@ -39448,7 +30559,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.fsuid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.FSUID = uint32(rv) return nil @@ -39461,7 +30572,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.fsuser"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.FSUser = rv return nil @@ -39474,7 +30585,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.gid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.GID = uint32(rv) return nil @@ -39487,7 +30598,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.group"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.Group = rv return nil 
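The setter cases in these hunks all follow one generated pattern; only the error label changes. Each case type-asserts the incoming interface{} value, range-checks integers destined for uint16 fields, and now reports the SECL field name (for example "process.ancestors.file.rights") instead of the Go struct path. Below is a minimal, self-contained sketch of that pattern; ErrValueTypeMismatch and ErrValueOutOfRange here are simplified stand-ins for the real eval package errors, not the actual types.

package main

import (
	"fmt"
	"math"
)

// Simplified stand-ins for eval.ErrValueTypeMismatch and eval.ErrValueOutOfRange.
type ErrValueTypeMismatch struct{ Field string }

func (e *ErrValueTypeMismatch) Error() string {
	return fmt.Sprintf("invalid type for field %s", e.Field)
}

type ErrValueOutOfRange struct{ Field string }

func (e *ErrValueOutOfRange) Error() string {
	return fmt.Sprintf("value out of range for field %s", e.Field)
}

// setUint16Field mirrors the repeated shape of the generated cases: assert the
// value to int, bound it to uint16, and key both errors on the SECL field name.
func setUint16Field(field string, value interface{}, dst *uint16) error {
	rv, ok := value.(int)
	if !ok {
		return &ErrValueTypeMismatch{Field: field}
	}
	if rv < 0 || rv > math.MaxUint16 {
		return &ErrValueOutOfRange{Field: field}
	}
	*dst = uint16(rv)
	return nil
}

func main() {
	var mode uint16
	if err := setUint16Field("open.file.mode", 0o644, &mode); err != nil {
		fmt.Println(err)
	}
	fmt.Printf("mode=%o\n", mode) // mode=644

	if err := setUint16Field("open.file.mode", 1<<20, &mode); err != nil {
		fmt.Println(err) // value out of range for field open.file.mode
	}
}

With the relabelling, a mistyped or out-of-range value surfaces the field exactly as it is written in a SECL rule, rather than as an internal Go path.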
@@ -39500,7 +30611,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.change_time"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -39513,7 +30624,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.filesystem"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -39526,7 +30637,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.gid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -39539,7 +30650,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.group"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -39556,7 +30667,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.hashes"} } return nil case "process.ancestors.interpreter.file.in_upper_layer": @@ -39568,7 +30679,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.in_upper_layer"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -39581,7 +30692,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.inode"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -39594,10 +30705,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.ancestors.interpreter.file.mode"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -39610,7 +30721,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.modification_time"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -39623,7 +30734,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.mount_id"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -39636,7 +30747,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.name"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -39657,7 +30768,7 
@@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.package.name"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -39670,7 +30781,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.package.source_version"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -39683,7 +30794,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.package.version"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -39696,7 +30807,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.path"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -39717,10 +30828,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.ancestors.interpreter.file.rights"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -39733,7 +30844,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.uid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -39746,7 +30857,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.interpreter.file.user"} } 
ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -39759,7 +30870,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.is_exec"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.IsExec = rv return nil @@ -39772,7 +30883,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.is_kworker"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.IsKworker = rv return nil @@ -39785,7 +30896,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.IsThread"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.is_thread"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.IsThread = rv return nil @@ -39806,7 +30917,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.pid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.Pid = uint32(rv) return nil @@ -39819,7 +30930,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.ppid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PPid = uint32(rv) return nil @@ -39832,7 +30943,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.tid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.Tid = uint32(rv) return nil @@ -39845,7 +30956,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.tty_name"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.TTYName = rv return nil @@ -39858,7 +30969,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.uid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.UID = uint32(rv) return nil @@ -39871,7 +30982,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if 
!ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.user"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Credentials.User = rv return nil @@ -39888,7 +30999,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.UserSession.K8SGroups = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.UserSession.K8SGroups, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.user_session.k8s_groups"} } return nil case "process.ancestors.user_session.k8s_uid": @@ -39900,7 +31011,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.user_session.k8s_uid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.UserSession.K8SUID = rv return nil @@ -39913,7 +31024,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.user_session.k8s_username"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.UserSession.K8SUsername = rv return nil @@ -39923,7 +31034,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Args"} + return &eval.ErrValueTypeMismatch{Field: "process.args"} } ev.BaseEvent.ProcessContext.Process.Args = rv return nil @@ -39937,7 +31048,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "process.args_flags"} } return nil case "process.args_options": @@ -39950,7 +31061,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "process.args_options"} } return nil case "process.args_truncated": @@ -39959,7 +31070,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "process.args_truncated"} } ev.BaseEvent.ProcessContext.Process.ArgsTruncated = rv return nil @@ -39973,7 +31084,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.Argv = append(ev.BaseEvent.ProcessContext.Process.Argv, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "process.argv"} } return nil case "process.argv0": @@ -39982,7 +31093,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "process.argv0"} } ev.BaseEvent.ProcessContext.Process.Argv0 = rv return nil @@ -39992,7 +31103,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "process.auid"} } ev.BaseEvent.ProcessContext.Process.Credentials.AUID = uint32(rv) return nil @@ -40002,7 +31113,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "process.cap_effective"} } ev.BaseEvent.ProcessContext.Process.Credentials.CapEffective = uint64(rv) return nil @@ -40012,7 +31123,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "process.cap_permitted"} } ev.BaseEvent.ProcessContext.Process.Credentials.CapPermitted = uint64(rv) return nil @@ -40022,7 +31133,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CGroup.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "process.cgroup.file.inode"} } ev.BaseEvent.ProcessContext.Process.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -40032,7 +31143,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "process.cgroup.file.mount_id"} } ev.BaseEvent.ProcessContext.Process.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -40042,7 +31153,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "process.cgroup.id"} } ev.BaseEvent.ProcessContext.Process.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -40052,7 +31163,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "process.cgroup.manager"} } ev.BaseEvent.ProcessContext.Process.CGroup.CGroupManager = rv return nil @@ -40062,7 +31173,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.cgroup.version"} } 
ev.BaseEvent.ProcessContext.Process.CGroup.CGroupVersion = int(rv) return nil @@ -40072,7 +31183,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Comm"} + return &eval.ErrValueTypeMismatch{Field: "process.comm"} } ev.BaseEvent.ProcessContext.Process.Comm = rv return nil @@ -40082,7 +31193,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "process.container.id"} } ev.BaseEvent.ProcessContext.Process.ContainerID = containerutils.ContainerID(rv) return nil @@ -40092,7 +31203,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "process.created_at"} } ev.BaseEvent.ProcessContext.Process.CreatedAt = uint64(rv) return nil @@ -40102,7 +31213,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "process.egid"} } ev.BaseEvent.ProcessContext.Process.Credentials.EGID = uint32(rv) return nil @@ -40112,7 +31223,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "process.egroup"} } ev.BaseEvent.ProcessContext.Process.Credentials.EGroup = rv return nil @@ -40126,7 +31237,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.Envp = append(ev.BaseEvent.ProcessContext.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "process.envp"} } return nil case "process.envs": @@ -40139,7 +31250,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.Envs = append(ev.BaseEvent.ProcessContext.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "process.envs"} } return nil case "process.envs_truncated": @@ -40148,7 +31259,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "process.envs_truncated"} } ev.BaseEvent.ProcessContext.Process.EnvsTruncated = rv return nil @@ -40158,7 +31269,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "process.euid"} } ev.BaseEvent.ProcessContext.Process.Credentials.EUID = uint32(rv) return nil @@ -40168,7 +31279,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "process.euser"} } ev.BaseEvent.ProcessContext.Process.Credentials.EUser = rv return nil @@ -40178,7 +31289,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "process.file.change_time"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -40188,7 +31299,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "process.file.filesystem"} } ev.BaseEvent.ProcessContext.Process.FileEvent.Filesystem = rv return nil @@ -40198,7 +31309,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "process.file.gid"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.GID = uint32(rv) return nil @@ -40208,7 +31319,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "process.file.group"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.Group = rv return nil @@ -40222,7 +31333,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.FileEvent.Hashes = append(ev.BaseEvent.ProcessContext.Process.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "process.file.hashes"} } return nil case "process.file.in_upper_layer": @@ -40231,7 +31342,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "process.file.in_upper_layer"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.InUpperLayer = rv return nil @@ -40241,7 +31352,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "process.file.inode"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -40251,10 +31362,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.file.mode"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -40264,7 +31375,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "process.file.modification_time"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -40274,7 +31385,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "process.file.mount_id"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -40284,7 +31395,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.file.name"} } ev.BaseEvent.ProcessContext.Process.FileEvent.BasenameStr = rv return nil @@ -40299,7 +31410,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "process.file.package.name"} } ev.BaseEvent.ProcessContext.Process.FileEvent.PkgName = rv return nil @@ -40309,7 +31420,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.file.package.source_version"} } 
ev.BaseEvent.ProcessContext.Process.FileEvent.PkgSrcVersion = rv return nil @@ -40319,7 +31430,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.file.package.version"} } ev.BaseEvent.ProcessContext.Process.FileEvent.PkgVersion = rv return nil @@ -40329,7 +31440,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.file.path"} } ev.BaseEvent.ProcessContext.Process.FileEvent.PathnameStr = rv return nil @@ -40344,10 +31455,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.file.rights"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -40357,7 +31468,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "process.file.uid"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.UID = uint32(rv) return nil @@ -40367,7 +31478,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "process.file.user"} } ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.User = rv return nil @@ -40377,7 +31488,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "process.fsgid"} } ev.BaseEvent.ProcessContext.Process.Credentials.FSGID = uint32(rv) return nil @@ -40387,7 +31498,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "process.fsgroup"} } ev.BaseEvent.ProcessContext.Process.Credentials.FSGroup = rv return nil @@ -40397,7 +31508,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "process.fsuid"} } ev.BaseEvent.ProcessContext.Process.Credentials.FSUID = uint32(rv) return nil @@ -40407,7 +31518,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.FSUser"} 
+ return &eval.ErrValueTypeMismatch{Field: "process.fsuser"} } ev.BaseEvent.ProcessContext.Process.Credentials.FSUser = rv return nil @@ -40417,7 +31528,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "process.gid"} } ev.BaseEvent.ProcessContext.Process.Credentials.GID = uint32(rv) return nil @@ -40427,7 +31538,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "process.group"} } ev.BaseEvent.ProcessContext.Process.Credentials.Group = rv return nil @@ -40437,7 +31548,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.change_time"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -40447,7 +31558,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.filesystem"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -40457,7 +31568,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.gid"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -40467,7 +31578,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.group"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -40481,7 +31592,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes = append(ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.hashes"} } return nil case "process.interpreter.file.in_upper_layer": @@ -40490,7 +31601,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.in_upper_layer"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -40500,7 +31611,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.inode"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -40510,10 +31621,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.interpreter.file.mode"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -40523,7 +31634,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.modification_time"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -40533,7 +31644,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.mount_id"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -40543,7 +31654,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.name"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -40558,7 +31669,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.package.name"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -40568,7 +31679,7 @@ func (ev 
*Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.package.source_version"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -40578,7 +31689,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.package.version"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -40588,7 +31699,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.path"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -40603,10 +31714,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.interpreter.file.rights"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -40616,7 +31727,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.uid"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -40626,7 +31737,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "process.interpreter.file.user"} } ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -40636,7 +31747,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "process.is_exec"} } ev.BaseEvent.ProcessContext.Process.IsExec = rv return nil @@ -40646,7 +31757,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "process.is_kworker"} } ev.BaseEvent.ProcessContext.Process.PIDContext.IsKworker = rv return nil @@ -40656,7 +31767,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error 
{ } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.IsThread"} + return &eval.ErrValueTypeMismatch{Field: "process.is_thread"} } ev.BaseEvent.ProcessContext.Process.IsThread = rv return nil @@ -40669,7 +31780,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Args"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.args"} } ev.BaseEvent.ProcessContext.Parent.Args = rv return nil @@ -40686,7 +31797,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.Argv = append(ev.BaseEvent.ProcessContext.Parent.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Argv"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.args_flags"} } return nil case "process.parent.args_options": @@ -40702,7 +31813,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.Argv = append(ev.BaseEvent.ProcessContext.Parent.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Argv"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.args_options"} } return nil case "process.parent.args_truncated": @@ -40714,7 +31825,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.args_truncated"} } ev.BaseEvent.ProcessContext.Parent.ArgsTruncated = rv return nil @@ -40731,7 +31842,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.Argv = append(ev.BaseEvent.ProcessContext.Parent.Argv, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Argv"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.argv"} } return nil case "process.parent.argv0": @@ -40743,7 +31854,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.argv0"} } ev.BaseEvent.ProcessContext.Parent.Argv0 = rv return nil @@ -40756,7 +31867,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.auid"} } ev.BaseEvent.ProcessContext.Parent.Credentials.AUID = uint32(rv) return nil @@ -40769,7 +31880,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.cap_effective"} } ev.BaseEvent.ProcessContext.Parent.Credentials.CapEffective = uint64(rv) return nil @@ -40782,7 +31893,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.cap_permitted"} } ev.BaseEvent.ProcessContext.Parent.Credentials.CapPermitted = uint64(rv) return nil @@ -40795,7 +31906,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.cgroup.file.inode"} } ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -40808,7 +31919,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.cgroup.file.mount_id"} } ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -40821,7 +31932,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.cgroup.id"} } ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -40834,7 +31945,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.cgroup.manager"} } ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupManager = rv return nil @@ -40847,7 +31958,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CGroup.CGroupVersion"} + return 
&eval.ErrValueTypeMismatch{Field: "process.parent.cgroup.version"} } ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupVersion = int(rv) return nil @@ -40860,7 +31971,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Comm"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.comm"} } ev.BaseEvent.ProcessContext.Parent.Comm = rv return nil @@ -40873,7 +31984,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.container.id"} } ev.BaseEvent.ProcessContext.Parent.ContainerID = containerutils.ContainerID(rv) return nil @@ -40886,7 +31997,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.created_at"} } ev.BaseEvent.ProcessContext.Parent.CreatedAt = uint64(rv) return nil @@ -40899,7 +32010,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.egid"} } ev.BaseEvent.ProcessContext.Parent.Credentials.EGID = uint32(rv) return nil @@ -40912,7 +32023,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.egroup"} } ev.BaseEvent.ProcessContext.Parent.Credentials.EGroup = rv return nil @@ -40929,7 +32040,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.Envp = append(ev.BaseEvent.ProcessContext.Parent.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Envp"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.envp"} } return nil case "process.parent.envs": @@ -40945,7 +32056,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.Envs = append(ev.BaseEvent.ProcessContext.Parent.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Envs"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.envs"} } return nil case "process.parent.envs_truncated": @@ -40957,7 +32068,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.envs_truncated"} } ev.BaseEvent.ProcessContext.Parent.EnvsTruncated = rv return nil @@ -40970,7 +32081,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.euid"} } ev.BaseEvent.ProcessContext.Parent.Credentials.EUID = uint32(rv) return nil @@ -40983,7 +32094,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.euser"} } ev.BaseEvent.ProcessContext.Parent.Credentials.EUser = rv return nil @@ -40996,7 +32107,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.change_time"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -41009,7 +32120,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.filesystem"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.Filesystem = rv return nil @@ -41022,7 +32133,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.gid"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.GID = uint32(rv) return nil @@ -41035,7 +32146,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.group"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Group = rv return nil @@ -41052,7 +32163,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.FileEvent.Hashes = append(ev.BaseEvent.ProcessContext.Parent.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.hashes"} } return nil case "process.parent.file.in_upper_layer": @@ -41064,7 +32175,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.in_upper_layer"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.InUpperLayer = rv return nil @@ -41077,7 +32188,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.inode"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -41090,10 +32201,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.parent.file.mode"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -41106,7 +32217,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.modification_time"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -41119,7 +32230,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.mount_id"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -41132,7 +32243,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.name"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.BasenameStr = rv return nil @@ -41153,7 +32264,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.package.name"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.PkgName = rv return nil @@ -41166,7 +32277,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: 
"process.parent.file.package.source_version"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.PkgSrcVersion = rv return nil @@ -41179,7 +32290,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.package.version"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.PkgVersion = rv return nil @@ -41192,7 +32303,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.path"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.PathnameStr = rv return nil @@ -41213,10 +32324,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.parent.file.rights"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -41229,7 +32340,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.uid"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.UID = uint32(rv) return nil @@ -41242,7 +32353,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.user"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.User = rv return nil @@ -41255,7 +32366,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.fsgid"} } ev.BaseEvent.ProcessContext.Parent.Credentials.FSGID = uint32(rv) return nil @@ -41268,7 +32379,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.fsgroup"} } ev.BaseEvent.ProcessContext.Parent.Credentials.FSGroup = rv return nil @@ -41281,7 +32392,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.fsuid"} } ev.BaseEvent.ProcessContext.Parent.Credentials.FSUID = uint32(rv) return nil @@ -41294,7 +32405,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - 
return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.fsuser"} } ev.BaseEvent.ProcessContext.Parent.Credentials.FSUser = rv return nil @@ -41307,7 +32418,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.gid"} } ev.BaseEvent.ProcessContext.Parent.Credentials.GID = uint32(rv) return nil @@ -41320,7 +32431,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.group"} } ev.BaseEvent.ProcessContext.Parent.Credentials.Group = rv return nil @@ -41333,7 +32444,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.change_time"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -41346,7 +32457,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.filesystem"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -41359,7 +32470,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.gid"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -41372,7 +32483,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.group"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -41389,7 +32500,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.Hashes = append(ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.hashes"} } return nil case "process.parent.interpreter.file.in_upper_layer": @@ -41401,7 +32512,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.in_upper_layer"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -41414,7 +32525,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.inode"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -41427,10 +32538,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.parent.interpreter.file.mode"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -41443,7 +32554,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.modification_time"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -41456,7 +32567,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.mount_id"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -41469,7 +32580,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.name"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -41490,7 +32601,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.package.name"} } 
ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -41503,7 +32614,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.package.source_version"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -41516,7 +32627,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.package.version"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -41529,7 +32640,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.path"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -41550,10 +32661,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "process.parent.interpreter.file.rights"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -41566,7 +32677,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.uid"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -41579,7 +32690,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.interpreter.file.user"} } ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -41592,7 +32703,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.is_exec"} } ev.BaseEvent.ProcessContext.Parent.IsExec = rv return nil @@ -41605,7 +32716,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.is_kworker"} } 
ev.BaseEvent.ProcessContext.Parent.PIDContext.IsKworker = rv return nil @@ -41618,7 +32729,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.IsThread"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.is_thread"} } ev.BaseEvent.ProcessContext.Parent.IsThread = rv return nil @@ -41631,7 +32742,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.pid"} } ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid = uint32(rv) return nil @@ -41644,7 +32755,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.PPid"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.ppid"} } ev.BaseEvent.ProcessContext.Parent.PPid = uint32(rv) return nil @@ -41657,7 +32768,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.tid"} } ev.BaseEvent.ProcessContext.Parent.PIDContext.Tid = uint32(rv) return nil @@ -41670,7 +32781,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.tty_name"} } ev.BaseEvent.ProcessContext.Parent.TTYName = rv return nil @@ -41683,7 +32794,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.uid"} } ev.BaseEvent.ProcessContext.Parent.Credentials.UID = uint32(rv) return nil @@ -41696,7 +32807,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.user"} } ev.BaseEvent.ProcessContext.Parent.Credentials.User = rv return nil @@ -41713,7 +32824,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.UserSession.K8SGroups = append(ev.BaseEvent.ProcessContext.Parent.UserSession.K8SGroups, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.user_session.k8s_groups"} } return nil case "process.parent.user_session.k8s_uid": @@ -41725,7 +32836,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.user_session.k8s_uid"} } ev.BaseEvent.ProcessContext.Parent.UserSession.K8SUID = rv return nil @@ -41738,7 +32849,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.user_session.k8s_username"} } ev.BaseEvent.ProcessContext.Parent.UserSession.K8SUsername = rv return nil @@ -41748,7 +32859,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "process.pid"} } ev.BaseEvent.ProcessContext.Process.PIDContext.Pid = uint32(rv) return nil @@ -41758,7 +32869,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "process.ppid"} } ev.BaseEvent.ProcessContext.Process.PPid = uint32(rv) return nil @@ -41768,7 +32879,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "process.tid"} } ev.BaseEvent.ProcessContext.Process.PIDContext.Tid = uint32(rv) return nil @@ -41778,7 +32889,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "process.tty_name"} } ev.BaseEvent.ProcessContext.Process.TTYName = rv return nil @@ -41788,7 +32899,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "process.uid"} } ev.BaseEvent.ProcessContext.Process.Credentials.UID = uint32(rv) return nil @@ -41798,7 +32909,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "process.user"} } ev.BaseEvent.ProcessContext.Process.Credentials.User = rv return nil @@ -41812,7 +32923,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.UserSession.K8SGroups = append(ev.BaseEvent.ProcessContext.Process.UserSession.K8SGroups, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "process.user_session.k8s_groups"} } return nil case "process.user_session.k8s_uid": @@ -41821,7 +32932,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "process.user_session.k8s_uid"} } ev.BaseEvent.ProcessContext.Process.UserSession.K8SUID = rv return nil @@ -41831,21 +32942,21 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "process.user_session.k8s_username"} } ev.BaseEvent.ProcessContext.Process.UserSession.K8SUsername = rv return nil case "ptrace.request": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Request"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.request"} } ev.PTrace.Request = uint32(rv) return nil case "ptrace.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.retval"} } ev.PTrace.SyscallEvent.Retval = int64(rv) return nil @@ -41858,7 +32969,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Args"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.args"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Args = rv return nil @@ -41875,7 +32986,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.args_flags"} } return nil case "ptrace.tracee.ancestors.args_options": @@ -41891,7 +33002,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.args_options"} } return nil case "ptrace.tracee.ancestors.args_truncated": @@ -41903,7 +33014,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.args_truncated"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.ArgsTruncated = rv return nil @@ -41920,7 +33031,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.argv"} } return nil case "ptrace.tracee.ancestors.argv0": @@ -41932,7 +33043,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.argv0"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Argv0 = rv return nil @@ -41945,7 +33056,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.auid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.AUID = uint32(rv) return nil @@ -41958,7 +33069,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.cap_effective"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.CapEffective = uint64(rv) return nil @@ -41971,7 +33082,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.cap_permitted"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.CapPermitted = uint64(rv) return nil @@ -41984,7 +33095,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.cgroup.file.inode"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -41997,7 +33108,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.cgroup.file.mount_id"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -42010,7 +33121,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.cgroup.id"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -42023,7 +33134,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.cgroup.manager"} } 
ev.PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupManager = rv return nil @@ -42036,7 +33147,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.cgroup.version"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.CGroup.CGroupVersion = int(rv) return nil @@ -42049,7 +33160,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Comm"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.comm"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Comm = rv return nil @@ -42062,7 +33173,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.container.id"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.ContainerID = containerutils.ContainerID(rv) return nil @@ -42075,7 +33186,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.created_at"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.CreatedAt = uint64(rv) return nil @@ -42088,7 +33199,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.egid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.EGID = uint32(rv) return nil @@ -42101,7 +33212,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.egroup"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.EGroup = rv return nil @@ -42118,7 +33229,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Envp = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.envp"} } return nil case "ptrace.tracee.ancestors.envs": @@ -42134,7 +33245,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Envs = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.envs"} } return nil case "ptrace.tracee.ancestors.envs_truncated": @@ -42146,7 +33257,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.envs_truncated"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.EnvsTruncated = rv return nil @@ -42159,7 +33270,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.euid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.EUID = uint32(rv) return nil @@ -42172,7 +33283,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.euser"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.EUser = rv return nil @@ -42185,7 +33296,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.change_time"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -42198,7 +33309,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.filesystem"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.Filesystem = rv return nil @@ -42211,7 +33322,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.gid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.GID = uint32(rv) return nil @@ -42224,7 +33335,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.group"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.Group = rv return nil @@ -42241,7 +33352,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.Hashes = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.hashes"} } return nil case "ptrace.tracee.ancestors.file.in_upper_layer": @@ -42253,7 +33364,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.in_upper_layer"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.InUpperLayer = rv return nil @@ -42266,7 +33377,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.inode"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -42279,10 +33390,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.ancestors.file.mode"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -42295,7 +33406,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.modification_time"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -42308,7 +33419,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.mount_id"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -42321,7 +33432,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.name"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.BasenameStr = rv return nil @@ -42342,7 +33453,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.package.name"} } 
ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.PkgName = rv return nil @@ -42355,7 +33466,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.package.source_version"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.PkgSrcVersion = rv return nil @@ -42368,7 +33479,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.package.version"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.PkgVersion = rv return nil @@ -42381,7 +33492,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.path"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.PathnameStr = rv return nil @@ -42402,10 +33513,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.ancestors.file.rights"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -42418,7 +33529,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.uid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.UID = uint32(rv) return nil @@ -42431,7 +33542,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.file.user"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.FileEvent.FileFields.User = rv return nil @@ -42444,7 +33555,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.fsgid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.FSGID = uint32(rv) return nil @@ -42457,7 +33568,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.fsgroup"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.FSGroup = rv return nil @@ -42470,7 +33581,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.fsuid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.FSUID = uint32(rv) return nil @@ -42483,7 +33594,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.fsuser"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.FSUser = rv return nil @@ -42496,7 +33607,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.gid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.GID = uint32(rv) return nil @@ -42509,7 +33620,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.group"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.Group = rv return nil @@ -42522,7 +33633,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.change_time"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -42535,7 +33646,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.filesystem"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -42548,7 +33659,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.gid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -42561,7 +33672,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Group"} + return 
&eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.group"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -42578,7 +33689,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.hashes"} } return nil case "ptrace.tracee.ancestors.interpreter.file.in_upper_layer": @@ -42590,7 +33701,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.in_upper_layer"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -42603,7 +33714,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.inode"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -42616,10 +33727,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.ancestors.interpreter.file.mode"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -42632,7 +33743,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.modification_time"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -42645,7 +33756,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.mount_id"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -42658,7 +33769,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { 
} rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.name"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -42679,7 +33790,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.package.name"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -42692,7 +33803,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.package.source_version"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -42705,7 +33816,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.package.version"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -42718,7 +33829,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.path"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -42739,10 +33850,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.ancestors.interpreter.file.rights"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -42755,7 +33866,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.uid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -42768,7 +33879,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.interpreter.file.user"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -42781,7 +33892,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.is_exec"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.IsExec = rv return nil @@ -42794,7 +33905,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.is_kworker"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.PIDContext.IsKworker = rv return nil @@ -42807,7 +33918,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.IsThread"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.is_thread"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.IsThread = rv return nil @@ -42828,7 +33939,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.pid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.PIDContext.Pid = uint32(rv) return nil @@ -42841,7 +33952,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.ppid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.PPid = uint32(rv) return nil @@ -42854,7 +33965,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.tid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.PIDContext.Tid = uint32(rv) return nil @@ -42867,7 +33978,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.tty_name"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.TTYName = rv return nil @@ -42880,7 +33991,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.uid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.UID = uint32(rv) return nil @@ -42893,7 +34004,7 @@ func (ev *Event) 
SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.user"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.Credentials.User = rv return nil @@ -42910,7 +34021,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Ancestor.ProcessContext.Process.UserSession.K8SGroups = append(ev.PTrace.Tracee.Ancestor.ProcessContext.Process.UserSession.K8SGroups, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.user_session.k8s_groups"} } return nil case "ptrace.tracee.ancestors.user_session.k8s_uid": @@ -42922,7 +34033,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.user_session.k8s_uid"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.UserSession.K8SUID = rv return nil @@ -42935,7 +34046,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Ancestor.ProcessContext.Process.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ancestors.user_session.k8s_username"} } ev.PTrace.Tracee.Ancestor.ProcessContext.Process.UserSession.K8SUsername = rv return nil @@ -42945,7 +34056,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Args"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.args"} } ev.PTrace.Tracee.Process.Args = rv return nil @@ -42959,7 +34070,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Process.Argv = append(ev.PTrace.Tracee.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.args_flags"} } return nil case "ptrace.tracee.args_options": @@ -42972,7 +34083,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Process.Argv = append(ev.PTrace.Tracee.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.args_options"} } return nil case "ptrace.tracee.args_truncated": @@ -42981,7 +34092,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.args_truncated"} } ev.PTrace.Tracee.Process.ArgsTruncated = rv return nil @@ -42995,7 +34106,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Process.Argv = append(ev.PTrace.Tracee.Process.Argv, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.argv"} } return nil case "ptrace.tracee.argv0": @@ -43004,7 +34115,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.argv0"} } ev.PTrace.Tracee.Process.Argv0 = rv return nil @@ -43014,7 +34125,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.auid"} } ev.PTrace.Tracee.Process.Credentials.AUID = uint32(rv) return nil @@ -43024,7 +34135,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.cap_effective"} } ev.PTrace.Tracee.Process.Credentials.CapEffective = uint64(rv) return nil @@ -43034,7 +34145,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.cap_permitted"} } ev.PTrace.Tracee.Process.Credentials.CapPermitted = uint64(rv) return nil @@ -43044,7 +34155,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.CGroup.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.cgroup.file.inode"} } ev.PTrace.Tracee.Process.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -43054,7 +34165,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.cgroup.file.mount_id"} } ev.PTrace.Tracee.Process.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -43064,7 +34175,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.cgroup.id"} } ev.PTrace.Tracee.Process.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -43074,7 +34185,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.cgroup.manager"} } ev.PTrace.Tracee.Process.CGroup.CGroupManager = rv return nil @@ -43084,7 +34195,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.cgroup.version"} } ev.PTrace.Tracee.Process.CGroup.CGroupVersion = int(rv) return nil @@ -43094,7 +34205,7 @@ func (ev *Event) SetFieldValue(field 
eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Comm"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.comm"} } ev.PTrace.Tracee.Process.Comm = rv return nil @@ -43104,7 +34215,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.container.id"} } ev.PTrace.Tracee.Process.ContainerID = containerutils.ContainerID(rv) return nil @@ -43114,7 +34225,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.created_at"} } ev.PTrace.Tracee.Process.CreatedAt = uint64(rv) return nil @@ -43124,7 +34235,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.egid"} } ev.PTrace.Tracee.Process.Credentials.EGID = uint32(rv) return nil @@ -43134,7 +34245,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.egroup"} } ev.PTrace.Tracee.Process.Credentials.EGroup = rv return nil @@ -43148,7 +34259,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Process.Envp = append(ev.PTrace.Tracee.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.envp"} } return nil case "ptrace.tracee.envs": @@ -43161,7 +34272,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Process.Envs = append(ev.PTrace.Tracee.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.envs"} } return nil case "ptrace.tracee.envs_truncated": @@ -43170,7 +34281,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.envs_truncated"} } ev.PTrace.Tracee.Process.EnvsTruncated = rv return nil @@ -43180,7 +34291,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.euid"} } ev.PTrace.Tracee.Process.Credentials.EUID = uint32(rv) return nil @@ -43190,7 +34301,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.euser"} } ev.PTrace.Tracee.Process.Credentials.EUser = rv return nil @@ -43200,7 +34311,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.change_time"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -43210,7 +34321,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.filesystem"} } ev.PTrace.Tracee.Process.FileEvent.Filesystem = rv return nil @@ -43220,7 +34331,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.gid"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.GID = uint32(rv) return nil @@ -43230,7 +34341,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.group"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.Group = rv return nil @@ -43244,7 +34355,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Process.FileEvent.Hashes = append(ev.PTrace.Tracee.Process.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.hashes"} } return nil case "ptrace.tracee.file.in_upper_layer": @@ -43253,7 +34364,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.in_upper_layer"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.InUpperLayer = rv return nil @@ -43263,7 +34374,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.inode"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -43273,10 +34384,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.file.mode"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -43286,7 +34397,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.modification_time"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -43296,7 +34407,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.mount_id"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -43306,7 +34417,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.name"} } ev.PTrace.Tracee.Process.FileEvent.BasenameStr = rv return nil @@ -43321,7 +34432,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.package.name"} } ev.PTrace.Tracee.Process.FileEvent.PkgName = rv return nil @@ -43331,7 +34442,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.package.source_version"} } ev.PTrace.Tracee.Process.FileEvent.PkgSrcVersion = rv return nil @@ -43341,7 +34452,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value 
interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.package.version"} } ev.PTrace.Tracee.Process.FileEvent.PkgVersion = rv return nil @@ -43351,7 +34462,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.path"} } ev.PTrace.Tracee.Process.FileEvent.PathnameStr = rv return nil @@ -43366,10 +34477,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.file.rights"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -43379,7 +34490,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.uid"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.UID = uint32(rv) return nil @@ -43389,7 +34500,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.file.user"} } ev.PTrace.Tracee.Process.FileEvent.FileFields.User = rv return nil @@ -43399,7 +34510,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.fsgid"} } ev.PTrace.Tracee.Process.Credentials.FSGID = uint32(rv) return nil @@ -43409,7 +34520,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.fsgroup"} } ev.PTrace.Tracee.Process.Credentials.FSGroup = rv return nil @@ -43419,7 +34530,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.fsuid"} } ev.PTrace.Tracee.Process.Credentials.FSUID = uint32(rv) return nil @@ -43429,7 +34540,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.fsuser"} } ev.PTrace.Tracee.Process.Credentials.FSUser = rv return nil @@ -43439,7 +34550,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.gid"} } ev.PTrace.Tracee.Process.Credentials.GID = uint32(rv) return nil @@ -43449,7 +34560,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.group"} } ev.PTrace.Tracee.Process.Credentials.Group = rv return nil @@ -43459,7 +34570,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.change_time"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -43469,7 +34580,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.filesystem"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -43479,7 +34590,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.gid"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -43489,7 +34600,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.group"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -43503,7 +34614,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.Hashes = append(ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.hashes"} } return nil case "ptrace.tracee.interpreter.file.in_upper_layer": @@ -43512,7 +34623,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.in_upper_layer"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -43522,7 +34633,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.inode"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -43532,10 +34643,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.interpreter.file.mode"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -43545,7 +34656,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.modification_time"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -43555,7 +34666,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.mount_id"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -43565,7 +34676,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.name"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -43580,7 +34691,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.package.name"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -43590,7 +34701,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.package.source_version"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -43600,7 +34711,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.package.version"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -43610,7 +34721,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.path"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -43625,10 +34736,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.interpreter.file.rights"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -43638,7 +34749,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.uid"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -43648,7 +34759,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.interpreter.file.user"} } ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -43658,7 +34769,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.is_exec"} } ev.PTrace.Tracee.Process.IsExec = rv return nil @@ -43668,7 +34779,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.is_kworker"} } ev.PTrace.Tracee.Process.PIDContext.IsKworker = rv return nil @@ -43678,7 +34789,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.IsThread"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.is_thread"} } ev.PTrace.Tracee.Process.IsThread = rv return nil @@ 
-43691,7 +34802,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Args"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.args"} } ev.PTrace.Tracee.Parent.Args = rv return nil @@ -43708,7 +34819,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Parent.Argv = append(ev.PTrace.Tracee.Parent.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Argv"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.args_flags"} } return nil case "ptrace.tracee.parent.args_options": @@ -43724,7 +34835,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Parent.Argv = append(ev.PTrace.Tracee.Parent.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Argv"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.args_options"} } return nil case "ptrace.tracee.parent.args_truncated": @@ -43736,7 +34847,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.args_truncated"} } ev.PTrace.Tracee.Parent.ArgsTruncated = rv return nil @@ -43753,7 +34864,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Parent.Argv = append(ev.PTrace.Tracee.Parent.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Argv"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.argv"} } return nil case "ptrace.tracee.parent.argv0": @@ -43765,7 +34876,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.argv0"} } ev.PTrace.Tracee.Parent.Argv0 = rv return nil @@ -43778,7 +34889,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.auid"} } ev.PTrace.Tracee.Parent.Credentials.AUID = uint32(rv) return nil @@ -43791,7 +34902,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.cap_effective"} } ev.PTrace.Tracee.Parent.Credentials.CapEffective = uint64(rv) return nil @@ -43804,7 +34915,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.cap_permitted"} } ev.PTrace.Tracee.Parent.Credentials.CapPermitted = uint64(rv) return nil @@ -43817,7 +34928,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.CGroup.CGroupFile.Inode"} + 
return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.cgroup.file.inode"} } ev.PTrace.Tracee.Parent.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -43830,7 +34941,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.cgroup.file.mount_id"} } ev.PTrace.Tracee.Parent.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -43843,7 +34954,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.cgroup.id"} } ev.PTrace.Tracee.Parent.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -43856,7 +34967,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.cgroup.manager"} } ev.PTrace.Tracee.Parent.CGroup.CGroupManager = rv return nil @@ -43869,7 +34980,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.cgroup.version"} } ev.PTrace.Tracee.Parent.CGroup.CGroupVersion = int(rv) return nil @@ -43882,7 +34993,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Comm"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.comm"} } ev.PTrace.Tracee.Parent.Comm = rv return nil @@ -43895,7 +35006,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.container.id"} } ev.PTrace.Tracee.Parent.ContainerID = containerutils.ContainerID(rv) return nil @@ -43908,7 +35019,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.created_at"} } ev.PTrace.Tracee.Parent.CreatedAt = uint64(rv) return nil @@ -43921,7 +35032,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.egid"} } ev.PTrace.Tracee.Parent.Credentials.EGID = uint32(rv) return nil @@ -43934,7 +35045,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.egroup"} } ev.PTrace.Tracee.Parent.Credentials.EGroup = rv return nil @@ -43951,7 +35062,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: 
ev.PTrace.Tracee.Parent.Envp = append(ev.PTrace.Tracee.Parent.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Envp"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.envp"} } return nil case "ptrace.tracee.parent.envs": @@ -43967,7 +35078,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Parent.Envs = append(ev.PTrace.Tracee.Parent.Envs, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Envs"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.envs"} } return nil case "ptrace.tracee.parent.envs_truncated": @@ -43979,7 +35090,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.envs_truncated"} } ev.PTrace.Tracee.Parent.EnvsTruncated = rv return nil @@ -43992,7 +35103,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.euid"} } ev.PTrace.Tracee.Parent.Credentials.EUID = uint32(rv) return nil @@ -44005,7 +35116,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.euser"} } ev.PTrace.Tracee.Parent.Credentials.EUser = rv return nil @@ -44018,7 +35129,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.change_time"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -44031,7 +35142,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.filesystem"} } ev.PTrace.Tracee.Parent.FileEvent.Filesystem = rv return nil @@ -44044,7 +35155,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.gid"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.GID = uint32(rv) return nil @@ -44057,7 +35168,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.group"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.Group = rv return nil @@ -44074,7 +35185,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Parent.FileEvent.Hashes = append(ev.PTrace.Tracee.Parent.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.hashes"} } return nil case "ptrace.tracee.parent.file.in_upper_layer": @@ -44086,7 +35197,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.in_upper_layer"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.InUpperLayer = rv return nil @@ -44099,7 +35210,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.inode"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -44112,10 +35223,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.parent.file.mode"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -44128,7 +35239,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.modification_time"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -44141,7 +35252,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.mount_id"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -44154,7 +35265,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.name"} } ev.PTrace.Tracee.Parent.FileEvent.BasenameStr = rv return nil @@ -44175,7 +35286,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.package.name"} } ev.PTrace.Tracee.Parent.FileEvent.PkgName = rv return nil @@ -44188,7 +35299,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.package.source_version"} } ev.PTrace.Tracee.Parent.FileEvent.PkgSrcVersion = rv return nil @@ -44201,7 +35312,7 
@@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.package.version"} } ev.PTrace.Tracee.Parent.FileEvent.PkgVersion = rv return nil @@ -44214,7 +35325,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.path"} } ev.PTrace.Tracee.Parent.FileEvent.PathnameStr = rv return nil @@ -44235,10 +35346,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.parent.file.rights"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -44251,7 +35362,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.uid"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.UID = uint32(rv) return nil @@ -44264,7 +35375,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.file.user"} } ev.PTrace.Tracee.Parent.FileEvent.FileFields.User = rv return nil @@ -44277,7 +35388,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.fsgid"} } ev.PTrace.Tracee.Parent.Credentials.FSGID = uint32(rv) return nil @@ -44290,7 +35401,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.fsgroup"} } ev.PTrace.Tracee.Parent.Credentials.FSGroup = rv return nil @@ -44303,7 +35414,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.fsuid"} } ev.PTrace.Tracee.Parent.Credentials.FSUID = uint32(rv) return nil @@ -44316,7 +35427,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.fsuser"} } ev.PTrace.Tracee.Parent.Credentials.FSUser = rv return nil @@ -44329,7 +35440,7 @@ func (ev *Event) SetFieldValue(field 
eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.gid"} } ev.PTrace.Tracee.Parent.Credentials.GID = uint32(rv) return nil @@ -44342,7 +35453,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.group"} } ev.PTrace.Tracee.Parent.Credentials.Group = rv return nil @@ -44355,7 +35466,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.change_time"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -44368,7 +35479,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.filesystem"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -44381,7 +35492,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.gid"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -44394,7 +35505,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.group"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -44411,7 +35522,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.Hashes = append(ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.hashes"} } return nil case "ptrace.tracee.parent.interpreter.file.in_upper_layer": @@ -44423,7 +35534,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.in_upper_layer"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -44436,7 +35547,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.inode"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -44449,10 +35560,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.parent.interpreter.file.mode"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -44465,7 +35576,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.modification_time"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -44478,7 +35589,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.mount_id"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -44491,7 +35602,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.name"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -44512,7 +35623,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.package.name"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -44525,7 +35636,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error 
{ } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.package.source_version"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -44538,7 +35649,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.package.version"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -44551,7 +35662,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.path"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -44572,10 +35683,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "ptrace.tracee.parent.interpreter.file.rights"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -44588,7 +35699,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.uid"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -44601,7 +35712,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.interpreter.file.user"} } ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -44614,7 +35725,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.is_exec"} } ev.PTrace.Tracee.Parent.IsExec = rv return nil @@ -44627,7 +35738,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.is_kworker"} } ev.PTrace.Tracee.Parent.PIDContext.IsKworker = rv return nil @@ -44640,7 +35751,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.IsThread"} + return 
&eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.is_thread"} } ev.PTrace.Tracee.Parent.IsThread = rv return nil @@ -44653,7 +35764,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.pid"} } ev.PTrace.Tracee.Parent.PIDContext.Pid = uint32(rv) return nil @@ -44666,7 +35777,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.PPid"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.ppid"} } ev.PTrace.Tracee.Parent.PPid = uint32(rv) return nil @@ -44679,7 +35790,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.tid"} } ev.PTrace.Tracee.Parent.PIDContext.Tid = uint32(rv) return nil @@ -44692,7 +35803,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.tty_name"} } ev.PTrace.Tracee.Parent.TTYName = rv return nil @@ -44705,7 +35816,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.uid"} } ev.PTrace.Tracee.Parent.Credentials.UID = uint32(rv) return nil @@ -44718,7 +35829,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.user"} } ev.PTrace.Tracee.Parent.Credentials.User = rv return nil @@ -44735,7 +35846,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Parent.UserSession.K8SGroups = append(ev.PTrace.Tracee.Parent.UserSession.K8SGroups, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.user_session.k8s_groups"} } return nil case "ptrace.tracee.parent.user_session.k8s_uid": @@ -44747,7 +35858,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.user_session.k8s_uid"} } ev.PTrace.Tracee.Parent.UserSession.K8SUID = rv return nil @@ -44760,7 +35871,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Parent.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.parent.user_session.k8s_username"} } ev.PTrace.Tracee.Parent.UserSession.K8SUsername = rv return nil @@ -44770,7 +35881,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.pid"} } ev.PTrace.Tracee.Process.PIDContext.Pid = uint32(rv) return nil @@ -44780,7 +35891,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.ppid"} } ev.PTrace.Tracee.Process.PPid = uint32(rv) return nil @@ -44790,7 +35901,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.tid"} } ev.PTrace.Tracee.Process.PIDContext.Tid = uint32(rv) return nil @@ -44800,7 +35911,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.tty_name"} } ev.PTrace.Tracee.Process.TTYName = rv return nil @@ -44810,7 +35921,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.uid"} } ev.PTrace.Tracee.Process.Credentials.UID = uint32(rv) return nil @@ -44820,7 +35931,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.user"} } ev.PTrace.Tracee.Process.Credentials.User = rv return nil @@ -44834,7 +35945,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.PTrace.Tracee.Process.UserSession.K8SGroups = append(ev.PTrace.Tracee.Process.UserSession.K8SGroups, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.user_session.k8s_groups"} } return nil case "ptrace.tracee.user_session.k8s_uid": @@ -44843,7 +35954,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.user_session.k8s_uid"} } ev.PTrace.Tracee.Process.UserSession.K8SUID = rv return nil @@ -44853,49 +35964,49 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "PTrace.Tracee.Process.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "ptrace.tracee.user_session.k8s_username"} } ev.PTrace.Tracee.Process.UserSession.K8SUsername = rv return nil case "removexattr.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.change_time"} } ev.RemoveXAttr.File.FileFields.CTime = uint64(rv) return nil case "removexattr.file.destination.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.Name"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.destination.name"} } ev.RemoveXAttr.Name = rv return nil case "removexattr.file.destination.namespace": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.Namespace"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.destination.namespace"} } ev.RemoveXAttr.Namespace = rv return nil case "removexattr.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.filesystem"} } ev.RemoveXAttr.File.Filesystem = rv return nil case "removexattr.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.gid"} } ev.RemoveXAttr.File.FileFields.GID = uint32(rv) return nil case "removexattr.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.group"} } ev.RemoveXAttr.File.FileFields.Group = rv return nil @@ -44906,51 +36017,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.RemoveXAttr.File.Hashes = append(ev.RemoveXAttr.File.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.hashes"} } return nil case "removexattr.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.in_upper_layer"} } ev.RemoveXAttr.File.FileFields.InUpperLayer = rv return nil case "removexattr.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.inode"} } ev.RemoveXAttr.File.FileFields.PathKey.Inode = uint64(rv) return nil case "removexattr.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "RemoveXAttr.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "removexattr.file.mode"} } ev.RemoveXAttr.File.FileFields.Mode = uint16(rv) return nil case "removexattr.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.modification_time"} } ev.RemoveXAttr.File.FileFields.MTime = uint64(rv) return nil case "removexattr.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.mount_id"} } ev.RemoveXAttr.File.FileFields.PathKey.MountID = uint32(rv) return nil case "removexattr.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.name"} } ev.RemoveXAttr.File.BasenameStr = rv return nil @@ -44959,28 +36070,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "removexattr.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.package.name"} } ev.RemoveXAttr.File.PkgName = rv return nil case "removexattr.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.package.source_version"} } ev.RemoveXAttr.File.PkgSrcVersion = rv return nil case "removexattr.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.package.version"} } ev.RemoveXAttr.File.PkgVersion = rv return nil case "removexattr.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.path"} } ev.RemoveXAttr.File.PathnameStr = rv return nil @@ -44989,66 +36100,66 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "removexattr.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: 
"removexattr.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "RemoveXAttr.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "removexattr.file.rights"} } ev.RemoveXAttr.File.FileFields.Mode = uint16(rv) return nil case "removexattr.file.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.uid"} } ev.RemoveXAttr.File.FileFields.UID = uint32(rv) return nil case "removexattr.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.file.user"} } ev.RemoveXAttr.File.FileFields.User = rv return nil case "removexattr.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RemoveXAttr.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "removexattr.retval"} } ev.RemoveXAttr.SyscallEvent.Retval = int64(rv) return nil case "rename.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.change_time"} } ev.Rename.Old.FileFields.CTime = uint64(rv) return nil case "rename.file.destination.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.change_time"} } ev.Rename.New.FileFields.CTime = uint64(rv) return nil case "rename.file.destination.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.filesystem"} } ev.Rename.New.Filesystem = rv return nil case "rename.file.destination.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.gid"} } ev.Rename.New.FileFields.GID = uint32(rv) return nil case "rename.file.destination.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.group"} } ev.Rename.New.FileFields.Group = rv return nil @@ -45059,51 +36170,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Rename.New.Hashes = append(ev.Rename.New.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Rename.New.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.hashes"} } return nil case "rename.file.destination.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.in_upper_layer"} } ev.Rename.New.FileFields.InUpperLayer = rv return nil case "rename.file.destination.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.inode"} } ev.Rename.New.FileFields.PathKey.Inode = uint64(rv) return nil case "rename.file.destination.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Rename.New.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "rename.file.destination.mode"} } ev.Rename.New.FileFields.Mode = uint16(rv) return nil case "rename.file.destination.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.modification_time"} } ev.Rename.New.FileFields.MTime = uint64(rv) return nil case "rename.file.destination.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.mount_id"} } ev.Rename.New.FileFields.PathKey.MountID = uint32(rv) return nil case "rename.file.destination.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.name"} } ev.Rename.New.BasenameStr = rv return nil @@ -45112,28 +36223,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.destination.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.package.name"} } ev.Rename.New.PkgName = rv return nil case "rename.file.destination.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.package.source_version"} } ev.Rename.New.PkgSrcVersion = rv return nil case "rename.file.destination.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.package.version"} } ev.Rename.New.PkgVersion = rv return nil case "rename.file.destination.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.path"} } ev.Rename.New.PathnameStr = rv return nil @@ -45142,45 +36253,45 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.destination.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.Mode"} + return 
&eval.ErrValueTypeMismatch{Field: "rename.file.destination.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Rename.New.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "rename.file.destination.rights"} } ev.Rename.New.FileFields.Mode = uint16(rv) return nil case "rename.file.destination.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.uid"} } ev.Rename.New.FileFields.UID = uint32(rv) return nil case "rename.file.destination.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.New.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.user"} } ev.Rename.New.FileFields.User = rv return nil case "rename.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.filesystem"} } ev.Rename.Old.Filesystem = rv return nil case "rename.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.gid"} } ev.Rename.Old.FileFields.GID = uint32(rv) return nil case "rename.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.group"} } ev.Rename.Old.FileFields.Group = rv return nil @@ -45191,51 +36302,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Rename.Old.Hashes = append(ev.Rename.Old.Hashes, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.hashes"} } return nil case "rename.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.in_upper_layer"} } ev.Rename.Old.FileFields.InUpperLayer = rv return nil case "rename.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.inode"} } ev.Rename.Old.FileFields.PathKey.Inode = uint64(rv) return nil case "rename.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Rename.Old.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "rename.file.mode"} } ev.Rename.Old.FileFields.Mode = uint16(rv) return nil case "rename.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.modification_time"} } ev.Rename.Old.FileFields.MTime = uint64(rv) return nil case "rename.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.mount_id"} } ev.Rename.Old.FileFields.PathKey.MountID = uint32(rv) return nil case "rename.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.BasenameStr"} + 
return &eval.ErrValueTypeMismatch{Field: "rename.file.name"} } ev.Rename.Old.BasenameStr = rv return nil @@ -45244,28 +36355,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.package.name"} } ev.Rename.Old.PkgName = rv return nil case "rename.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.package.source_version"} } ev.Rename.Old.PkgSrcVersion = rv return nil case "rename.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.package.version"} } ev.Rename.Old.PkgVersion = rv return nil case "rename.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.path"} } ev.Rename.Old.PathnameStr = rv return nil @@ -45274,73 +36385,73 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Rename.Old.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "rename.file.rights"} } ev.Rename.Old.FileFields.Mode = uint16(rv) return nil case "rename.file.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.uid"} } ev.Rename.Old.FileFields.UID = uint32(rv) return nil case "rename.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.Old.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.user"} } ev.Rename.Old.FileFields.User = rv return nil case "rename.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "rename.retval"} } ev.Rename.SyscallEvent.Retval = int64(rv) return nil case "rename.syscall.destination.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.SyscallContext.StrArg2"} + return &eval.ErrValueTypeMismatch{Field: "rename.syscall.destination.path"} } ev.Rename.SyscallContext.StrArg2 = rv return nil case "rename.syscall.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rename.SyscallContext.StrArg1"} + return &eval.ErrValueTypeMismatch{Field: "rename.syscall.path"} } ev.Rename.SyscallContext.StrArg1 = rv return nil case "rmdir.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.change_time"} } ev.Rmdir.File.FileFields.CTime = uint64(rv) return nil case "rmdir.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.filesystem"} } ev.Rmdir.File.Filesystem = rv return nil case "rmdir.file.gid": rv, ok := value.(int) if 
!ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.gid"} } ev.Rmdir.File.FileFields.GID = uint32(rv) return nil case "rmdir.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.group"} } ev.Rmdir.File.FileFields.Group = rv return nil @@ -45351,51 +36462,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Rmdir.File.Hashes = append(ev.Rmdir.File.Hashes, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.hashes"} } return nil case "rmdir.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.in_upper_layer"} } ev.Rmdir.File.FileFields.InUpperLayer = rv return nil case "rmdir.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.inode"} } ev.Rmdir.File.FileFields.PathKey.Inode = uint64(rv) return nil case "rmdir.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Rmdir.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "rmdir.file.mode"} } ev.Rmdir.File.FileFields.Mode = uint16(rv) return nil case "rmdir.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.modification_time"} } ev.Rmdir.File.FileFields.MTime = uint64(rv) return nil case "rmdir.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.mount_id"} } ev.Rmdir.File.FileFields.PathKey.MountID = uint32(rv) return nil case "rmdir.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.name"} } ev.Rmdir.File.BasenameStr = rv return nil @@ -45404,28 +36515,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rmdir.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.package.name"} } ev.Rmdir.File.PkgName = rv return nil case "rmdir.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.package.source_version"} } ev.Rmdir.File.PkgSrcVersion = rv return nil case "rmdir.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.package.version"} } ev.Rmdir.File.PkgVersion = rv return nil case "rmdir.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: 
"rmdir.file.path"} } ev.Rmdir.File.PathnameStr = rv return nil @@ -45434,185 +36545,192 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rmdir.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Rmdir.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "rmdir.file.rights"} } ev.Rmdir.File.FileFields.Mode = uint16(rv) return nil case "rmdir.file.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.uid"} } ev.Rmdir.File.FileFields.UID = uint32(rv) return nil case "rmdir.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.file.user"} } ev.Rmdir.File.FileFields.User = rv return nil case "rmdir.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Rmdir.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "rmdir.retval"} } ev.Rmdir.SyscallEvent.Retval = int64(rv) return nil + case "rmdir.syscall.path": + rv, ok := value.(string) + if !ok { + return &eval.ErrValueTypeMismatch{Field: "rmdir.syscall.path"} + } + ev.Rmdir.SyscallContext.StrArg1 = rv + return nil case "selinux.bool.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SELinux.BoolName"} + return &eval.ErrValueTypeMismatch{Field: "selinux.bool.name"} } ev.SELinux.BoolName = rv return nil case "selinux.bool.state": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SELinux.BoolChangeValue"} + return &eval.ErrValueTypeMismatch{Field: "selinux.bool.state"} } ev.SELinux.BoolChangeValue = rv return nil case "selinux.bool_commit.state": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SELinux.BoolCommitValue"} + return &eval.ErrValueTypeMismatch{Field: "selinux.bool_commit.state"} } ev.SELinux.BoolCommitValue = rv return nil case "selinux.enforce.status": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SELinux.EnforceStatus"} + return &eval.ErrValueTypeMismatch{Field: "selinux.enforce.status"} } ev.SELinux.EnforceStatus = rv return nil case "setgid.egid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetGID.EGID"} + return &eval.ErrValueTypeMismatch{Field: "setgid.egid"} } ev.SetGID.EGID = uint32(rv) return nil case "setgid.egroup": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetGID.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "setgid.egroup"} } ev.SetGID.EGroup = rv return nil case "setgid.fsgid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetGID.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "setgid.fsgid"} } ev.SetGID.FSGID = uint32(rv) return nil case "setgid.fsgroup": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetGID.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "setgid.fsgroup"} } ev.SetGID.FSGroup = rv return nil case "setgid.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetGID.GID"} + return &eval.ErrValueTypeMismatch{Field: "setgid.gid"} } ev.SetGID.GID = uint32(rv) return nil case "setgid.group": rv, ok := value.(string) 
if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetGID.Group"} + return &eval.ErrValueTypeMismatch{Field: "setgid.group"} } ev.SetGID.Group = rv return nil case "setuid.euid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetUID.EUID"} + return &eval.ErrValueTypeMismatch{Field: "setuid.euid"} } ev.SetUID.EUID = uint32(rv) return nil case "setuid.euser": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetUID.EUser"} + return &eval.ErrValueTypeMismatch{Field: "setuid.euser"} } ev.SetUID.EUser = rv return nil case "setuid.fsuid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetUID.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "setuid.fsuid"} } ev.SetUID.FSUID = uint32(rv) return nil case "setuid.fsuser": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetUID.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "setuid.fsuser"} } ev.SetUID.FSUser = rv return nil case "setuid.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetUID.UID"} + return &eval.ErrValueTypeMismatch{Field: "setuid.uid"} } ev.SetUID.UID = uint32(rv) return nil case "setuid.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetUID.User"} + return &eval.ErrValueTypeMismatch{Field: "setuid.user"} } ev.SetUID.User = rv return nil case "setxattr.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.change_time"} } ev.SetXAttr.File.FileFields.CTime = uint64(rv) return nil case "setxattr.file.destination.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.Name"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.destination.name"} } ev.SetXAttr.Name = rv return nil case "setxattr.file.destination.namespace": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.Namespace"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.destination.namespace"} } ev.SetXAttr.Namespace = rv return nil case "setxattr.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.filesystem"} } ev.SetXAttr.File.Filesystem = rv return nil case "setxattr.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.gid"} } ev.SetXAttr.File.FileFields.GID = uint32(rv) return nil case "setxattr.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.group"} } ev.SetXAttr.File.FileFields.Group = rv return nil @@ -45623,51 +36741,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.SetXAttr.File.Hashes = append(ev.SetXAttr.File.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.hashes"} } return nil case "setxattr.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.in_upper_layer"} } ev.SetXAttr.File.FileFields.InUpperLayer = rv return nil case "setxattr.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.inode"} } ev.SetXAttr.File.FileFields.PathKey.Inode = uint64(rv) return nil case "setxattr.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "SetXAttr.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "setxattr.file.mode"} } ev.SetXAttr.File.FileFields.Mode = uint16(rv) return nil case "setxattr.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.modification_time"} } ev.SetXAttr.File.FileFields.MTime = uint64(rv) return nil case "setxattr.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.mount_id"} } ev.SetXAttr.File.FileFields.PathKey.MountID = uint32(rv) return nil case "setxattr.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.name"} } ev.SetXAttr.File.BasenameStr = rv return nil @@ -45676,28 +36794,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "setxattr.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.package.name"} } ev.SetXAttr.File.PkgName = rv return nil case "setxattr.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.package.source_version"} } ev.SetXAttr.File.PkgSrcVersion = rv return nil case "setxattr.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.package.version"} } ev.SetXAttr.File.PkgVersion = rv return nil case "setxattr.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.path"} } ev.SetXAttr.File.PathnameStr = rv return nil @@ -45706,45 +36824,45 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "setxattr.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "SetXAttr.File.FileFields.Mode"} + return 
&eval.ErrValueOutOfRange{Field: "setxattr.file.rights"} } ev.SetXAttr.File.FileFields.Mode = uint16(rv) return nil case "setxattr.file.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.uid"} } ev.SetXAttr.File.FileFields.UID = uint32(rv) return nil case "setxattr.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.file.user"} } ev.SetXAttr.File.FileFields.User = rv return nil case "setxattr.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetXAttr.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "setxattr.retval"} } ev.SetXAttr.SyscallEvent.Retval = int64(rv) return nil case "signal.pid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.PID"} + return &eval.ErrValueTypeMismatch{Field: "signal.pid"} } ev.Signal.PID = uint32(rv) return nil case "signal.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "signal.retval"} } ev.Signal.SyscallEvent.Retval = int64(rv) return nil @@ -45757,7 +36875,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Args"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.args"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Args = rv return nil @@ -45774,7 +36892,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Ancestor.ProcessContext.Process.Argv = append(ev.Signal.Target.Ancestor.ProcessContext.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.args_flags"} } return nil case "signal.target.ancestors.args_options": @@ -45790,7 +36908,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Ancestor.ProcessContext.Process.Argv = append(ev.Signal.Target.Ancestor.ProcessContext.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.args_options"} } return nil case "signal.target.ancestors.args_truncated": @@ -45802,7 +36920,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.args_truncated"} } ev.Signal.Target.Ancestor.ProcessContext.Process.ArgsTruncated = rv return nil @@ -45819,7 +36937,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Ancestor.ProcessContext.Process.Argv = append(ev.Signal.Target.Ancestor.ProcessContext.Process.Argv, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.argv"} } return nil case "signal.target.ancestors.argv0": @@ -45831,7 +36949,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.argv0"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Argv0 = rv return nil @@ -45844,7 +36962,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.auid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.AUID = uint32(rv) return nil @@ -45857,7 +36975,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.cap_effective"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.CapEffective = uint64(rv) return nil @@ -45870,7 +36988,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.cap_permitted"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.CapPermitted = uint64(rv) return nil @@ -45883,7 +37001,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.cgroup.file.inode"} } ev.Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -45896,7 +37014,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.cgroup.file.mount_id"} } ev.Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -45909,7 +37027,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.cgroup.id"} } ev.Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -45922,7 +37040,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.cgroup.manager"} } 
ev.Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupManager = rv return nil @@ -45935,7 +37053,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.cgroup.version"} } ev.Signal.Target.Ancestor.ProcessContext.Process.CGroup.CGroupVersion = int(rv) return nil @@ -45948,7 +37066,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Comm"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.comm"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Comm = rv return nil @@ -45961,7 +37079,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.container.id"} } ev.Signal.Target.Ancestor.ProcessContext.Process.ContainerID = containerutils.ContainerID(rv) return nil @@ -45974,7 +37092,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.created_at"} } ev.Signal.Target.Ancestor.ProcessContext.Process.CreatedAt = uint64(rv) return nil @@ -45987,7 +37105,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.egid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.EGID = uint32(rv) return nil @@ -46000,7 +37118,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.egroup"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.EGroup = rv return nil @@ -46017,7 +37135,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Ancestor.ProcessContext.Process.Envp = append(ev.Signal.Target.Ancestor.ProcessContext.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.envp"} } return nil case "signal.target.ancestors.envs": @@ -46033,7 +37151,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Ancestor.ProcessContext.Process.Envs = append(ev.Signal.Target.Ancestor.ProcessContext.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.envs"} } return nil case "signal.target.ancestors.envs_truncated": @@ -46045,7 +37163,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.envs_truncated"} } ev.Signal.Target.Ancestor.ProcessContext.Process.EnvsTruncated = rv return nil @@ -46058,7 +37176,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.euid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.EUID = uint32(rv) return nil @@ -46071,7 +37189,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.euser"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.EUser = rv return nil @@ -46084,7 +37202,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.change_time"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -46097,7 +37215,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.filesystem"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.Filesystem = rv return nil @@ -46110,7 +37228,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.gid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.GID = uint32(rv) return nil @@ -46123,7 +37241,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.group"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.Group = rv return nil @@ -46140,7 +37258,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.Hashes = append(ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.hashes"} } return nil case "signal.target.ancestors.file.in_upper_layer": @@ -46152,7 +37270,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.in_upper_layer"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.InUpperLayer = rv return nil @@ -46165,7 +37283,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.inode"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -46178,10 +37296,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.ancestors.file.mode"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -46194,7 +37312,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.modification_time"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -46207,7 +37325,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.mount_id"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -46220,7 +37338,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.name"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.BasenameStr = rv return nil @@ -46241,7 +37359,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.package.name"} } 
ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.PkgName = rv return nil @@ -46254,7 +37372,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.package.source_version"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.PkgSrcVersion = rv return nil @@ -46267,7 +37385,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.package.version"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.PkgVersion = rv return nil @@ -46280,7 +37398,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.path"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.PathnameStr = rv return nil @@ -46301,10 +37419,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.ancestors.file.rights"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -46317,7 +37435,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.uid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.UID = uint32(rv) return nil @@ -46330,7 +37448,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.file.user"} } ev.Signal.Target.Ancestor.ProcessContext.Process.FileEvent.FileFields.User = rv return nil @@ -46343,7 +37461,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.fsgid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.FSGID = uint32(rv) return nil @@ -46356,7 +37474,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"Signal.Target.Ancestor.ProcessContext.Process.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.fsgroup"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.FSGroup = rv return nil @@ -46369,7 +37487,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.fsuid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.FSUID = uint32(rv) return nil @@ -46382,7 +37500,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.fsuser"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.FSUser = rv return nil @@ -46395,7 +37513,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.gid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.GID = uint32(rv) return nil @@ -46408,7 +37526,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.group"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.Group = rv return nil @@ -46421,7 +37539,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.change_time"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -46434,7 +37552,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.filesystem"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -46447,7 +37565,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.gid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -46460,7 +37578,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Group"} + return 
&eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.group"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -46477,7 +37595,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes = append(ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.hashes"} } return nil case "signal.target.ancestors.interpreter.file.in_upper_layer": @@ -46489,7 +37607,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.in_upper_layer"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -46502,7 +37620,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.inode"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -46515,10 +37633,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.ancestors.interpreter.file.mode"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -46531,7 +37649,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.modification_time"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -46544,7 +37662,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.mount_id"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -46557,7 +37675,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { 
} rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.name"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -46578,7 +37696,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.package.name"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -46591,7 +37709,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.package.source_version"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -46604,7 +37722,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.package.version"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -46617,7 +37735,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.path"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -46638,10 +37756,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.ancestors.interpreter.file.rights"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -46654,7 +37772,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.uid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -46667,7 +37785,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.interpreter.file.user"} } ev.Signal.Target.Ancestor.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -46680,7 +37798,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.is_exec"} } ev.Signal.Target.Ancestor.ProcessContext.Process.IsExec = rv return nil @@ -46693,7 +37811,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.is_kworker"} } ev.Signal.Target.Ancestor.ProcessContext.Process.PIDContext.IsKworker = rv return nil @@ -46706,7 +37824,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.IsThread"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.is_thread"} } ev.Signal.Target.Ancestor.ProcessContext.Process.IsThread = rv return nil @@ -46727,7 +37845,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.pid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.PIDContext.Pid = uint32(rv) return nil @@ -46740,7 +37858,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.ppid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.PPid = uint32(rv) return nil @@ -46753,7 +37871,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.tid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.PIDContext.Tid = uint32(rv) return nil @@ -46766,7 +37884,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.tty_name"} } ev.Signal.Target.Ancestor.ProcessContext.Process.TTYName = rv return nil @@ -46779,7 +37897,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.uid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.UID = uint32(rv) return nil @@ -46792,7 +37910,7 @@ func (ev *Event) 
SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.user"} } ev.Signal.Target.Ancestor.ProcessContext.Process.Credentials.User = rv return nil @@ -46809,7 +37927,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Ancestor.ProcessContext.Process.UserSession.K8SGroups = append(ev.Signal.Target.Ancestor.ProcessContext.Process.UserSession.K8SGroups, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.user_session.k8s_groups"} } return nil case "signal.target.ancestors.user_session.k8s_uid": @@ -46821,7 +37939,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.user_session.k8s_uid"} } ev.Signal.Target.Ancestor.ProcessContext.Process.UserSession.K8SUID = rv return nil @@ -46834,7 +37952,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Ancestor.ProcessContext.Process.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ancestors.user_session.k8s_username"} } ev.Signal.Target.Ancestor.ProcessContext.Process.UserSession.K8SUsername = rv return nil @@ -46844,7 +37962,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Args"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.args"} } ev.Signal.Target.Process.Args = rv return nil @@ -46858,7 +37976,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Process.Argv = append(ev.Signal.Target.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.args_flags"} } return nil case "signal.target.args_options": @@ -46871,7 +37989,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Process.Argv = append(ev.Signal.Target.Process.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.args_options"} } return nil case "signal.target.args_truncated": @@ -46880,7 +37998,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.args_truncated"} } ev.Signal.Target.Process.ArgsTruncated = rv return nil @@ -46894,7 +38012,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Process.Argv = append(ev.Signal.Target.Process.Argv, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Argv"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.argv"} } return nil case "signal.target.argv0": @@ -46903,7 +38021,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.argv0"} } ev.Signal.Target.Process.Argv0 = rv return nil @@ -46913,7 +38031,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.auid"} } ev.Signal.Target.Process.Credentials.AUID = uint32(rv) return nil @@ -46923,7 +38041,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.cap_effective"} } ev.Signal.Target.Process.Credentials.CapEffective = uint64(rv) return nil @@ -46933,7 +38051,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.cap_permitted"} } ev.Signal.Target.Process.Credentials.CapPermitted = uint64(rv) return nil @@ -46943,7 +38061,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.CGroup.CGroupFile.Inode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.cgroup.file.inode"} } ev.Signal.Target.Process.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -46953,7 +38071,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.cgroup.file.mount_id"} } ev.Signal.Target.Process.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -46963,7 +38081,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.cgroup.id"} } ev.Signal.Target.Process.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -46973,7 +38091,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.cgroup.manager"} } ev.Signal.Target.Process.CGroup.CGroupManager = rv return nil @@ -46983,7 +38101,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.cgroup.version"} } ev.Signal.Target.Process.CGroup.CGroupVersion = int(rv) return nil @@ -46993,7 +38111,7 @@ func (ev *Event) SetFieldValue(field 
eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Comm"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.comm"} } ev.Signal.Target.Process.Comm = rv return nil @@ -47003,7 +38121,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.container.id"} } ev.Signal.Target.Process.ContainerID = containerutils.ContainerID(rv) return nil @@ -47013,7 +38131,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.created_at"} } ev.Signal.Target.Process.CreatedAt = uint64(rv) return nil @@ -47023,7 +38141,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.egid"} } ev.Signal.Target.Process.Credentials.EGID = uint32(rv) return nil @@ -47033,7 +38151,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.egroup"} } ev.Signal.Target.Process.Credentials.EGroup = rv return nil @@ -47047,7 +38165,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Process.Envp = append(ev.Signal.Target.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.envp"} } return nil case "signal.target.envs": @@ -47060,7 +38178,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Process.Envs = append(ev.Signal.Target.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.envs"} } return nil case "signal.target.envs_truncated": @@ -47069,7 +38187,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.envs_truncated"} } ev.Signal.Target.Process.EnvsTruncated = rv return nil @@ -47079,7 +38197,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.euid"} } ev.Signal.Target.Process.Credentials.EUID = uint32(rv) return nil @@ -47089,7 +38207,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.euser"} } ev.Signal.Target.Process.Credentials.EUser = rv return nil @@ -47099,7 +38217,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.change_time"} } ev.Signal.Target.Process.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -47109,7 +38227,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.filesystem"} } ev.Signal.Target.Process.FileEvent.Filesystem = rv return nil @@ -47119,7 +38237,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.gid"} } ev.Signal.Target.Process.FileEvent.FileFields.GID = uint32(rv) return nil @@ -47129,7 +38247,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.group"} } ev.Signal.Target.Process.FileEvent.FileFields.Group = rv return nil @@ -47143,7 +38261,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Process.FileEvent.Hashes = append(ev.Signal.Target.Process.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.hashes"} } return nil case "signal.target.file.in_upper_layer": @@ -47152,7 +38270,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.in_upper_layer"} } ev.Signal.Target.Process.FileEvent.FileFields.InUpperLayer = rv return nil @@ -47162,7 +38280,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.inode"} } ev.Signal.Target.Process.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -47172,10 +38290,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.file.mode"} } ev.Signal.Target.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -47185,7 +38303,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.modification_time"} } ev.Signal.Target.Process.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -47195,7 +38313,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.mount_id"} } ev.Signal.Target.Process.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -47205,7 +38323,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.name"} } ev.Signal.Target.Process.FileEvent.BasenameStr = rv return nil @@ -47220,7 +38338,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.package.name"} } ev.Signal.Target.Process.FileEvent.PkgName = rv return nil @@ -47230,7 +38348,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.package.source_version"} } ev.Signal.Target.Process.FileEvent.PkgSrcVersion = rv return nil @@ -47240,7 +38358,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value 
interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.package.version"} } ev.Signal.Target.Process.FileEvent.PkgVersion = rv return nil @@ -47250,7 +38368,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.path"} } ev.Signal.Target.Process.FileEvent.PathnameStr = rv return nil @@ -47265,10 +38383,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Process.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.file.rights"} } ev.Signal.Target.Process.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -47278,7 +38396,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.uid"} } ev.Signal.Target.Process.FileEvent.FileFields.UID = uint32(rv) return nil @@ -47288,7 +38406,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.file.user"} } ev.Signal.Target.Process.FileEvent.FileFields.User = rv return nil @@ -47298,7 +38416,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.fsgid"} } ev.Signal.Target.Process.Credentials.FSGID = uint32(rv) return nil @@ -47308,7 +38426,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.fsgroup"} } ev.Signal.Target.Process.Credentials.FSGroup = rv return nil @@ -47318,7 +38436,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.fsuid"} } ev.Signal.Target.Process.Credentials.FSUID = uint32(rv) return nil @@ -47328,7 +38446,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.fsuser"} } ev.Signal.Target.Process.Credentials.FSUser = rv return nil @@ -47338,7 +38456,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.gid"} } ev.Signal.Target.Process.Credentials.GID = uint32(rv) return nil @@ -47348,7 +38466,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.group"} } ev.Signal.Target.Process.Credentials.Group = rv return nil @@ -47358,7 +38476,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.change_time"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -47368,7 +38486,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.filesystem"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -47378,7 +38496,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.gid"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -47388,7 +38506,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.group"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -47402,7 +38520,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Process.LinuxBinprm.FileEvent.Hashes = append(ev.Signal.Target.Process.LinuxBinprm.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.hashes"} } return nil case "signal.target.interpreter.file.in_upper_layer": @@ -47411,7 +38529,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.in_upper_layer"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -47421,7 +38539,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.inode"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -47431,10 +38549,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.interpreter.file.mode"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -47444,7 +38562,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.modification_time"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -47454,7 +38572,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.mount_id"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -47464,7 +38582,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.name"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -47479,7 +38597,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.package.name"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -47489,7 +38607,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.package.source_version"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -47499,7 +38617,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.package.version"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -47509,7 +38627,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.path"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -47524,10 +38642,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.interpreter.file.rights"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -47537,7 +38655,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.uid"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -47547,7 +38665,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.interpreter.file.user"} } ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -47557,7 +38675,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.is_exec"} } ev.Signal.Target.Process.IsExec = rv return nil @@ -47567,7 +38685,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.is_kworker"} } ev.Signal.Target.Process.PIDContext.IsKworker = rv return nil @@ -47577,7 +38695,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.IsThread"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.is_thread"} } ev.Signal.Target.Process.IsThread = rv return nil @@ 
-47590,7 +38708,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Args"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.args"} } ev.Signal.Target.Parent.Args = rv return nil @@ -47607,7 +38725,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Parent.Argv = append(ev.Signal.Target.Parent.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Argv"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.args_flags"} } return nil case "signal.target.parent.args_options": @@ -47623,7 +38741,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Parent.Argv = append(ev.Signal.Target.Parent.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Argv"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.args_options"} } return nil case "signal.target.parent.args_truncated": @@ -47635,7 +38753,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.ArgsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.args_truncated"} } ev.Signal.Target.Parent.ArgsTruncated = rv return nil @@ -47652,7 +38770,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Parent.Argv = append(ev.Signal.Target.Parent.Argv, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Argv"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.argv"} } return nil case "signal.target.parent.argv0": @@ -47664,7 +38782,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Argv0"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.argv0"} } ev.Signal.Target.Parent.Argv0 = rv return nil @@ -47677,7 +38795,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.AUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.auid"} } ev.Signal.Target.Parent.Credentials.AUID = uint32(rv) return nil @@ -47690,7 +38808,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.CapEffective"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.cap_effective"} } ev.Signal.Target.Parent.Credentials.CapEffective = uint64(rv) return nil @@ -47703,7 +38821,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.CapPermitted"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.cap_permitted"} } ev.Signal.Target.Parent.Credentials.CapPermitted = uint64(rv) return nil @@ -47716,7 +38834,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.CGroup.CGroupFile.Inode"} + 
return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.cgroup.file.inode"} } ev.Signal.Target.Parent.CGroup.CGroupFile.Inode = uint64(rv) return nil @@ -47729,7 +38847,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.CGroup.CGroupFile.MountID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.cgroup.file.mount_id"} } ev.Signal.Target.Parent.CGroup.CGroupFile.MountID = uint32(rv) return nil @@ -47742,7 +38860,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.CGroup.CGroupID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.cgroup.id"} } ev.Signal.Target.Parent.CGroup.CGroupID = containerutils.CGroupID(rv) return nil @@ -47755,7 +38873,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.CGroup.CGroupManager"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.cgroup.manager"} } ev.Signal.Target.Parent.CGroup.CGroupManager = rv return nil @@ -47768,7 +38886,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.CGroup.CGroupVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.cgroup.version"} } ev.Signal.Target.Parent.CGroup.CGroupVersion = int(rv) return nil @@ -47781,7 +38899,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Comm"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.comm"} } ev.Signal.Target.Parent.Comm = rv return nil @@ -47794,7 +38912,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.container.id"} } ev.Signal.Target.Parent.ContainerID = containerutils.ContainerID(rv) return nil @@ -47807,7 +38925,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.created_at"} } ev.Signal.Target.Parent.CreatedAt = uint64(rv) return nil @@ -47820,7 +38938,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.EGID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.egid"} } ev.Signal.Target.Parent.Credentials.EGID = uint32(rv) return nil @@ -47833,7 +38951,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.EGroup"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.egroup"} } ev.Signal.Target.Parent.Credentials.EGroup = rv return nil @@ -47850,7 +38968,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: 
ev.Signal.Target.Parent.Envp = append(ev.Signal.Target.Parent.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Envp"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.envp"} } return nil case "signal.target.parent.envs": @@ -47866,7 +38984,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Parent.Envs = append(ev.Signal.Target.Parent.Envs, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Envs"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.envs"} } return nil case "signal.target.parent.envs_truncated": @@ -47878,7 +38996,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.EnvsTruncated"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.envs_truncated"} } ev.Signal.Target.Parent.EnvsTruncated = rv return nil @@ -47891,7 +39009,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.EUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.euid"} } ev.Signal.Target.Parent.Credentials.EUID = uint32(rv) return nil @@ -47904,7 +39022,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.EUser"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.euser"} } ev.Signal.Target.Parent.Credentials.EUser = rv return nil @@ -47917,7 +39035,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.change_time"} } ev.Signal.Target.Parent.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -47930,7 +39048,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.filesystem"} } ev.Signal.Target.Parent.FileEvent.Filesystem = rv return nil @@ -47943,7 +39061,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.gid"} } ev.Signal.Target.Parent.FileEvent.FileFields.GID = uint32(rv) return nil @@ -47956,7 +39074,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.group"} } ev.Signal.Target.Parent.FileEvent.FileFields.Group = rv return nil @@ -47973,7 +39091,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Parent.FileEvent.Hashes = append(ev.Signal.Target.Parent.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.hashes"} } return nil case "signal.target.parent.file.in_upper_layer": @@ -47985,7 +39103,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.in_upper_layer"} } ev.Signal.Target.Parent.FileEvent.FileFields.InUpperLayer = rv return nil @@ -47998,7 +39116,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.inode"} } ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -48011,10 +39129,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.parent.file.mode"} } ev.Signal.Target.Parent.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -48027,7 +39145,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.modification_time"} } ev.Signal.Target.Parent.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -48040,7 +39158,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.mount_id"} } ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -48053,7 +39171,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.name"} } ev.Signal.Target.Parent.FileEvent.BasenameStr = rv return nil @@ -48074,7 +39192,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.package.name"} } ev.Signal.Target.Parent.FileEvent.PkgName = rv return nil @@ -48087,7 +39205,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.package.source_version"} } ev.Signal.Target.Parent.FileEvent.PkgSrcVersion = rv return nil @@ -48100,7 +39218,7 
@@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.package.version"} } ev.Signal.Target.Parent.FileEvent.PkgVersion = rv return nil @@ -48113,7 +39231,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.path"} } ev.Signal.Target.Parent.FileEvent.PathnameStr = rv return nil @@ -48134,10 +39252,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Parent.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.parent.file.rights"} } ev.Signal.Target.Parent.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -48150,7 +39268,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.uid"} } ev.Signal.Target.Parent.FileEvent.FileFields.UID = uint32(rv) return nil @@ -48163,7 +39281,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.file.user"} } ev.Signal.Target.Parent.FileEvent.FileFields.User = rv return nil @@ -48176,7 +39294,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.FSGID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.fsgid"} } ev.Signal.Target.Parent.Credentials.FSGID = uint32(rv) return nil @@ -48189,7 +39307,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.FSGroup"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.fsgroup"} } ev.Signal.Target.Parent.Credentials.FSGroup = rv return nil @@ -48202,7 +39320,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.FSUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.fsuid"} } ev.Signal.Target.Parent.Credentials.FSUID = uint32(rv) return nil @@ -48215,7 +39333,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.FSUser"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.fsuser"} } ev.Signal.Target.Parent.Credentials.FSUser = rv return nil @@ -48228,7 +39346,7 @@ func (ev *Event) SetFieldValue(field 
eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.GID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.gid"} } ev.Signal.Target.Parent.Credentials.GID = uint32(rv) return nil @@ -48241,7 +39359,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.Group"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.group"} } ev.Signal.Target.Parent.Credentials.Group = rv return nil @@ -48254,7 +39372,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.change_time"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.CTime = uint64(rv) return nil @@ -48267,7 +39385,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.filesystem"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.Filesystem = rv return nil @@ -48280,7 +39398,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.gid"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.GID = uint32(rv) return nil @@ -48293,7 +39411,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.group"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Group = rv return nil @@ -48310,7 +39428,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Parent.LinuxBinprm.FileEvent.Hashes = append(ev.Signal.Target.Parent.LinuxBinprm.FileEvent.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.hashes"} } return nil case "signal.target.parent.interpreter.file.in_upper_layer": @@ -48322,7 +39440,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.in_upper_layer"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.InUpperLayer = rv return nil @@ -48335,7 +39453,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.inode"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode = uint64(rv) return nil @@ -48348,10 +39466,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.parent.interpreter.file.mode"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -48364,7 +39482,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.modification_time"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.MTime = uint64(rv) return nil @@ -48377,7 +39495,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.mount_id"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID = uint32(rv) return nil @@ -48390,7 +39508,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.name"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.BasenameStr = rv return nil @@ -48411,7 +39529,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.package.name"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.PkgName = rv return nil @@ -48424,7 +39542,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error 
{ } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.package.source_version"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.PkgSrcVersion = rv return nil @@ -48437,7 +39555,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.package.version"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.PkgVersion = rv return nil @@ -48450,7 +39568,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.path"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.PathnameStr = rv return nil @@ -48471,10 +39589,10 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "signal.target.parent.interpreter.file.rights"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode = uint16(rv) return nil @@ -48487,7 +39605,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.uid"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.UID = uint32(rv) return nil @@ -48500,7 +39618,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.interpreter.file.user"} } ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.User = rv return nil @@ -48513,7 +39631,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.IsExec"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.is_exec"} } ev.Signal.Target.Parent.IsExec = rv return nil @@ -48526,7 +39644,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.PIDContext.IsKworker"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.is_kworker"} } ev.Signal.Target.Parent.PIDContext.IsKworker = rv return nil @@ -48539,7 +39657,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.IsThread"} + return 
&eval.ErrValueTypeMismatch{Field: "signal.target.parent.is_thread"} } ev.Signal.Target.Parent.IsThread = rv return nil @@ -48552,7 +39670,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.pid"} } ev.Signal.Target.Parent.PIDContext.Pid = uint32(rv) return nil @@ -48565,7 +39683,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.PPid"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.ppid"} } ev.Signal.Target.Parent.PPid = uint32(rv) return nil @@ -48578,7 +39696,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.tid"} } ev.Signal.Target.Parent.PIDContext.Tid = uint32(rv) return nil @@ -48591,7 +39709,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.tty_name"} } ev.Signal.Target.Parent.TTYName = rv return nil @@ -48604,7 +39722,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.uid"} } ev.Signal.Target.Parent.Credentials.UID = uint32(rv) return nil @@ -48617,7 +39735,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.user"} } ev.Signal.Target.Parent.Credentials.User = rv return nil @@ -48634,7 +39752,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Parent.UserSession.K8SGroups = append(ev.Signal.Target.Parent.UserSession.K8SGroups, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.user_session.k8s_groups"} } return nil case "signal.target.parent.user_session.k8s_uid": @@ -48646,7 +39764,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.user_session.k8s_uid"} } ev.Signal.Target.Parent.UserSession.K8SUID = rv return nil @@ -48659,7 +39777,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Parent.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.parent.user_session.k8s_username"} } ev.Signal.Target.Parent.UserSession.K8SUsername = rv return nil @@ -48669,7 +39787,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.pid"} } ev.Signal.Target.Process.PIDContext.Pid = uint32(rv) return nil @@ -48679,7 +39797,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.ppid"} } ev.Signal.Target.Process.PPid = uint32(rv) return nil @@ -48689,7 +39807,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.PIDContext.Tid"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.tid"} } ev.Signal.Target.Process.PIDContext.Tid = uint32(rv) return nil @@ -48699,7 +39817,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.TTYName"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.tty_name"} } ev.Signal.Target.Process.TTYName = rv return nil @@ -48709,7 +39827,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.UID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.uid"} } ev.Signal.Target.Process.Credentials.UID = uint32(rv) return nil @@ -48719,7 +39837,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.Credentials.User"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.user"} } ev.Signal.Target.Process.Credentials.User = rv return nil @@ -48733,7 +39851,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Signal.Target.Process.UserSession.K8SGroups = append(ev.Signal.Target.Process.UserSession.K8SGroups, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.UserSession.K8SGroups"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.user_session.k8s_groups"} } return nil case "signal.target.user_session.k8s_uid": @@ -48742,7 +39860,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.UserSession.K8SUID"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.user_session.k8s_uid"} } ev.Signal.Target.Process.UserSession.K8SUID = rv return nil @@ -48752,42 +39870,42 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Target.Process.UserSession.K8SUsername"} + return &eval.ErrValueTypeMismatch{Field: "signal.target.user_session.k8s_username"} } ev.Signal.Target.Process.UserSession.K8SUsername = rv return nil case "signal.type": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Signal.Type"} + return &eval.ErrValueTypeMismatch{Field: "signal.type"} } ev.Signal.Type = uint32(rv) return nil case "splice.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.change_time"} } ev.Splice.File.FileFields.CTime = uint64(rv) return nil case "splice.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.filesystem"} } ev.Splice.File.Filesystem = rv return nil case "splice.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.gid"} } ev.Splice.File.FileFields.GID = uint32(rv) return nil case "splice.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.group"} } ev.Splice.File.FileFields.Group = rv return nil @@ -48798,51 +39916,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Splice.File.Hashes = append(ev.Splice.File.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Splice.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.hashes"} } return nil case "splice.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.in_upper_layer"} } ev.Splice.File.FileFields.InUpperLayer = rv return nil case "splice.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.inode"} } ev.Splice.File.FileFields.PathKey.Inode = uint64(rv) return nil case "splice.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Splice.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "splice.file.mode"} } ev.Splice.File.FileFields.Mode = uint16(rv) return nil case "splice.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.modification_time"} } ev.Splice.File.FileFields.MTime = uint64(rv) return nil case "splice.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.mount_id"} } ev.Splice.File.FileFields.PathKey.MountID = uint32(rv) return nil case "splice.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.name"} } ev.Splice.File.BasenameStr = rv return nil @@ -48851,28 +39969,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "splice.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.package.name"} } ev.Splice.File.PkgName = rv return nil case "splice.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.package.source_version"} } ev.Splice.File.PkgSrcVersion = rv return nil case "splice.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.package.version"} } ev.Splice.File.PkgVersion = rv return nil case "splice.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.path"} } ev.Splice.File.PathnameStr = rv return nil @@ -48881,73 +39999,73 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "splice.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Splice.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "splice.file.rights"} } ev.Splice.File.FileFields.Mode = uint16(rv) 
return nil case "splice.file.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.uid"} } ev.Splice.File.FileFields.UID = uint32(rv) return nil case "splice.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "splice.file.user"} } ev.Splice.File.FileFields.User = rv return nil case "splice.pipe_entry_flag": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.PipeEntryFlag"} + return &eval.ErrValueTypeMismatch{Field: "splice.pipe_entry_flag"} } ev.Splice.PipeEntryFlag = uint32(rv) return nil case "splice.pipe_exit_flag": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.PipeExitFlag"} + return &eval.ErrValueTypeMismatch{Field: "splice.pipe_exit_flag"} } ev.Splice.PipeExitFlag = uint32(rv) return nil case "splice.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Splice.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "splice.retval"} } ev.Splice.SyscallEvent.Retval = int64(rv) return nil case "unlink.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.change_time"} } ev.Unlink.File.FileFields.CTime = uint64(rv) return nil case "unlink.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.filesystem"} } ev.Unlink.File.Filesystem = rv return nil case "unlink.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.gid"} } ev.Unlink.File.FileFields.GID = uint32(rv) return nil case "unlink.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.group"} } ev.Unlink.File.FileFields.Group = rv return nil @@ -48958,51 +40076,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Unlink.File.Hashes = append(ev.Unlink.File.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.hashes"} } return nil case "unlink.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.in_upper_layer"} } ev.Unlink.File.FileFields.InUpperLayer = rv return nil case "unlink.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.inode"} } ev.Unlink.File.FileFields.PathKey.Inode = uint64(rv) return nil case "unlink.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Unlink.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "unlink.file.mode"} } ev.Unlink.File.FileFields.Mode = uint16(rv) return nil case "unlink.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.modification_time"} } ev.Unlink.File.FileFields.MTime = uint64(rv) return nil case "unlink.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.mount_id"} } ev.Unlink.File.FileFields.PathKey.MountID = uint32(rv) return nil case "unlink.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.name"} } ev.Unlink.File.BasenameStr = rv return nil @@ -49011,28 +40129,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "unlink.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.package.name"} } ev.Unlink.File.PkgName = rv return nil case "unlink.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.package.source_version"} } ev.Unlink.File.PkgSrcVersion = rv return nil case "unlink.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.package.version"} } ev.Unlink.File.PkgVersion = rv return nil case "unlink.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.path"} } ev.Unlink.File.PathnameStr = rv return nil @@ -49041,101 +40159,101 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "unlink.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Unlink.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "unlink.file.rights"} } ev.Unlink.File.FileFields.Mode = uint16(rv) 
return nil case "unlink.file.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.uid"} } ev.Unlink.File.FileFields.UID = uint32(rv) return nil case "unlink.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "unlink.file.user"} } ev.Unlink.File.FileFields.User = rv return nil case "unlink.flags": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.Flags"} + return &eval.ErrValueTypeMismatch{Field: "unlink.flags"} } ev.Unlink.Flags = uint32(rv) return nil case "unlink.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "unlink.retval"} } ev.Unlink.SyscallEvent.Retval = int64(rv) return nil case "unlink.syscall.dirfd": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.SyscallContext.IntArg1"} + return &eval.ErrValueTypeMismatch{Field: "unlink.syscall.dirfd"} } ev.Unlink.SyscallContext.IntArg1 = int64(rv) return nil case "unlink.syscall.flags": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.SyscallContext.IntArg3"} + return &eval.ErrValueTypeMismatch{Field: "unlink.syscall.flags"} } ev.Unlink.SyscallContext.IntArg3 = int64(rv) return nil case "unlink.syscall.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Unlink.SyscallContext.StrArg2"} + return &eval.ErrValueTypeMismatch{Field: "unlink.syscall.path"} } ev.Unlink.SyscallContext.StrArg2 = rv return nil case "unload_module.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "UnloadModule.Name"} + return &eval.ErrValueTypeMismatch{Field: "unload_module.name"} } ev.UnloadModule.Name = rv return nil case "unload_module.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "UnloadModule.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "unload_module.retval"} } ev.UnloadModule.SyscallEvent.Retval = int64(rv) return nil case "utimes.file.change_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.CTime"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.change_time"} } ev.Utimes.File.FileFields.CTime = uint64(rv) return nil case "utimes.file.filesystem": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.Filesystem"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.filesystem"} } ev.Utimes.File.Filesystem = rv return nil case "utimes.file.gid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.GID"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.gid"} } ev.Utimes.File.FileFields.GID = uint32(rv) return nil case "utimes.file.group": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.Group"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.group"} } ev.Utimes.File.FileFields.Group = rv return nil @@ -49146,51 +40264,51 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Utimes.File.Hashes = append(ev.Utimes.File.Hashes, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.Hashes"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.hashes"} } return nil case "utimes.file.in_upper_layer": rv, ok := value.(bool) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.InUpperLayer"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.in_upper_layer"} } ev.Utimes.File.FileFields.InUpperLayer = rv return nil case "utimes.file.inode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.PathKey.Inode"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.inode"} } ev.Utimes.File.FileFields.PathKey.Inode = uint64(rv) return nil case "utimes.file.mode": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.mode"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Utimes.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "utimes.file.mode"} } ev.Utimes.File.FileFields.Mode = uint16(rv) return nil case "utimes.file.modification_time": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.MTime"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.modification_time"} } ev.Utimes.File.FileFields.MTime = uint64(rv) return nil case "utimes.file.mount_id": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.PathKey.MountID"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.mount_id"} } ev.Utimes.File.FileFields.PathKey.MountID = uint32(rv) return nil case "utimes.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.name"} } ev.Utimes.File.BasenameStr = rv return nil @@ -49199,28 +40317,28 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "utimes.file.package.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.PkgName"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.package.name"} } ev.Utimes.File.PkgName = rv return nil case "utimes.file.package.source_version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.PkgSrcVersion"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.package.source_version"} } ev.Utimes.File.PkgSrcVersion = rv return nil case "utimes.file.package.version": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.PkgVersion"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.package.version"} } ev.Utimes.File.PkgVersion = rv return nil case "utimes.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.path"} } ev.Utimes.File.PathnameStr = rv return nil @@ -49229,38 +40347,38 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "utimes.file.rights": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.Mode"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.rights"} } if rv < 0 || rv > math.MaxUint16 { - return &eval.ErrValueOutOfRange{Field: "Utimes.File.FileFields.Mode"} + return &eval.ErrValueOutOfRange{Field: "utimes.file.rights"} } ev.Utimes.File.FileFields.Mode = uint16(rv) 
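// Hedged sketch (not the generated code itself): the SetFieldValue cases shown in this
// diff follow one pattern per field. They assert the dynamic type of `value`, bounds-check
// integers that land in narrower struct fields, and name the eval field (for example
// "utimes.file.mode") rather than the Go struct path in any error. The helper and error
// types below are illustrative stand-ins, not the real eval package API.
package main

import (
	"fmt"
	"math"
)

// Stand-ins for eval.ErrValueTypeMismatch / eval.ErrValueOutOfRange.
type errValueTypeMismatch struct{ Field string }

func (e *errValueTypeMismatch) Error() string { return "type mismatch for " + e.Field }

type errValueOutOfRange struct{ Field string }

func (e *errValueOutOfRange) Error() string { return "value out of range for " + e.Field }

// setUint16Field mirrors the generated cases for uint16-backed fields such as
// "utimes.file.mode": an int is required and it must fit in a uint16.
func setUint16Field(field string, value interface{}, dst *uint16) error {
	rv, ok := value.(int)
	if !ok {
		return &errValueTypeMismatch{Field: field}
	}
	if rv < 0 || rv > math.MaxUint16 {
		return &errValueOutOfRange{Field: field}
	}
	*dst = uint16(rv)
	return nil
}

func main() {
	var mode uint16
	fmt.Println(setUint16Field("utimes.file.mode", 0o644, &mode), mode) // <nil> 420
	fmt.Println(setUint16Field("utimes.file.mode", 1<<20, &mode))       // value out of range
}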
return nil case "utimes.file.uid": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.UID"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.uid"} } ev.Utimes.File.FileFields.UID = uint32(rv) return nil case "utimes.file.user": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.File.FileFields.User"} + return &eval.ErrValueTypeMismatch{Field: "utimes.file.user"} } ev.Utimes.File.FileFields.User = rv return nil case "utimes.retval": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.SyscallEvent.Retval"} + return &eval.ErrValueTypeMismatch{Field: "utimes.retval"} } ev.Utimes.SyscallEvent.Retval = int64(rv) return nil case "utimes.syscall.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Utimes.SyscallContext.StrArg1"} + return &eval.ErrValueTypeMismatch{Field: "utimes.syscall.path"} } ev.Utimes.SyscallContext.StrArg1 = rv return nil diff --git a/pkg/security/secl/model/accessors_windows.go b/pkg/security/secl/model/accessors_windows.go index 6f15992c5aab1..5c07db9be757a 100644 --- a/pkg/security/secl/model/accessors_windows.go +++ b/pkg/security/secl/model/accessors_windows.go @@ -12,13 +12,15 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "math" + "net" "reflect" ) // to always require the math package var _ = math.MaxUint16 +var _ = net.IP{} -func (m *Model) GetEventTypes() []eval.EventType { +func (_ *Model) GetEventTypes() []eval.EventType { return []eval.EventType{ eval.EventType("change_permission"), eval.EventType("create"), @@ -33,12 +35,12 @@ func (m *Model) GetEventTypes() []eval.EventType { eval.EventType("write"), } } -func (m *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { +func (_ *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { switch field { } return nil } -func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Evaluator, error) { +func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Evaluator, error) { switch field { case "change_permission.new_sd": return &eval.StringEvaluator{ @@ -870,23 +872,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessCmdLine(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessCmdLine(ev, &pce.ProcessContext.Process) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessCmdLine(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -897,23 +896,20 @@ func (m *Model) GetEvaluator(field eval.Field, 
regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.ContainerID - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.ContainerID + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.ContainerID }) ctx.StringCache[field] = results return results @@ -925,23 +921,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results return results @@ -953,23 +946,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - results = append(results, result...) 
- return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvp(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -981,23 +971,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvs(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -1010,23 +997,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -1039,23 +1023,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value 
result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results return results @@ -1068,23 +1049,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -1097,23 +1075,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results return results @@ -1134,23 +1109,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - 
if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PIDContext.Pid) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PIDContext.Pid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results return results @@ -1161,23 +1133,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PPid) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PPid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results return results @@ -1189,23 +1158,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveUser(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveUser(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveUser(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -1216,23 +1182,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if 
element == nil { + return nil } - element := value result := element.ProcessContext.Process.OwnerSidString - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.OwnerSidString + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.OwnerSidString }) ctx.StringCache[field] = results return results @@ -1341,6 +1304,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1354,6 +1318,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.ContainerID @@ -1367,6 +1332,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent)) @@ -1380,6 +1346,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1393,6 +1360,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1407,6 +1375,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -1432,6 +1401,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -1456,6 +1426,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid) @@ -1469,6 +1440,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval 
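// Hedged sketch, not the generated accessors: the process.ancestors.* evaluators above
// now (1) resolve a single ancestor when a register ID is supplied, (2) consult a
// per-field cache only on the full-walk path, and (3) pass the field name through to the
// newAncestorsIterator helper. The names below (ancestor, walkAncestors, cache) are
// illustrative stand-ins, not the real model types.
package main

import "fmt"

type ancestor struct {
	cmdline string
	parent  *ancestor
}

// walkAncestors plays the role of newAncestorsIterator: map every ancestor
// through fn, starting from the leaf process.
func walkAncestors(leaf *ancestor, fn func(*ancestor) string) []string {
	var out []string
	for cur := leaf; cur != nil; cur = cur.parent {
		out = append(out, fn(cur))
	}
	return out
}

// resolveCmdlines mirrors the generated EvalFnc shape: a register hit yields a
// single element, otherwise a cached full walk keyed by the eval field name.
func resolveCmdlines(field string, leaf, registered *ancestor, cache map[string][]string) []string {
	if registered != nil { // the regID != "" branch: only the selected ancestor
		return []string{registered.cmdline}
	}
	if cached, ok := cache[field]; ok {
		return cached
	}
	results := walkAncestors(leaf, func(a *ancestor) string { return a.cmdline })
	cache[field] = results
	return results
}

func main() {
	root := &ancestor{cmdline: "/sbin/init"}
	shell := &ancestor{cmdline: "/bin/bash", parent: root}
	cache := map[string][]string{}
	fmt.Println(resolveCmdlines("process.ancestors.cmdline", shell, nil, cache))  // full walk
	fmt.Println(resolveCmdlines("process.ancestors.cmdline", shell, root, cache)) // register hit
}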
ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PPid) @@ -1482,6 +1454,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveUser(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1495,6 +1468,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.OwnerSidString @@ -2043,1112 +2017,366 @@ func (ev *Event) GetFields() []eval.Field { } } func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { - switch field { - case "change_permission.new_sd": - return ev.FieldHandlers.ResolveNewSecurityDescriptor(ev, &ev.ChangePermission), nil - case "change_permission.old_sd": - return ev.FieldHandlers.ResolveOldSecurityDescriptor(ev, &ev.ChangePermission), nil - case "change_permission.path": - return ev.ChangePermission.ObjectName, nil - case "change_permission.type": - return ev.ChangePermission.ObjectType, nil - case "change_permission.user_domain": - return ev.ChangePermission.UserDomain, nil - case "change_permission.username": - return ev.ChangePermission.UserName, nil - case "container.created_at": - return int(ev.FieldHandlers.ResolveContainerCreatedAt(ev, ev.BaseEvent.ContainerContext)), nil - case "container.id": - return ev.FieldHandlers.ResolveContainerID(ev, ev.BaseEvent.ContainerContext), nil - case "container.runtime": - return ev.FieldHandlers.ResolveContainerRuntime(ev, ev.BaseEvent.ContainerContext), nil - case "container.tags": - return ev.FieldHandlers.ResolveContainerTags(ev, ev.BaseEvent.ContainerContext), nil - case "create.file.device_path": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.CreateNewFile.File), nil - case "create.file.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.CreateNewFile.File), nil - case "create.file.name": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.CreateNewFile.File), nil - case "create.file.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.CreateNewFile.File), nil - case "create.file.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.CreateNewFile.File), nil - case "create.file.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.CreateNewFile.File), nil - case "create.registry.key_name": - return ev.CreateRegistryKey.Registry.KeyName, nil - case "create.registry.key_name.length": - return len(ev.CreateRegistryKey.Registry.KeyName), nil - case "create.registry.key_path": - return ev.CreateRegistryKey.Registry.KeyPath, nil - case "create.registry.key_path.length": - return len(ev.CreateRegistryKey.Registry.KeyPath), nil - case "create_key.registry.key_name": - return ev.CreateRegistryKey.Registry.KeyName, nil - case "create_key.registry.key_name.length": - return len(ev.CreateRegistryKey.Registry.KeyName), nil - case "create_key.registry.key_path": - return ev.CreateRegistryKey.Registry.KeyPath, nil - case "create_key.registry.key_path.length": - return len(ev.CreateRegistryKey.Registry.KeyPath), nil - case "delete.file.device_path": - return 
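// Hedged sketch: the process.parent.* evaluators above now record an ErrNotSupported-style
// error on the evaluation context when the event has no parent, instead of silently
// returning a zero value. The context and error types here are simplified stand-ins for
// the eval package, not its actual API.
package main

import "fmt"

type errNotSupported struct{ Field string }

func (e *errNotSupported) Error() string { return e.Field + " is not available for this event" }

type evalContext struct{ Err error }

type process struct {
	pid    int
	parent *process
}

// parentPid mirrors the generated evaluator shape: flag the missing parent on the
// context and return the type's zero value.
func parentPid(ctx *evalContext, p *process) int {
	if p.parent == nil {
		ctx.Err = &errNotSupported{Field: "process.parent.pid"}
		return 0
	}
	return p.parent.pid
}

func main() {
	ctx := &evalContext{}
	orphan := &process{pid: 42}
	fmt.Println(parentPid(ctx, orphan), ctx.Err) // 0 process.parent.pid is not available for this event
}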
ev.FieldHandlers.ResolveFimFilePath(ev, &ev.DeleteFile.File), nil - case "delete.file.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.DeleteFile.File), nil - case "delete.file.name": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.DeleteFile.File), nil - case "delete.file.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.DeleteFile.File), nil - case "delete.file.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.DeleteFile.File), nil - case "delete.file.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.DeleteFile.File), nil - case "delete.registry.key_name": - return ev.DeleteRegistryKey.Registry.KeyName, nil - case "delete.registry.key_name.length": - return len(ev.DeleteRegistryKey.Registry.KeyName), nil - case "delete.registry.key_path": - return ev.DeleteRegistryKey.Registry.KeyPath, nil - case "delete.registry.key_path.length": - return len(ev.DeleteRegistryKey.Registry.KeyPath), nil - case "delete_key.registry.key_name": - return ev.DeleteRegistryKey.Registry.KeyName, nil - case "delete_key.registry.key_name.length": - return len(ev.DeleteRegistryKey.Registry.KeyName), nil - case "delete_key.registry.key_path": - return ev.DeleteRegistryKey.Registry.KeyPath, nil - case "delete_key.registry.key_path.length": - return len(ev.DeleteRegistryKey.Registry.KeyPath), nil - case "event.hostname": - return ev.FieldHandlers.ResolveHostname(ev, &ev.BaseEvent), nil - case "event.origin": - return ev.BaseEvent.Origin, nil - case "event.os": - return ev.BaseEvent.Os, nil - case "event.service": - return ev.FieldHandlers.ResolveService(ev, &ev.BaseEvent), nil - case "event.timestamp": - return int(ev.FieldHandlers.ResolveEventTimestamp(ev, &ev.BaseEvent)), nil - case "exec.cmdline": - return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.Exec.Process), nil - case "exec.container.id": - return ev.Exec.Process.ContainerID, nil - case "exec.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exec.Process)), nil - case "exec.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exec.Process), nil - case "exec.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exec.Process), nil - case "exec.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent), nil - case "exec.pid": - return int(ev.Exec.Process.PIDContext.Pid), nil - case "exec.ppid": - return int(ev.Exec.Process.PPid), nil - case "exec.user": - return ev.FieldHandlers.ResolveUser(ev, ev.Exec.Process), nil - case "exec.user_sid": - return ev.Exec.Process.OwnerSidString, nil - case "exit.cause": - return int(ev.Exit.Cause), nil - case "exit.cmdline": - return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.Exit.Process), nil - case "exit.code": - return int(ev.Exit.Code), nil - case "exit.container.id": - return ev.Exit.Process.ContainerID, nil - case "exit.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exit.Process)), nil - case "exit.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exit.Process), nil - case "exit.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exit.Process), nil - case "exit.file.name": - return 
ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent), nil - case "exit.pid": - return int(ev.Exit.Process.PIDContext.Pid), nil - case "exit.ppid": - return int(ev.Exit.Process.PPid), nil - case "exit.user": - return ev.FieldHandlers.ResolveUser(ev, ev.Exit.Process), nil - case "exit.user_sid": - return ev.Exit.Process.OwnerSidString, nil - case "open.registry.key_name": - return ev.OpenRegistryKey.Registry.KeyName, nil - case "open.registry.key_name.length": - return len(ev.OpenRegistryKey.Registry.KeyName), nil - case "open.registry.key_path": - return ev.OpenRegistryKey.Registry.KeyPath, nil - case "open.registry.key_path.length": - return len(ev.OpenRegistryKey.Registry.KeyPath), nil - case "open_key.registry.key_name": - return ev.OpenRegistryKey.Registry.KeyName, nil - case "open_key.registry.key_name.length": - return len(ev.OpenRegistryKey.Registry.KeyName), nil - case "open_key.registry.key_path": - return ev.OpenRegistryKey.Registry.KeyPath, nil - case "open_key.registry.key_path.length": - return len(ev.OpenRegistryKey.Registry.KeyPath), nil - case "process.ancestors.cmdline": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessCmdLine(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.container.id": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.ContainerID - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.created_at": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.envp": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.envs": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil - case "process.ancestors.file.path": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil - case "process.ancestors.length": - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - return iterator.Len(ctx), nil - case "process.ancestors.pid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PIDContext.Pid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.ppid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PPid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveUser(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.user_sid": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.OwnerSidString - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.cmdline": - return ev.FieldHandlers.ResolveProcessCmdLine(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.container.id": - return ev.BaseEvent.ProcessContext.Process.ContainerID, nil - case "process.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &ev.BaseEvent.ProcessContext.Process)), nil - case "process.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, 
&ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.parent.cmdline": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.container.id": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.ContainerID, nil - case "process.parent.created_at": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent)), nil - case "process.parent.envp": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.envs": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.file.name": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.path": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.pid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid), nil - case "process.parent.ppid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.PPid), nil - case "process.parent.user": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveUser(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.user_sid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.OwnerSidString, nil - case "process.pid": - return int(ev.BaseEvent.ProcessContext.Process.PIDContext.Pid), nil - case "process.ppid": - return int(ev.BaseEvent.ProcessContext.Process.PPid), nil - case "process.user": - return ev.FieldHandlers.ResolveUser(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.user_sid": - return ev.BaseEvent.ProcessContext.Process.OwnerSidString, nil - case "rename.file.destination.device_path": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.New), nil - case "rename.file.destination.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.New), nil - case "rename.file.destination.name": - return 
ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.New), nil - case "rename.file.destination.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.New), nil - case "rename.file.destination.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.New), nil - case "rename.file.destination.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.New), nil - case "rename.file.device_path": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.Old), nil - case "rename.file.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.Old), nil - case "rename.file.name": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.Old), nil - case "rename.file.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.Old), nil - case "rename.file.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.Old), nil - case "rename.file.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.Old), nil - case "set.registry.key_name": - return ev.SetRegistryKeyValue.Registry.KeyName, nil - case "set.registry.key_name.length": - return len(ev.SetRegistryKeyValue.Registry.KeyName), nil - case "set.registry.key_path": - return ev.SetRegistryKeyValue.Registry.KeyPath, nil - case "set.registry.key_path.length": - return len(ev.SetRegistryKeyValue.Registry.KeyPath), nil - case "set.registry.value_name": - return ev.SetRegistryKeyValue.ValueName, nil - case "set.registry.value_name.length": - return len(ev.SetRegistryKeyValue.ValueName), nil - case "set.value_name": - return ev.SetRegistryKeyValue.ValueName, nil - case "set_key_value.registry.key_name": - return ev.SetRegistryKeyValue.Registry.KeyName, nil - case "set_key_value.registry.key_name.length": - return len(ev.SetRegistryKeyValue.Registry.KeyName), nil - case "set_key_value.registry.key_path": - return ev.SetRegistryKeyValue.Registry.KeyPath, nil - case "set_key_value.registry.key_path.length": - return len(ev.SetRegistryKeyValue.Registry.KeyPath), nil - case "set_key_value.registry.value_name": - return ev.SetRegistryKeyValue.ValueName, nil - case "set_key_value.registry.value_name.length": - return len(ev.SetRegistryKeyValue.ValueName), nil - case "set_key_value.value_name": - return ev.SetRegistryKeyValue.ValueName, nil - case "write.file.device_path": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.WriteFile.File), nil - case "write.file.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.WriteFile.File), nil - case "write.file.name": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.WriteFile.File), nil - case "write.file.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.WriteFile.File), nil - case "write.file.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.WriteFile.File), nil - case "write.file.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.WriteFile.File), nil + m := &Model{} + evaluator, err := m.GetEvaluator(field, "") + if err != nil { + return nil, err } - return nil, &eval.ErrFieldNotFound{Field: field} -} -func (ev *Event) GetFieldEventType(field eval.Field) (eval.EventType, error) { - switch field { - case "change_permission.new_sd": - return "change_permission", nil - case "change_permission.old_sd": - return "change_permission", nil - case "change_permission.path": - return "change_permission", nil - case "change_permission.type": - return "change_permission", nil - case 
"change_permission.user_domain": - return "change_permission", nil - case "change_permission.username": - return "change_permission", nil - case "container.created_at": - return "", nil - case "container.id": - return "", nil - case "container.runtime": - return "", nil - case "container.tags": - return "", nil - case "create.file.device_path": - return "create", nil - case "create.file.device_path.length": - return "create", nil - case "create.file.name": - return "create", nil - case "create.file.name.length": - return "create", nil - case "create.file.path": - return "create", nil - case "create.file.path.length": - return "create", nil - case "create.registry.key_name": - return "create_key", nil - case "create.registry.key_name.length": - return "create_key", nil - case "create.registry.key_path": - return "create_key", nil - case "create.registry.key_path.length": - return "create_key", nil - case "create_key.registry.key_name": - return "create_key", nil - case "create_key.registry.key_name.length": - return "create_key", nil - case "create_key.registry.key_path": - return "create_key", nil - case "create_key.registry.key_path.length": - return "create_key", nil - case "delete.file.device_path": - return "delete", nil - case "delete.file.device_path.length": - return "delete", nil - case "delete.file.name": - return "delete", nil - case "delete.file.name.length": - return "delete", nil - case "delete.file.path": - return "delete", nil - case "delete.file.path.length": - return "delete", nil - case "delete.registry.key_name": - return "delete_key", nil - case "delete.registry.key_name.length": - return "delete_key", nil - case "delete.registry.key_path": - return "delete_key", nil - case "delete.registry.key_path.length": - return "delete_key", nil - case "delete_key.registry.key_name": - return "delete_key", nil - case "delete_key.registry.key_name.length": - return "delete_key", nil - case "delete_key.registry.key_path": - return "delete_key", nil - case "delete_key.registry.key_path.length": - return "delete_key", nil - case "event.hostname": - return "", nil - case "event.origin": - return "", nil - case "event.os": - return "", nil - case "event.service": - return "", nil - case "event.timestamp": - return "", nil - case "exec.cmdline": - return "exec", nil - case "exec.container.id": - return "exec", nil - case "exec.created_at": - return "exec", nil - case "exec.envp": - return "exec", nil - case "exec.envs": - return "exec", nil - case "exec.file.name": - return "exec", nil - case "exec.file.name.length": - return "exec", nil - case "exec.file.path": - return "exec", nil - case "exec.file.path.length": - return "exec", nil - case "exec.pid": - return "exec", nil - case "exec.ppid": - return "exec", nil - case "exec.user": - return "exec", nil - case "exec.user_sid": - return "exec", nil - case "exit.cause": - return "exit", nil - case "exit.cmdline": - return "exit", nil - case "exit.code": - return "exit", nil - case "exit.container.id": - return "exit", nil - case "exit.created_at": - return "exit", nil - case "exit.envp": - return "exit", nil - case "exit.envs": - return "exit", nil - case "exit.file.name": - return "exit", nil - case "exit.file.name.length": - return "exit", nil - case "exit.file.path": - return "exit", nil - case "exit.file.path.length": - return "exit", nil - case "exit.pid": - return "exit", nil - case "exit.ppid": - return "exit", nil - case "exit.user": - return "exit", nil - case "exit.user_sid": - return "exit", nil - case "open.registry.key_name": 
- return "open_key", nil - case "open.registry.key_name.length": - return "open_key", nil - case "open.registry.key_path": - return "open_key", nil - case "open.registry.key_path.length": - return "open_key", nil - case "open_key.registry.key_name": - return "open_key", nil - case "open_key.registry.key_name.length": - return "open_key", nil - case "open_key.registry.key_path": - return "open_key", nil - case "open_key.registry.key_path.length": - return "open_key", nil - case "process.ancestors.cmdline": - return "", nil - case "process.ancestors.container.id": - return "", nil - case "process.ancestors.created_at": - return "", nil - case "process.ancestors.envp": - return "", nil - case "process.ancestors.envs": - return "", nil - case "process.ancestors.file.name": - return "", nil - case "process.ancestors.file.name.length": - return "", nil - case "process.ancestors.file.path": - return "", nil - case "process.ancestors.file.path.length": - return "", nil - case "process.ancestors.length": - return "", nil - case "process.ancestors.pid": - return "", nil - case "process.ancestors.ppid": - return "", nil - case "process.ancestors.user": - return "", nil - case "process.ancestors.user_sid": - return "", nil - case "process.cmdline": - return "", nil - case "process.container.id": - return "", nil - case "process.created_at": - return "", nil - case "process.envp": - return "", nil - case "process.envs": - return "", nil - case "process.file.name": - return "", nil - case "process.file.name.length": - return "", nil - case "process.file.path": - return "", nil - case "process.file.path.length": - return "", nil - case "process.parent.cmdline": - return "", nil - case "process.parent.container.id": - return "", nil - case "process.parent.created_at": - return "", nil - case "process.parent.envp": - return "", nil - case "process.parent.envs": - return "", nil - case "process.parent.file.name": - return "", nil - case "process.parent.file.name.length": - return "", nil - case "process.parent.file.path": - return "", nil - case "process.parent.file.path.length": - return "", nil - case "process.parent.pid": - return "", nil - case "process.parent.ppid": - return "", nil - case "process.parent.user": - return "", nil - case "process.parent.user_sid": - return "", nil - case "process.pid": - return "", nil - case "process.ppid": - return "", nil - case "process.user": - return "", nil - case "process.user_sid": - return "", nil - case "rename.file.destination.device_path": - return "rename", nil - case "rename.file.destination.device_path.length": - return "rename", nil - case "rename.file.destination.name": - return "rename", nil - case "rename.file.destination.name.length": - return "rename", nil - case "rename.file.destination.path": - return "rename", nil - case "rename.file.destination.path.length": - return "rename", nil - case "rename.file.device_path": - return "rename", nil - case "rename.file.device_path.length": - return "rename", nil - case "rename.file.name": - return "rename", nil - case "rename.file.name.length": - return "rename", nil - case "rename.file.path": - return "rename", nil - case "rename.file.path.length": - return "rename", nil - case "set.registry.key_name": - return "set_key_value", nil - case "set.registry.key_name.length": - return "set_key_value", nil - case "set.registry.key_path": - return "set_key_value", nil - case "set.registry.key_path.length": - return "set_key_value", nil - case "set.registry.value_name": - return "set_key_value", nil - case 
"set.registry.value_name.length": - return "set_key_value", nil - case "set.value_name": - return "set_key_value", nil - case "set_key_value.registry.key_name": - return "set_key_value", nil - case "set_key_value.registry.key_name.length": - return "set_key_value", nil - case "set_key_value.registry.key_path": - return "set_key_value", nil - case "set_key_value.registry.key_path.length": - return "set_key_value", nil - case "set_key_value.registry.value_name": - return "set_key_value", nil - case "set_key_value.registry.value_name.length": - return "set_key_value", nil - case "set_key_value.value_name": - return "set_key_value", nil - case "write.file.device_path": - return "write", nil - case "write.file.device_path.length": - return "write", nil - case "write.file.name": - return "write", nil - case "write.file.name.length": - return "write", nil - case "write.file.path": - return "write", nil - case "write.file.path.length": - return "write", nil + ctx := eval.NewContext(ev) + value := evaluator.Eval(ctx) + if ctx.Error != nil { + return nil, ctx.Error } - return "", &eval.ErrFieldNotFound{Field: field} + return value, nil } -func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { +func (ev *Event) GetFieldMetadata(field eval.Field) (eval.EventType, reflect.Kind, error) { switch field { case "change_permission.new_sd": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.old_sd": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.path": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.type": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.user_domain": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.username": - return reflect.String, nil + return "change_permission", reflect.String, nil case "container.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "container.id": - return reflect.String, nil + return "", reflect.String, nil case "container.runtime": - return reflect.String, nil + return "", reflect.String, nil case "container.tags": - return reflect.String, nil + return "", reflect.String, nil case "create.file.device_path": - return reflect.String, nil + return "create", reflect.String, nil case "create.file.device_path.length": - return reflect.Int, nil + return "create", reflect.Int, nil case "create.file.name": - return reflect.String, nil + return "create", reflect.String, nil case "create.file.name.length": - return reflect.Int, nil + return "create", reflect.Int, nil case "create.file.path": - return reflect.String, nil + return "create", reflect.String, nil case "create.file.path.length": - return reflect.Int, nil + return "create", reflect.Int, nil case "create.registry.key_name": - return reflect.String, nil + return "create_key", reflect.String, nil case "create.registry.key_name.length": - return reflect.Int, nil + return "create_key", reflect.Int, nil case "create.registry.key_path": - return reflect.String, nil + return "create_key", reflect.String, nil case "create.registry.key_path.length": - return reflect.Int, nil + return "create_key", reflect.Int, nil case "create_key.registry.key_name": - return reflect.String, nil + return "create_key", reflect.String, nil case "create_key.registry.key_name.length": - return reflect.Int, nil + return "create_key", reflect.Int, 
nil case "create_key.registry.key_path": - return reflect.String, nil + return "create_key", reflect.String, nil case "create_key.registry.key_path.length": - return reflect.Int, nil + return "create_key", reflect.Int, nil case "delete.file.device_path": - return reflect.String, nil + return "delete", reflect.String, nil case "delete.file.device_path.length": - return reflect.Int, nil + return "delete", reflect.Int, nil case "delete.file.name": - return reflect.String, nil + return "delete", reflect.String, nil case "delete.file.name.length": - return reflect.Int, nil + return "delete", reflect.Int, nil case "delete.file.path": - return reflect.String, nil + return "delete", reflect.String, nil case "delete.file.path.length": - return reflect.Int, nil + return "delete", reflect.Int, nil case "delete.registry.key_name": - return reflect.String, nil + return "delete_key", reflect.String, nil case "delete.registry.key_name.length": - return reflect.Int, nil + return "delete_key", reflect.Int, nil case "delete.registry.key_path": - return reflect.String, nil + return "delete_key", reflect.String, nil case "delete.registry.key_path.length": - return reflect.Int, nil + return "delete_key", reflect.Int, nil case "delete_key.registry.key_name": - return reflect.String, nil + return "delete_key", reflect.String, nil case "delete_key.registry.key_name.length": - return reflect.Int, nil + return "delete_key", reflect.Int, nil case "delete_key.registry.key_path": - return reflect.String, nil + return "delete_key", reflect.String, nil case "delete_key.registry.key_path.length": - return reflect.Int, nil + return "delete_key", reflect.Int, nil case "event.hostname": - return reflect.String, nil + return "", reflect.String, nil case "event.origin": - return reflect.String, nil + return "", reflect.String, nil case "event.os": - return reflect.String, nil + return "", reflect.String, nil case "event.service": - return reflect.String, nil + return "", reflect.String, nil case "event.timestamp": - return reflect.Int, nil + return "", reflect.Int, nil case "exec.cmdline": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.container.id": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.created_at": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.envp": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.envs": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.name": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.name.length": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.path": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.path.length": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.pid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.ppid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.user": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.user_sid": - return reflect.String, nil + return "exec", reflect.String, nil case "exit.cause": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.cmdline": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.code": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.container.id": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.created_at": - return 
reflect.Int, nil + return "exit", reflect.Int, nil case "exit.envp": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.envs": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.name": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.name.length": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.path": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.path.length": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.pid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.ppid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.user": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.user_sid": - return reflect.String, nil + return "exit", reflect.String, nil case "open.registry.key_name": - return reflect.String, nil + return "open_key", reflect.String, nil case "open.registry.key_name.length": - return reflect.Int, nil + return "open_key", reflect.Int, nil case "open.registry.key_path": - return reflect.String, nil + return "open_key", reflect.String, nil case "open.registry.key_path.length": - return reflect.Int, nil + return "open_key", reflect.Int, nil case "open_key.registry.key_name": - return reflect.String, nil + return "open_key", reflect.String, nil case "open_key.registry.key_name.length": - return reflect.Int, nil + return "open_key", reflect.Int, nil case "open_key.registry.key_path": - return reflect.String, nil + return "open_key", reflect.String, nil case "open_key.registry.key_path.length": - return reflect.Int, nil + return "open_key", reflect.Int, nil case "process.ancestors.cmdline": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.container.id": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.envp": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.envs": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.pid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.ppid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.user": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.user_sid": - return reflect.String, nil + return "", reflect.String, nil case "process.cmdline": - return reflect.String, nil + return "", reflect.String, nil case "process.container.id": - return reflect.String, nil + return "", reflect.String, nil case "process.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "process.envp": - return reflect.String, nil + return "", reflect.String, nil case "process.envs": - return reflect.String, nil + return "", reflect.String, nil case "process.file.name": - return reflect.String, nil + return "", 
reflect.String, nil case "process.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.cmdline": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.container.id": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.envp": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.envs": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.pid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.ppid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.user": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.user_sid": - return reflect.String, nil + return "", reflect.String, nil case "process.pid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ppid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.user": - return reflect.String, nil + return "", reflect.String, nil case "process.user_sid": - return reflect.String, nil + return "", reflect.String, nil case "rename.file.destination.device_path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.device_path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.name": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.name.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.device_path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.device_path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.name": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.name.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "set.registry.key_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set.registry.key_name.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set.registry.key_path": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set.registry.key_path.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set.registry.value_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil 
case "set.registry.value_name.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set.value_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set_key_value.registry.key_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set_key_value.registry.key_name.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set_key_value.registry.key_path": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set_key_value.registry.key_path.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set_key_value.registry.value_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set_key_value.registry.value_name.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set_key_value.value_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "write.file.device_path": - return reflect.String, nil + return "write", reflect.String, nil case "write.file.device_path.length": - return reflect.Int, nil + return "write", reflect.Int, nil case "write.file.name": - return reflect.String, nil + return "write", reflect.String, nil case "write.file.name.length": - return reflect.Int, nil + return "write", reflect.Int, nil case "write.file.path": - return reflect.String, nil + return "write", reflect.String, nil case "write.file.path.length": - return reflect.Int, nil + return "write", reflect.Int, nil } - return reflect.Invalid, &eval.ErrFieldNotFound{Field: field} + return "", reflect.Invalid, &eval.ErrFieldNotFound{Field: field} } func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { switch field { case "change_permission.new_sd": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.NewSd"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.new_sd"} } ev.ChangePermission.NewSd = rv return nil case "change_permission.old_sd": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.OldSd"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.old_sd"} } ev.ChangePermission.OldSd = rv return nil case "change_permission.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.ObjectName"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.path"} } ev.ChangePermission.ObjectName = rv return nil case "change_permission.type": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.ObjectType"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.type"} } ev.ChangePermission.ObjectType = rv return nil case "change_permission.user_domain": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.UserDomain"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.user_domain"} } ev.ChangePermission.UserDomain = rv return nil case "change_permission.username": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.UserName"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.username"} } ev.ChangePermission.UserName = rv return nil @@ -3158,7 +2386,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"BaseEvent.ContainerContext.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "container.created_at"} } ev.BaseEvent.ContainerContext.CreatedAt = uint64(rv) return nil @@ -3168,7 +2396,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "container.id"} } ev.BaseEvent.ContainerContext.ContainerID = containerutils.ContainerID(rv) return nil @@ -3178,7 +2406,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.Runtime"} + return &eval.ErrValueTypeMismatch{Field: "container.runtime"} } ev.BaseEvent.ContainerContext.Runtime = rv return nil @@ -3192,13 +2420,13 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ContainerContext.Tags = append(ev.BaseEvent.ContainerContext.Tags, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.Tags"} + return &eval.ErrValueTypeMismatch{Field: "container.tags"} } return nil case "create.file.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateNewFile.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "create.file.device_path"} } ev.CreateNewFile.File.PathnameStr = rv return nil @@ -3207,7 +2435,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateNewFile.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "create.file.name"} } ev.CreateNewFile.File.BasenameStr = rv return nil @@ -3216,7 +2444,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateNewFile.File.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "create.file.path"} } ev.CreateNewFile.File.UserPathnameStr = rv return nil @@ -3225,7 +2453,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "create.registry.key_name"} } ev.CreateRegistryKey.Registry.KeyName = rv return nil @@ -3234,7 +2462,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "create.registry.key_path"} } ev.CreateRegistryKey.Registry.KeyPath = rv return nil @@ -3243,7 +2471,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create_key.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "create_key.registry.key_name"} } ev.CreateRegistryKey.Registry.KeyName = rv return nil @@ -3252,7 +2480,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create_key.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"CreateRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "create_key.registry.key_path"} } ev.CreateRegistryKey.Registry.KeyPath = rv return nil @@ -3261,7 +2489,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.file.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteFile.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "delete.file.device_path"} } ev.DeleteFile.File.PathnameStr = rv return nil @@ -3270,7 +2498,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteFile.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "delete.file.name"} } ev.DeleteFile.File.BasenameStr = rv return nil @@ -3279,7 +2507,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteFile.File.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "delete.file.path"} } ev.DeleteFile.File.UserPathnameStr = rv return nil @@ -3288,7 +2516,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "delete.registry.key_name"} } ev.DeleteRegistryKey.Registry.KeyName = rv return nil @@ -3297,7 +2525,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "delete.registry.key_path"} } ev.DeleteRegistryKey.Registry.KeyPath = rv return nil @@ -3306,7 +2534,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete_key.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "delete_key.registry.key_name"} } ev.DeleteRegistryKey.Registry.KeyName = rv return nil @@ -3315,7 +2543,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete_key.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "delete_key.registry.key_path"} } ev.DeleteRegistryKey.Registry.KeyPath = rv return nil @@ -3324,35 +2552,35 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "event.hostname": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Hostname"} + return &eval.ErrValueTypeMismatch{Field: "event.hostname"} } ev.BaseEvent.Hostname = rv return nil case "event.origin": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Origin"} + return &eval.ErrValueTypeMismatch{Field: "event.origin"} } ev.BaseEvent.Origin = rv return nil case "event.os": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Os"} + return &eval.ErrValueTypeMismatch{Field: "event.os"} } ev.BaseEvent.Os = rv return nil case "event.service": rv, ok := value.(string) if !ok { - return 
&eval.ErrValueTypeMismatch{Field: "BaseEvent.Service"} + return &eval.ErrValueTypeMismatch{Field: "event.service"} } ev.BaseEvent.Service = rv return nil case "event.timestamp": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.TimestampRaw"} + return &eval.ErrValueTypeMismatch{Field: "event.timestamp"} } ev.BaseEvent.TimestampRaw = uint64(rv) return nil @@ -3362,7 +2590,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "exec.cmdline"} } ev.Exec.Process.CmdLine = rv return nil @@ -3372,7 +2600,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "exec.container.id"} } ev.Exec.Process.ContainerID = rv return nil @@ -3382,7 +2610,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "exec.created_at"} } ev.Exec.Process.CreatedAt = uint64(rv) return nil @@ -3396,7 +2624,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.Envp = append(ev.Exec.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "exec.envp"} } return nil case "exec.envs": @@ -3409,7 +2637,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.Envs = append(ev.Exec.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "exec.envs"} } return nil case "exec.file.name": @@ -3418,7 +2646,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.name"} } ev.Exec.Process.FileEvent.BasenameStr = rv return nil @@ -3433,7 +2661,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.path"} } ev.Exec.Process.FileEvent.PathnameStr = rv return nil @@ -3448,7 +2676,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "exec.pid"} } ev.Exec.Process.PIDContext.Pid = uint32(rv) return nil @@ -3458,7 +2686,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "exec.ppid"} } ev.Exec.Process.PPid = uint32(rv) return nil @@ -3468,7 +2696,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.User"} + return &eval.ErrValueTypeMismatch{Field: "exec.user"} } ev.Exec.Process.User = rv return nil @@ -3478,14 +2706,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "exec.user_sid"} } ev.Exec.Process.OwnerSidString = rv return nil case "exit.cause": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Cause"} + return &eval.ErrValueTypeMismatch{Field: "exit.cause"} } ev.Exit.Cause = uint32(rv) return nil @@ -3495,14 +2723,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "exit.cmdline"} } ev.Exit.Process.CmdLine = rv return nil case "exit.code": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Code"} + return &eval.ErrValueTypeMismatch{Field: "exit.code"} } ev.Exit.Code = uint32(rv) return nil @@ -3512,7 +2740,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "exit.container.id"} } ev.Exit.Process.ContainerID = rv return nil @@ -3522,7 +2750,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "exit.created_at"} } ev.Exit.Process.CreatedAt = uint64(rv) return nil @@ -3536,7 +2764,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.Envp = 
append(ev.Exit.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "exit.envp"} } return nil case "exit.envs": @@ -3549,7 +2777,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.Envs = append(ev.Exit.Process.Envs, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "exit.envs"} } return nil case "exit.file.name": @@ -3558,7 +2786,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.name"} } ev.Exit.Process.FileEvent.BasenameStr = rv return nil @@ -3573,7 +2801,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.path"} } ev.Exit.Process.FileEvent.PathnameStr = rv return nil @@ -3588,7 +2816,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "exit.pid"} } ev.Exit.Process.PIDContext.Pid = uint32(rv) return nil @@ -3598,7 +2826,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "exit.ppid"} } ev.Exit.Process.PPid = uint32(rv) return nil @@ -3608,7 +2836,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.User"} + return &eval.ErrValueTypeMismatch{Field: "exit.user"} } ev.Exit.Process.User = rv return nil @@ -3618,14 +2846,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "exit.user_sid"} } ev.Exit.Process.OwnerSidString = rv return nil case "open.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OpenRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "open.registry.key_name"} } ev.OpenRegistryKey.Registry.KeyName = rv return nil @@ -3634,7 +2862,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "open.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OpenRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "open.registry.key_path"} } ev.OpenRegistryKey.Registry.KeyPath = rv return nil @@ -3643,7 +2871,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "open_key.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OpenRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "open_key.registry.key_name"} } ev.OpenRegistryKey.Registry.KeyName = rv return nil @@ -3652,7 +2880,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case 
"open_key.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OpenRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "open_key.registry.key_path"} } ev.OpenRegistryKey.Registry.KeyPath = rv return nil @@ -3667,7 +2895,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.cmdline"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CmdLine = rv return nil @@ -3680,7 +2908,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.container.id"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.ContainerID = rv return nil @@ -3693,7 +2921,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.created_at"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CreatedAt = uint64(rv) return nil @@ -3710,7 +2938,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envp = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.envp"} } return nil case "process.ancestors.envs": @@ -3726,7 +2954,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envs = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.envs"} } return nil case "process.ancestors.file.name": @@ -3738,7 +2966,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.name"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.BasenameStr = rv return nil @@ -3759,7 +2987,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.path"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PathnameStr = rv return nil @@ -3788,7 +3016,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.pid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.Pid = uint32(rv) return nil @@ -3801,7 +3029,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.ppid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PPid = uint32(rv) return nil @@ -3814,7 +3042,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.User"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.user"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.User = rv return nil @@ -3827,7 +3055,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.user_sid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.OwnerSidString = rv return nil @@ -3837,7 +3065,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "process.cmdline"} } ev.BaseEvent.ProcessContext.Process.CmdLine = rv return nil @@ -3847,7 +3075,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "process.container.id"} } ev.BaseEvent.ProcessContext.Process.ContainerID = rv return nil @@ -3857,7 +3085,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) 
if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "process.created_at"} } ev.BaseEvent.ProcessContext.Process.CreatedAt = uint64(rv) return nil @@ -3871,7 +3099,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.Envp = append(ev.BaseEvent.ProcessContext.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "process.envp"} } return nil case "process.envs": @@ -3884,7 +3112,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.Envs = append(ev.BaseEvent.ProcessContext.Process.Envs, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "process.envs"} } return nil case "process.file.name": @@ -3893,7 +3121,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.file.name"} } ev.BaseEvent.ProcessContext.Process.FileEvent.BasenameStr = rv return nil @@ -3908,7 +3136,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.file.path"} } ev.BaseEvent.ProcessContext.Process.FileEvent.PathnameStr = rv return nil @@ -3926,7 +3154,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.cmdline"} } ev.BaseEvent.ProcessContext.Parent.CmdLine = rv return nil @@ -3939,7 +3167,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.container.id"} } ev.BaseEvent.ProcessContext.Parent.ContainerID = rv return nil @@ -3952,7 +3180,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.created_at"} } ev.BaseEvent.ProcessContext.Parent.CreatedAt = uint64(rv) return nil @@ -3969,7 +3197,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.Envp = append(ev.BaseEvent.ProcessContext.Parent.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Envp"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.envp"} } return nil case "process.parent.envs": @@ -3985,7 +3213,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.Envs = append(ev.BaseEvent.ProcessContext.Parent.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Envs"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.envs"} } return nil case "process.parent.file.name": @@ -3997,7 +3225,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.name"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.BasenameStr = rv return nil @@ -4018,7 +3246,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.path"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.PathnameStr = rv return nil @@ -4039,7 +3267,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.pid"} } ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid = uint32(rv) return nil @@ -4052,7 +3280,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.PPid"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.ppid"} } ev.BaseEvent.ProcessContext.Parent.PPid = uint32(rv) return nil @@ -4065,7 +3293,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.User"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.user"} } ev.BaseEvent.ProcessContext.Parent.User = rv return nil @@ -4078,7 +3306,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.user_sid"} } ev.BaseEvent.ProcessContext.Parent.OwnerSidString = rv return nil @@ -4088,7 +3316,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "process.pid"} } ev.BaseEvent.ProcessContext.Process.PIDContext.Pid = uint32(rv) return nil @@ -4098,7 +3326,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "process.ppid"} } ev.BaseEvent.ProcessContext.Process.PPid = uint32(rv) return nil @@ -4108,7 +3336,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.User"} + return &eval.ErrValueTypeMismatch{Field: "process.user"} } ev.BaseEvent.ProcessContext.Process.User = rv return nil @@ -4118,14 +3346,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { 
- return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "process.user_sid"} } ev.BaseEvent.ProcessContext.Process.OwnerSidString = rv return nil case "rename.file.destination.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.New.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.device_path"} } ev.RenameFile.New.PathnameStr = rv return nil @@ -4134,7 +3362,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.destination.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.New.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.name"} } ev.RenameFile.New.BasenameStr = rv return nil @@ -4143,7 +3371,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.destination.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.New.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.path"} } ev.RenameFile.New.UserPathnameStr = rv return nil @@ -4152,7 +3380,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.Old.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.device_path"} } ev.RenameFile.Old.PathnameStr = rv return nil @@ -4161,7 +3389,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.Old.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.name"} } ev.RenameFile.Old.BasenameStr = rv return nil @@ -4170,7 +3398,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.Old.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.path"} } ev.RenameFile.Old.UserPathnameStr = rv return nil @@ -4179,7 +3407,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "set.registry.key_name"} } ev.SetRegistryKeyValue.Registry.KeyName = rv return nil @@ -4188,7 +3416,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "set.registry.key_path"} } ev.SetRegistryKeyValue.Registry.KeyPath = rv return nil @@ -4197,7 +3425,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set.registry.value_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.ValueName"} + return &eval.ErrValueTypeMismatch{Field: "set.registry.value_name"} } ev.SetRegistryKeyValue.ValueName = rv return nil @@ -4206,14 +3434,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set.value_name": rv, ok := value.(string) if !ok { - 
return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.ValueName"} + return &eval.ErrValueTypeMismatch{Field: "set.value_name"} } ev.SetRegistryKeyValue.ValueName = rv return nil case "set_key_value.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "set_key_value.registry.key_name"} } ev.SetRegistryKeyValue.Registry.KeyName = rv return nil @@ -4222,7 +3450,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set_key_value.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "set_key_value.registry.key_path"} } ev.SetRegistryKeyValue.Registry.KeyPath = rv return nil @@ -4231,7 +3459,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set_key_value.registry.value_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.ValueName"} + return &eval.ErrValueTypeMismatch{Field: "set_key_value.registry.value_name"} } ev.SetRegistryKeyValue.ValueName = rv return nil @@ -4240,14 +3468,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set_key_value.value_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.ValueName"} + return &eval.ErrValueTypeMismatch{Field: "set_key_value.value_name"} } ev.SetRegistryKeyValue.ValueName = rv return nil case "write.file.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "WriteFile.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "write.file.device_path"} } ev.WriteFile.File.PathnameStr = rv return nil @@ -4256,7 +3484,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "write.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "WriteFile.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "write.file.name"} } ev.WriteFile.File.BasenameStr = rv return nil @@ -4265,7 +3493,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "write.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "WriteFile.File.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "write.file.path"} } ev.WriteFile.File.UserPathnameStr = rv return nil diff --git a/pkg/security/secl/model/args_envs.go b/pkg/security/secl/model/args_envs.go index b59049742d8a5..4d2d7aa244fc3 100644 --- a/pkg/security/secl/model/args_envs.go +++ b/pkg/security/secl/model/args_envs.go @@ -9,20 +9,15 @@ package model import ( "slices" "strings" -) -const ( - // MaxArgEnvSize maximum size of one argument or environment variable - MaxArgEnvSize = 256 - // MaxArgsEnvsSize maximum number of args and/or envs - MaxArgsEnvsSize = 256 + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" ) // ArgsEnvs raw value for args and envs type ArgsEnvs struct { ID uint64 Size uint32 - ValuesRaw [MaxArgEnvSize]byte + ValuesRaw [sharedconsts.MaxArgEnvSize]byte } // ArgsEntry defines a args cache entry diff --git a/pkg/security/secl/model/category.go b/pkg/security/secl/model/category.go index f03a79ffeb7b9..9a1a5107c033d 100644 --- a/pkg/security/secl/model/category.go +++ b/pkg/security/secl/model/category.go @@ -57,6 +57,7 @@ func 
GetEventTypeCategory(eventType eval.EventType) EventCategory {
 		MProtectEventType.String(),
 		PTraceEventType.String(),
 		UnloadModuleEventType.String(),
+		AcceptEventType.String(),
 		BindEventType.String(),
 		ConnectEventType.String():
 		return KernelCategory
@@ -65,7 +66,8 @@ func GetEventTypeCategory(eventType eval.EventType) EventCategory {
 	case IMDSEventType.String(),
 		RawPacketEventType.String(),
-		DNSEventType.String():
+		DNSEventType.String(),
+		NetworkFlowMonitorEventType.String():
 		return NetworkCategory
 	}
diff --git a/pkg/security/secl/model/consts_common.go b/pkg/security/secl/model/consts_common.go
index 1b64957cba71e..1b3f9f9309bb2 100644
--- a/pkg/security/secl/model/consts_common.go
+++ b/pkg/security/secl/model/consts_common.go
@@ -13,6 +13,7 @@ import (
 	"syscall"
 
 	"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
+	"github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts"
 	"github.com/DataDog/datadog-agent/pkg/security/secl/model/usersession"
 )
@@ -319,11 +320,18 @@ var (
 		"IP_PROTO_RAW": IPProtoRAW,
 	}
 
+	// NetworkDirectionConstants is the list of supported network directions
+	// generate_constants:Network directions,Network directions are the supported directions of network packets.
+	NetworkDirectionConstants = map[string]NetworkDirection{
+		"INGRESS": Ingress,
+		"EGRESS":  Egress,
+	}
+
 	// exitCauseConstants is the list of supported Exit causes
-	exitCauseConstants = map[string]ExitCause{
-		"EXITED":     ExitExited,
-		"COREDUMPED": ExitCoreDumped,
-		"SIGNALED":   ExitSignaled,
+	exitCauseConstants = map[string]sharedconsts.ExitCause{
+		"EXITED":     sharedconsts.ExitExited,
+		"COREDUMPED": sharedconsts.ExitCoreDumped,
+		"SIGNALED":   sharedconsts.ExitSignaled,
 	}
 
 	tlsVersionContants = map[string]uint16{
@@ -337,13 +345,13 @@ var (
 )
 
 var (
-	dnsQTypeStrings      = map[uint32]string{}
-	dnsQClassStrings     = map[uint32]string{}
-	l3ProtocolStrings    = map[L3Protocol]string{}
-	l4ProtocolStrings    = map[L4Protocol]string{}
-	addressFamilyStrings = map[uint16]string{}
-	exitCauseStrings     = map[ExitCause]string{}
-	tlsVersionStrings    = map[uint16]string{}
+	dnsQTypeStrings         = map[uint32]string{}
+	dnsQClassStrings        = map[uint32]string{}
+	l3ProtocolStrings       = map[L3Protocol]string{}
+	l4ProtocolStrings       = map[L4Protocol]string{}
+	networkDirectionStrings = map[NetworkDirection]string{}
+	addressFamilyStrings    = map[uint16]string{}
+	tlsVersionStrings       = map[uint16]string{}
 )
 
 // File flags
@@ -410,6 +418,13 @@ func initL4ProtocolConstants() {
 	}
 }
 
+func initNetworkDirectionContants() {
+	for k, v := range NetworkDirectionConstants {
+		seclConstants[k] = &eval.IntEvaluator{Value: int(v)}
+		networkDirectionStrings[v] = k
+	}
+}
+
 func initAddressFamilyConstants() {
 	for k, v := range addressFamilyConstants {
 		seclConstants[k] = &eval.IntEvaluator{Value: int(v)}
@@ -423,7 +438,6 @@ func initAddressFamilyConstants() {
 func initExitCauseConstants() {
 	for k, v := range exitCauseConstants {
 		seclConstants[k] = &eval.IntEvaluator{Value: int(v)}
-		exitCauseStrings[v] = k
 	}
 }
@@ -463,6 +477,7 @@ func initConstants() {
 	initDNSQTypeConstants()
 	initL3ProtocolConstants()
 	initL4ProtocolConstants()
+	initNetworkDirectionContants()
 	initAddressFamilyConstants()
 	initExitCauseConstants()
 	initBPFMapNamesConstants()
@@ -781,18 +796,16 @@ const (
 	IPProtoRAW L4Protocol = 255
 )
 
-// ExitCause represents the cause of a process termination
-type ExitCause uint32
+// NetworkDirection is used to identify the network direction of a flow
+type NetworkDirection uint32
 
-func (cause ExitCause) String() string {
-	return exitCauseStrings[cause]
+func (direction NetworkDirection) String() string {
+	return networkDirectionStrings[direction]
 }
 
 const (
-	// ExitExited Process exited normally
-	ExitExited ExitCause = iota
-	// ExitCoreDumped Process was terminated with a coredump signal
-	ExitCoreDumped
-	// ExitSignaled Process was terminated with a signal other than a coredump
-	ExitSignaled
+	// Egress is used to identify egress traffic
+	Egress NetworkDirection = iota + 1
+	// Ingress is used to identify ingress traffic
+	Ingress
 )
diff --git a/pkg/security/secl/model/consts_linux.go b/pkg/security/secl/model/consts_linux.go
index 00e1d3351092f..01981583ecfad 100644
--- a/pkg/security/secl/model/consts_linux.go
+++ b/pkg/security/secl/model/consts_linux.go
@@ -15,6 +15,7 @@ import (
 	"syscall"
 
 	"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
+	"github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts"
 	lru "github.com/hashicorp/golang-lru/v2"
 	"golang.org/x/sys/unix"
 )
@@ -984,14 +985,9 @@ func initBPFMapNamesConstants() {
 	}
 }
 
 func initAUIDConstants() {
-	seclConstants["AUDIT_AUID_UNSET"] = &eval.IntEvaluator{Value: AuditUIDUnset}
+	seclConstants["AUDIT_AUID_UNSET"] = &eval.IntEvaluator{Value: sharedconsts.AuditUIDUnset}
 }
 
-const (
-	// AuditUIDUnset is used to specify that a login uid is not set
-	AuditUIDUnset = math.MaxUint32
-)
-
 func bitmaskToStringArray(bitmask int, intToStrMap map[int]string) []string {
 	var strs []string
 	var result int
diff --git a/pkg/security/secl/model/consts_map_names_linux.go b/pkg/security/secl/model/consts_map_names_linux.go
index 55a6198fcfa78..0c76dbbc28bb2 100644
--- a/pkg/security/secl/model/consts_map_names_linux.go
+++ b/pkg/security/secl/model/consts_map_names_linux.go
@@ -31,7 +31,7 @@ var bpfMapNames = []string{
 	"events",
 	"events_ringbuf_",
 	"events_stats",
-	"exec_file_cache",
+	"inode_file",
 	"exec_pid_transf",
 	"fb_approver_sta",
 	"fb_discarder_st",
diff --git a/pkg/security/secl/model/events.go b/pkg/security/secl/model/events.go
index 2c2e867ef17e8..77afd53f202e2 100644
--- a/pkg/security/secl/model/events.go
+++ b/pkg/security/secl/model/events.go
@@ -83,6 +83,8 @@ const (
 	NetDeviceEventType
 	// VethPairEventType is sent when a new veth pair is created
 	VethPairEventType
+	// AcceptEventType Accept event
+	AcceptEventType
 	// BindEventType Bind event
 	BindEventType
 	// ConnectEventType Connect event
@@ -101,6 +103,10 @@ const (
 	CgroupWriteEventType
 	// RawPacketEventType raw packet event
 	RawPacketEventType
+	// NetworkFlowMonitorEventType is sent to monitor network activity
+	NetworkFlowMonitorEventType
+	// StatEventType stat event (used kernel side only)
+	StatEventType
 	// MaxKernelEventType is used internally to get the maximum number of kernel events.
MaxKernelEventType @@ -219,6 +225,8 @@ func (t EventType) String() string { return "veth_pair" case BindEventType: return "bind" + case AcceptEventType: + return "accept" case ConnectEventType: return "connect" case UnshareMountNsEventType: @@ -231,6 +239,10 @@ func (t EventType) String() string { return "ondemand" case RawPacketEventType: return "packet" + case NetworkFlowMonitorEventType: + return "network_flow_monitor" + case StatEventType: + return "stat" case CustomEventType: return "custom_event" case CreateNewFileEventType: diff --git a/pkg/security/secl/model/field_accessors_unix.go b/pkg/security/secl/model/field_accessors_unix.go index 80a466602a68c..a64213aa03f45 100644 --- a/pkg/security/secl/model/field_accessors_unix.go +++ b/pkg/security/secl/model/field_accessors_unix.go @@ -14,19954 +14,2542 @@ import ( "time" ) -// GetBindAddrFamily returns the value of the field, resolving if necessary -func (ev *Event) GetBindAddrFamily() uint16 { - if ev.GetEventType().String() != "bind" { - return uint16(0) - } - return ev.Bind.AddrFamily -} +var _ = time.Time{} +var _ = net.IP{} +var _ = eval.NewContext -// GetBindAddrIp returns the value of the field, resolving if necessary -func (ev *Event) GetBindAddrIp() net.IPNet { - if ev.GetEventType().String() != "bind" { - return net.IPNet{} +// GetChdirFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetChdirFilePath() string { + if ev.GetEventType().String() != "chdir" { + return "" } - return ev.Bind.Addr.IPNet + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chdir.File) } -// GetBindAddrIsPublic returns the value of the field, resolving if necessary -func (ev *Event) GetBindAddrIsPublic() bool { - if ev.GetEventType().String() != "bind" { - return false +// GetChdirFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetChdirFilePathLength() int { + if ev.GetEventType().String() != "chdir" { + return 0 } - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Bind.Addr) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Chdir.File)) } -// GetBindAddrPort returns the value of the field, resolving if necessary -func (ev *Event) GetBindAddrPort() uint16 { - if ev.GetEventType().String() != "bind" { - return uint16(0) +// GetChmodFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetChmodFilePath() string { + if ev.GetEventType().String() != "chmod" { + return "" } - return ev.Bind.Addr.Port + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chmod.File) } -// GetBindProtocol returns the value of the field, resolving if necessary -func (ev *Event) GetBindProtocol() uint16 { - if ev.GetEventType().String() != "bind" { - return uint16(0) +// GetChmodFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetChmodFilePathLength() int { + if ev.GetEventType().String() != "chmod" { + return 0 } - return ev.Bind.Protocol + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Chmod.File)) } -// GetBindRetval returns the value of the field, resolving if necessary -func (ev *Event) GetBindRetval() int64 { - if ev.GetEventType().String() != "bind" { - return int64(0) +// GetChownFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetChownFilePath() string { + if ev.GetEventType().String() != "chown" { + return "" } - return ev.Bind.SyscallEvent.Retval + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chown.File) } -// GetBpfCmd returns the value of the field, resolving if necessary -func (ev 
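Note on the regenerated unix accessors above: every getter in the new field_accessors_unix.go follows the same guard pattern, returning the type's zero value when the event is not of the expected type and otherwise delegating to a FieldHandlers resolver. A minimal sketch of how a caller can lean on that contract follows; printChdirPath is a hypothetical helper written for illustration (it assumes a unix build of the model package) and is not part of this change.

package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/security/secl/model"
)

// printChdirPath relies on the generated guard: GetChdirFilePath returns ""
// whenever the event is not a chdir event, so no explicit type check is
// needed before calling it.
func printChdirPath(ev *model.Event) {
	if path := ev.GetChdirFilePath(); path != "" {
		fmt.Println("chdir target:", path)
	}
}

func main() {
	// A zero-value event is not a chdir event, so this prints nothing.
	printChdirPath(&model.Event{})
}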
*Event) GetBpfCmd() uint32 { - if ev.GetEventType().String() != "bpf" { - return uint32(0) +// GetChownFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetChownFilePathLength() int { + if ev.GetEventType().String() != "chown" { + return 0 } - return ev.BPF.Cmd + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Chown.File)) } -// GetBpfMapName returns the value of the field, resolving if necessary -func (ev *Event) GetBpfMapName() string { - if ev.GetEventType().String() != "bpf" { - return "" +// GetContainerCreatedAt returns the value of the field, resolving if necessary +func (ev *Event) GetContainerCreatedAt() int { + if ev.BaseEvent.ContainerContext == nil { + return 0 } - return ev.BPF.Map.Name + return ev.FieldHandlers.ResolveContainerCreatedAt(ev, ev.BaseEvent.ContainerContext) } -// GetBpfMapType returns the value of the field, resolving if necessary -func (ev *Event) GetBpfMapType() uint32 { - if ev.GetEventType().String() != "bpf" { - return uint32(0) +// GetContainerId returns the value of the field, resolving if necessary +func (ev *Event) GetContainerId() string { + if ev.BaseEvent.ContainerContext == nil { + return "" } - return ev.BPF.Map.Type + return ev.FieldHandlers.ResolveContainerID(ev, ev.BaseEvent.ContainerContext) } -// GetBpfProgAttachType returns the value of the field, resolving if necessary -func (ev *Event) GetBpfProgAttachType() uint32 { - if ev.GetEventType().String() != "bpf" { - return uint32(0) - } - return ev.BPF.Program.AttachType +// GetEventService returns the value of the field, resolving if necessary +func (ev *Event) GetEventService() string { + return ev.FieldHandlers.ResolveService(ev, &ev.BaseEvent) } -// GetBpfProgHelpers returns the value of the field, resolving if necessary -func (ev *Event) GetBpfProgHelpers() []uint32 { - if ev.GetEventType().String() != "bpf" { - return []uint32{} +// GetExecCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetExecCmdargv() []string { + if ev.GetEventType().String() != "exec" { + return []string{} } - return ev.BPF.Program.Helpers -} - -// GetBpfProgName returns the value of the field, resolving if necessary -func (ev *Event) GetBpfProgName() string { - if ev.GetEventType().String() != "bpf" { - return "" + if ev.Exec.Process == nil { + return []string{} } - return ev.BPF.Program.Name + return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.Exec.Process) } -// GetBpfProgTag returns the value of the field, resolving if necessary -func (ev *Event) GetBpfProgTag() string { - if ev.GetEventType().String() != "bpf" { - return "" +// GetExecEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetExecEnvp() []string { + if ev.GetEventType().String() != "exec" { + return []string{} } - return ev.BPF.Program.Tag -} - -// GetBpfProgType returns the value of the field, resolving if necessary -func (ev *Event) GetBpfProgType() uint32 { - if ev.GetEventType().String() != "bpf" { - return uint32(0) + if ev.Exec.Process == nil { + return []string{} } - return ev.BPF.Program.Type + return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exec.Process) } -// GetBpfRetval returns the value of the field, resolving if necessary -func (ev *Event) GetBpfRetval() int64 { - if ev.GetEventType().String() != "bpf" { - return int64(0) +// GetExecExecTime returns the value of the field, resolving if necessary +func (ev *Event) GetExecExecTime() time.Time { + if ev.GetEventType().String() != "exec" { + return time.Time{} } - return 
ev.BPF.SyscallEvent.Retval -} - -// GetCapsetCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetCapsetCapEffective() uint64 { - if ev.GetEventType().String() != "capset" { - return uint64(0) + if ev.Exec.Process == nil { + return time.Time{} } - return ev.Capset.CapEffective + return ev.Exec.Process.ExecTime } -// GetCapsetCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetCapsetCapPermitted() uint64 { - if ev.GetEventType().String() != "capset" { - return uint64(0) +// GetExecExitTime returns the value of the field, resolving if necessary +func (ev *Event) GetExecExitTime() time.Time { + if ev.GetEventType().String() != "exec" { + return time.Time{} } - return ev.Capset.CapPermitted -} - -// GetCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetCgroupFileInode() uint64 { - return ev.CGroupContext.CGroupFile.Inode -} - -// GetCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetCgroupFileMountId() uint32 { - return ev.CGroupContext.CGroupFile.MountID -} - -// GetCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetCgroupId() string { - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.CGroupContext) -} - -// GetCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetCgroupManager() string { - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.CGroupContext) -} - -// GetCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetCgroupVersion() int { - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.CGroupContext) -} - -// GetChdirFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileChangeTime() uint64 { - if ev.GetEventType().String() != "chdir" { - return uint64(0) + if ev.Exec.Process == nil { + return time.Time{} } - return ev.Chdir.File.FileFields.CTime + return ev.Exec.Process.ExitTime } -// GetChdirFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileFilesystem() string { - if ev.GetEventType().String() != "chdir" { +// GetExecFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetExecFilePath() string { + if ev.GetEventType().String() != "exec" { return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Chdir.File) -} - -// GetChdirFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileGid() uint32 { - if ev.GetEventType().String() != "chdir" { - return uint32(0) + if ev.Exec.Process == nil { + return "" } - return ev.Chdir.File.FileFields.GID -} - -// GetChdirFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileGroup() string { - if ev.GetEventType().String() != "chdir" { + if !ev.Exec.Process.IsNotKworker() { return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Chdir.File.FileFields) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent) } -// GetChdirFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileHashes() []string { - if ev.GetEventType().String() != "chdir" { - return []string{} +// GetExecFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetExecFilePathLength() int { + if ev.GetEventType().String() != "exec" { + return 0 } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Chdir.File) -} 
- -// GetChdirFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileInUpperLayer() bool { - if ev.GetEventType().String() != "chdir" { - return false + if ev.Exec.Process == nil { + return 0 } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Chdir.File.FileFields) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent)) } -// GetChdirFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileInode() uint64 { - if ev.GetEventType().String() != "chdir" { - return uint64(0) +// GetExecForkTime returns the value of the field, resolving if necessary +func (ev *Event) GetExecForkTime() time.Time { + if ev.GetEventType().String() != "exec" { + return time.Time{} } - return ev.Chdir.File.FileFields.PathKey.Inode -} - -// GetChdirFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileMode() uint16 { - if ev.GetEventType().String() != "chdir" { - return uint16(0) + if ev.Exec.Process == nil { + return time.Time{} } - return ev.Chdir.File.FileFields.Mode + return ev.Exec.Process.ForkTime } -// GetChdirFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileModificationTime() uint64 { - if ev.GetEventType().String() != "chdir" { - return uint64(0) +// GetExecGid returns the value of the field, resolving if necessary +func (ev *Event) GetExecGid() uint32 { + if ev.GetEventType().String() != "exec" { + return uint32(0) } - return ev.Chdir.File.FileFields.MTime -} - -// GetChdirFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileMountId() uint32 { - if ev.GetEventType().String() != "chdir" { + if ev.Exec.Process == nil { return uint32(0) } - return ev.Chdir.File.FileFields.PathKey.MountID + return ev.Exec.Process.Credentials.GID } -// GetChdirFileName returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileName() string { - if ev.GetEventType().String() != "chdir" { +// GetExecGroup returns the value of the field, resolving if necessary +func (ev *Event) GetExecGroup() string { + if ev.GetEventType().String() != "exec" { return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chdir.File) -} - -// GetChdirFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileNameLength() int { - if ev.GetEventType().String() != "chdir" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chdir.File)) -} - -// GetChdirFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFilePackageName() string { - if ev.GetEventType().String() != "chdir" { + if ev.Exec.Process == nil { return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Chdir.File) + return ev.Exec.Process.Credentials.Group } -// GetChdirFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFilePackageSourceVersion() string { - if ev.GetEventType().String() != "chdir" { +// GetExecInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetExecInterpreterFilePath() string { + if ev.GetEventType().String() != "exec" { return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Chdir.File) -} - -// GetChdirFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFilePackageVersion() string { - if 
ev.GetEventType().String() != "chdir" { + if ev.Exec.Process == nil { return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Chdir.File) -} - -// GetChdirFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFilePath() string { - if ev.GetEventType().String() != "chdir" { + if !ev.Exec.Process.HasInterpreter() { return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chdir.File) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) } -// GetChdirFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFilePathLength() int { - if ev.GetEventType().String() != "chdir" { +// GetExecInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetExecInterpreterFilePathLength() int { + if ev.GetEventType().String() != "exec" { return 0 } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Chdir.File)) -} - -// GetChdirFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileRights() int { - if ev.GetEventType().String() != "chdir" { + if ev.Exec.Process == nil { return 0 } - return ev.FieldHandlers.ResolveRights(ev, &ev.Chdir.File.FileFields) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent)) } -// GetChdirFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileUid() uint32 { - if ev.GetEventType().String() != "chdir" { +// GetExecPid returns the value of the field, resolving if necessary +func (ev *Event) GetExecPid() uint32 { + if ev.GetEventType().String() != "exec" { return uint32(0) } - return ev.Chdir.File.FileFields.UID -} - -// GetChdirFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetChdirFileUser() string { - if ev.GetEventType().String() != "chdir" { - return "" + if ev.Exec.Process == nil { + return uint32(0) } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Chdir.File.FileFields) + return ev.Exec.Process.PIDContext.Pid } -// GetChdirRetval returns the value of the field, resolving if necessary -func (ev *Event) GetChdirRetval() int64 { - if ev.GetEventType().String() != "chdir" { - return int64(0) +// GetExecPpid returns the value of the field, resolving if necessary +func (ev *Event) GetExecPpid() uint32 { + if ev.GetEventType().String() != "exec" { + return uint32(0) } - return ev.Chdir.SyscallEvent.Retval -} - -// GetChdirSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetChdirSyscallInt1() int { - if ev.GetEventType().String() != "chdir" { - return 0 + if ev.Exec.Process == nil { + return uint32(0) } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Chdir.SyscallContext) + return ev.Exec.Process.PPid } -// GetChdirSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetChdirSyscallInt2() int { - if ev.GetEventType().String() != "chdir" { - return 0 +// GetExecUid returns the value of the field, resolving if necessary +func (ev *Event) GetExecUid() uint32 { + if ev.GetEventType().String() != "exec" { + return uint32(0) } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Chdir.SyscallContext) -} - -// GetChdirSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetChdirSyscallInt3() int { - if ev.GetEventType().String() != "chdir" { - return 0 + if ev.Exec.Process == nil { + return uint32(0) } - return 
ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Chdir.SyscallContext) + return ev.Exec.Process.Credentials.UID } -// GetChdirSyscallPath returns the value of the field, resolving if necessary -func (ev *Event) GetChdirSyscallPath() string { - if ev.GetEventType().String() != "chdir" { +// GetExecUser returns the value of the field, resolving if necessary +func (ev *Event) GetExecUser() string { + if ev.GetEventType().String() != "exec" { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Chdir.SyscallContext) -} - -// GetChdirSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetChdirSyscallStr1() string { - if ev.GetEventType().String() != "chdir" { + if ev.Exec.Process == nil { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Chdir.SyscallContext) + return ev.Exec.Process.Credentials.User } -// GetChdirSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) GetChdirSyscallStr2() string { - if ev.GetEventType().String() != "chdir" { - return "" +// GetExitCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetExitCmdargv() []string { + if ev.GetEventType().String() != "exit" { + return []string{} } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Chdir.SyscallContext) -} - -// GetChdirSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetChdirSyscallStr3() string { - if ev.GetEventType().String() != "chdir" { - return "" + if ev.Exit.Process == nil { + return []string{} } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Chdir.SyscallContext) + return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.Exit.Process) } -// GetChmodFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileChangeTime() uint64 { - if ev.GetEventType().String() != "chmod" { - return uint64(0) +// GetExitCode returns the value of the field, resolving if necessary +func (ev *Event) GetExitCode() uint32 { + if ev.GetEventType().String() != "exit" { + return uint32(0) } - return ev.Chmod.File.FileFields.CTime + return ev.Exit.Code } -// GetChmodFileDestinationMode returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileDestinationMode() uint32 { - if ev.GetEventType().String() != "chmod" { - return uint32(0) +// GetExitEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetExitEnvp() []string { + if ev.GetEventType().String() != "exit" { + return []string{} } - return ev.Chmod.Mode -} - -// GetChmodFileDestinationRights returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileDestinationRights() uint32 { - if ev.GetEventType().String() != "chmod" { - return uint32(0) + if ev.Exit.Process == nil { + return []string{} } - return ev.Chmod.Mode + return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exit.Process) } -// GetChmodFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileFilesystem() string { - if ev.GetEventType().String() != "chmod" { - return "" +// GetExitExecTime returns the value of the field, resolving if necessary +func (ev *Event) GetExitExecTime() time.Time { + if ev.GetEventType().String() != "exit" { + return time.Time{} + } + if ev.Exit.Process == nil { + return time.Time{} } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Chmod.File) + return ev.Exit.Process.ExecTime } -// GetChmodFileGid returns the value of the field, resolving if 
necessary -func (ev *Event) GetChmodFileGid() uint32 { - if ev.GetEventType().String() != "chmod" { - return uint32(0) +// GetExitExitTime returns the value of the field, resolving if necessary +func (ev *Event) GetExitExitTime() time.Time { + if ev.GetEventType().String() != "exit" { + return time.Time{} } - return ev.Chmod.File.FileFields.GID + if ev.Exit.Process == nil { + return time.Time{} + } + return ev.Exit.Process.ExitTime } -// GetChmodFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileGroup() string { - if ev.GetEventType().String() != "chmod" { +// GetExitFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetExitFilePath() string { + if ev.GetEventType().String() != "exit" { return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Chmod.File.FileFields) -} - -// GetChmodFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileHashes() []string { - if ev.GetEventType().String() != "chmod" { - return []string{} + if ev.Exit.Process == nil { + return "" } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Chmod.File) -} - -// GetChmodFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileInUpperLayer() bool { - if ev.GetEventType().String() != "chmod" { - return false + if !ev.Exit.Process.IsNotKworker() { + return "" } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Chmod.File.FileFields) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent) } -// GetChmodFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileInode() uint64 { - if ev.GetEventType().String() != "chmod" { - return uint64(0) +// GetExitFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetExitFilePathLength() int { + if ev.GetEventType().String() != "exit" { + return 0 } - return ev.Chmod.File.FileFields.PathKey.Inode -} - -// GetChmodFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileMode() uint16 { - if ev.GetEventType().String() != "chmod" { - return uint16(0) + if ev.Exit.Process == nil { + return 0 } - return ev.Chmod.File.FileFields.Mode + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent)) } -// GetChmodFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileModificationTime() uint64 { - if ev.GetEventType().String() != "chmod" { - return uint64(0) +// GetExitForkTime returns the value of the field, resolving if necessary +func (ev *Event) GetExitForkTime() time.Time { + if ev.GetEventType().String() != "exit" { + return time.Time{} } - return ev.Chmod.File.FileFields.MTime + if ev.Exit.Process == nil { + return time.Time{} + } + return ev.Exit.Process.ForkTime } -// GetChmodFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileMountId() uint32 { - if ev.GetEventType().String() != "chmod" { +// GetExitGid returns the value of the field, resolving if necessary +func (ev *Event) GetExitGid() uint32 { + if ev.GetEventType().String() != "exit" { return uint32(0) } - return ev.Chmod.File.FileFields.PathKey.MountID -} - -// GetChmodFileName returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileName() string { - if ev.GetEventType().String() != "chmod" { - return "" + if ev.Exit.Process == nil { + return uint32(0) } - return 
ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chmod.File) + return ev.Exit.Process.Credentials.GID } -// GetChmodFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileNameLength() int { - if ev.GetEventType().String() != "chmod" { - return 0 +// GetExitGroup returns the value of the field, resolving if necessary +func (ev *Event) GetExitGroup() string { + if ev.GetEventType().String() != "exit" { + return "" } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chmod.File)) -} - -// GetChmodFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFilePackageName() string { - if ev.GetEventType().String() != "chmod" { + if ev.Exit.Process == nil { return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Chmod.File) + return ev.Exit.Process.Credentials.Group } -// GetChmodFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFilePackageSourceVersion() string { - if ev.GetEventType().String() != "chmod" { +// GetExitInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetExitInterpreterFilePath() string { + if ev.GetEventType().String() != "exit" { return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Chmod.File) -} - -// GetChmodFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFilePackageVersion() string { - if ev.GetEventType().String() != "chmod" { + if ev.Exit.Process == nil { return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Chmod.File) -} - -// GetChmodFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFilePath() string { - if ev.GetEventType().String() != "chmod" { + if !ev.Exit.Process.HasInterpreter() { return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chmod.File) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) } -// GetChmodFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFilePathLength() int { - if ev.GetEventType().String() != "chmod" { +// GetExitInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetExitInterpreterFilePathLength() int { + if ev.GetEventType().String() != "exit" { return 0 } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Chmod.File)) -} - -// GetChmodFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileRights() int { - if ev.GetEventType().String() != "chmod" { + if ev.Exit.Process == nil { return 0 } - return ev.FieldHandlers.ResolveRights(ev, &ev.Chmod.File.FileFields) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent)) } -// GetChmodFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileUid() uint32 { - if ev.GetEventType().String() != "chmod" { +// GetExitPid returns the value of the field, resolving if necessary +func (ev *Event) GetExitPid() uint32 { + if ev.GetEventType().String() != "exit" { return uint32(0) } - return ev.Chmod.File.FileFields.UID -} - -// GetChmodFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetChmodFileUser() string { - if ev.GetEventType().String() != "chmod" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Chmod.File.FileFields) -} - -// GetChmodRetval returns the value of the field, resolving if 
necessary -func (ev *Event) GetChmodRetval() int64 { - if ev.GetEventType().String() != "chmod" { - return int64(0) + if ev.Exit.Process == nil { + return uint32(0) } - return ev.Chmod.SyscallEvent.Retval + return ev.Exit.Process.PIDContext.Pid } -// GetChmodSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetChmodSyscallInt1() int { - if ev.GetEventType().String() != "chmod" { - return 0 +// GetExitPpid returns the value of the field, resolving if necessary +func (ev *Event) GetExitPpid() uint32 { + if ev.GetEventType().String() != "exit" { + return uint32(0) } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Chmod.SyscallContext) -} - -// GetChmodSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetChmodSyscallInt2() int { - if ev.GetEventType().String() != "chmod" { - return 0 + if ev.Exit.Process == nil { + return uint32(0) } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Chmod.SyscallContext) + return ev.Exit.Process.PPid } -// GetChmodSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetChmodSyscallInt3() int { - if ev.GetEventType().String() != "chmod" { - return 0 +// GetExitUid returns the value of the field, resolving if necessary +func (ev *Event) GetExitUid() uint32 { + if ev.GetEventType().String() != "exit" { + return uint32(0) } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Chmod.SyscallContext) -} - -// GetChmodSyscallMode returns the value of the field, resolving if necessary -func (ev *Event) GetChmodSyscallMode() int { - if ev.GetEventType().String() != "chmod" { - return 0 + if ev.Exit.Process == nil { + return uint32(0) } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Chmod.SyscallContext) + return ev.Exit.Process.Credentials.UID } -// GetChmodSyscallPath returns the value of the field, resolving if necessary -func (ev *Event) GetChmodSyscallPath() string { - if ev.GetEventType().String() != "chmod" { +// GetExitUser returns the value of the field, resolving if necessary +func (ev *Event) GetExitUser() string { + if ev.GetEventType().String() != "exit" { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Chmod.SyscallContext) -} - -// GetChmodSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetChmodSyscallStr1() string { - if ev.GetEventType().String() != "chmod" { + if ev.Exit.Process == nil { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Chmod.SyscallContext) + return ev.Exit.Process.Credentials.User } -// GetChmodSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) GetChmodSyscallStr2() string { - if ev.GetEventType().String() != "chmod" { +// GetLinkFileDestinationPath returns the value of the field, resolving if necessary +func (ev *Event) GetLinkFileDestinationPath() string { + if ev.GetEventType().String() != "link" { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Chmod.SyscallContext) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Target) } -// GetChmodSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetChmodSyscallStr3() string { - if ev.GetEventType().String() != "chmod" { - return "" +// GetLinkFileDestinationPathLength returns the value of the field, resolving if necessary +func (ev *Event) GetLinkFileDestinationPathLength() int { + if ev.GetEventType().String() != "link" { + return 0 } - return 
ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Chmod.SyscallContext) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Target)) } -// GetChownFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileChangeTime() uint64 { - if ev.GetEventType().String() != "chown" { - return uint64(0) +// GetLinkFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetLinkFilePath() string { + if ev.GetEventType().String() != "link" { + return "" } - return ev.Chown.File.FileFields.CTime + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Source) } -// GetChownFileDestinationGid returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileDestinationGid() int64 { - if ev.GetEventType().String() != "chown" { - return int64(0) +// GetLinkFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetLinkFilePathLength() int { + if ev.GetEventType().String() != "link" { + return 0 } - return ev.Chown.GID + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Source)) } -// GetChownFileDestinationGroup returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileDestinationGroup() string { - if ev.GetEventType().String() != "chown" { +// GetLoadModuleFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetLoadModuleFilePath() string { + if ev.GetEventType().String() != "load_module" { return "" } - return ev.FieldHandlers.ResolveChownGID(ev, &ev.Chown) -} - -// GetChownFileDestinationUid returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileDestinationUid() int64 { - if ev.GetEventType().String() != "chown" { - return int64(0) - } - return ev.Chown.UID + return ev.FieldHandlers.ResolveFilePath(ev, &ev.LoadModule.File) } -// GetChownFileDestinationUser returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileDestinationUser() string { - if ev.GetEventType().String() != "chown" { - return "" +// GetLoadModuleFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetLoadModuleFilePathLength() int { + if ev.GetEventType().String() != "load_module" { + return 0 } - return ev.FieldHandlers.ResolveChownUID(ev, &ev.Chown) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.LoadModule.File)) } -// GetChownFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileFilesystem() string { - if ev.GetEventType().String() != "chown" { +// GetMkdirFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetMkdirFilePath() string { + if ev.GetEventType().String() != "mkdir" { return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Chown.File) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Mkdir.File) } -// GetChownFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileGid() uint32 { - if ev.GetEventType().String() != "chown" { - return uint32(0) +// GetMkdirFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetMkdirFilePathLength() int { + if ev.GetEventType().String() != "mkdir" { + return 0 } - return ev.Chown.File.FileFields.GID + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Mkdir.File)) } -// GetChownFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileGroup() string { - if ev.GetEventType().String() != "chown" { +// GetMmapFilePath 
returns the value of the field, resolving if necessary +func (ev *Event) GetMmapFilePath() string { + if ev.GetEventType().String() != "mmap" { return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Chown.File.FileFields) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.MMap.File) } -// GetChownFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileHashes() []string { - if ev.GetEventType().String() != "chown" { - return []string{} +// GetMmapFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetMmapFilePathLength() int { + if ev.GetEventType().String() != "mmap" { + return 0 } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Chown.File) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.MMap.File)) } -// GetChownFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileInUpperLayer() bool { - if ev.GetEventType().String() != "chown" { - return false +// GetMountMountpointPath returns the value of the field, resolving if necessary +func (ev *Event) GetMountMountpointPath() string { + if ev.GetEventType().String() != "mount" { + return "" } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Chown.File.FileFields) + return ev.FieldHandlers.ResolveMountPointPath(ev, &ev.Mount) } -// GetChownFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileInode() uint64 { - if ev.GetEventType().String() != "chown" { - return uint64(0) +// GetMountRootPath returns the value of the field, resolving if necessary +func (ev *Event) GetMountRootPath() string { + if ev.GetEventType().String() != "mount" { + return "" } - return ev.Chown.File.FileFields.PathKey.Inode + return ev.FieldHandlers.ResolveMountRootPath(ev, &ev.Mount) } -// GetChownFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileMode() uint16 { - if ev.GetEventType().String() != "chown" { - return uint16(0) +// GetOpenFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetOpenFilePath() string { + if ev.GetEventType().String() != "open" { + return "" } - return ev.Chown.File.FileFields.Mode + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Open.File) } -// GetChownFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileModificationTime() uint64 { - if ev.GetEventType().String() != "chown" { - return uint64(0) +// GetOpenFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetOpenFilePathLength() int { + if ev.GetEventType().String() != "open" { + return 0 } - return ev.Chown.File.FileFields.MTime + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Open.File)) } -// GetChownFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileMountId() uint32 { - if ev.GetEventType().String() != "chown" { - return uint32(0) - } - return ev.Chown.File.FileFields.PathKey.MountID -} - -// GetChownFileName returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileName() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chown.File) -} - -// GetChownFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileNameLength() int { - if ev.GetEventType().String() != "chown" { - return 0 - } - return 
len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Chown.File)) -} - -// GetChownFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetChownFilePackageName() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Chown.File) -} - -// GetChownFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetChownFilePackageSourceVersion() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Chown.File) -} - -// GetChownFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetChownFilePackageVersion() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Chown.File) -} - -// GetChownFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetChownFilePath() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Chown.File) -} - -// GetChownFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetChownFilePathLength() int { - if ev.GetEventType().String() != "chown" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Chown.File)) -} - -// GetChownFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileRights() int { - if ev.GetEventType().String() != "chown" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Chown.File.FileFields) -} - -// GetChownFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileUid() uint32 { - if ev.GetEventType().String() != "chown" { - return uint32(0) - } - return ev.Chown.File.FileFields.UID -} - -// GetChownFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetChownFileUser() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Chown.File.FileFields) -} - -// GetChownRetval returns the value of the field, resolving if necessary -func (ev *Event) GetChownRetval() int64 { - if ev.GetEventType().String() != "chown" { - return int64(0) - } - return ev.Chown.SyscallEvent.Retval -} - -// GetChownSyscallGid returns the value of the field, resolving if necessary -func (ev *Event) GetChownSyscallGid() int { - if ev.GetEventType().String() != "chown" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Chown.SyscallContext) -} - -// GetChownSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetChownSyscallInt1() int { - if ev.GetEventType().String() != "chown" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Chown.SyscallContext) -} - -// GetChownSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetChownSyscallInt2() int { - if ev.GetEventType().String() != "chown" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Chown.SyscallContext) -} - -// GetChownSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetChownSyscallInt3() int { - if ev.GetEventType().String() != "chown" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Chown.SyscallContext) -} - -// GetChownSyscallPath returns the value of the field, resolving 
if necessary -func (ev *Event) GetChownSyscallPath() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Chown.SyscallContext) -} - -// GetChownSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetChownSyscallStr1() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Chown.SyscallContext) -} - -// GetChownSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) GetChownSyscallStr2() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Chown.SyscallContext) -} - -// GetChownSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetChownSyscallStr3() string { - if ev.GetEventType().String() != "chown" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Chown.SyscallContext) -} - -// GetChownSyscallUid returns the value of the field, resolving if necessary -func (ev *Event) GetChownSyscallUid() int { - if ev.GetEventType().String() != "chown" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Chown.SyscallContext) -} - -// GetConnectAddrFamily returns the value of the field, resolving if necessary -func (ev *Event) GetConnectAddrFamily() uint16 { - if ev.GetEventType().String() != "connect" { - return uint16(0) - } - return ev.Connect.AddrFamily -} - -// GetConnectAddrIp returns the value of the field, resolving if necessary -func (ev *Event) GetConnectAddrIp() net.IPNet { - if ev.GetEventType().String() != "connect" { - return net.IPNet{} - } - return ev.Connect.Addr.IPNet -} - -// GetConnectAddrIsPublic returns the value of the field, resolving if necessary -func (ev *Event) GetConnectAddrIsPublic() bool { - if ev.GetEventType().String() != "connect" { - return false - } - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Connect.Addr) -} - -// GetConnectAddrPort returns the value of the field, resolving if necessary -func (ev *Event) GetConnectAddrPort() uint16 { - if ev.GetEventType().String() != "connect" { - return uint16(0) - } - return ev.Connect.Addr.Port -} - -// GetConnectProtocol returns the value of the field, resolving if necessary -func (ev *Event) GetConnectProtocol() uint16 { - if ev.GetEventType().String() != "connect" { - return uint16(0) - } - return ev.Connect.Protocol -} - -// GetConnectRetval returns the value of the field, resolving if necessary -func (ev *Event) GetConnectRetval() int64 { - if ev.GetEventType().String() != "connect" { - return int64(0) - } - return ev.Connect.SyscallEvent.Retval -} - -// GetContainerCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetContainerCreatedAt() int { - if ev.BaseEvent.ContainerContext == nil { - return 0 - } - return ev.FieldHandlers.ResolveContainerCreatedAt(ev, ev.BaseEvent.ContainerContext) -} - -// GetContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetContainerId() string { - if ev.BaseEvent.ContainerContext == nil { - return "" - } - return ev.FieldHandlers.ResolveContainerID(ev, ev.BaseEvent.ContainerContext) -} - -// GetContainerRuntime returns the value of the field, resolving if necessary -func (ev *Event) GetContainerRuntime() string { - if ev.BaseEvent.ContainerContext == nil { - return "" - } - return ev.FieldHandlers.ResolveContainerRuntime(ev, 
ev.BaseEvent.ContainerContext) -} - -// GetContainerTags returns the value of the field, resolving if necessary -func (ev *Event) GetContainerTags() []string { - if ev.BaseEvent.ContainerContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveContainerTags(ev, ev.BaseEvent.ContainerContext) -} - -// GetDnsId returns the value of the field, resolving if necessary -func (ev *Event) GetDnsId() uint16 { - if ev.GetEventType().String() != "dns" { - return uint16(0) - } - return ev.DNS.ID -} - -// GetDnsQuestionClass returns the value of the field, resolving if necessary -func (ev *Event) GetDnsQuestionClass() uint16 { - if ev.GetEventType().String() != "dns" { - return uint16(0) - } - return ev.DNS.Class -} - -// GetDnsQuestionCount returns the value of the field, resolving if necessary -func (ev *Event) GetDnsQuestionCount() uint16 { - if ev.GetEventType().String() != "dns" { - return uint16(0) - } - return ev.DNS.Count -} - -// GetDnsQuestionLength returns the value of the field, resolving if necessary -func (ev *Event) GetDnsQuestionLength() uint16 { - if ev.GetEventType().String() != "dns" { - return uint16(0) - } - return ev.DNS.Size -} - -// GetDnsQuestionName returns the value of the field, resolving if necessary -func (ev *Event) GetDnsQuestionName() string { - if ev.GetEventType().String() != "dns" { - return "" - } - return ev.DNS.Name -} - -// GetDnsQuestionNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetDnsQuestionNameLength() int { - if ev.GetEventType().String() != "dns" { - return 0 - } - return len(ev.DNS.Name) -} - -// GetDnsQuestionType returns the value of the field, resolving if necessary -func (ev *Event) GetDnsQuestionType() uint16 { - if ev.GetEventType().String() != "dns" { - return uint16(0) - } - return ev.DNS.Type -} - -// GetEventAsync returns the value of the field, resolving if necessary -func (ev *Event) GetEventAsync() bool { - return ev.FieldHandlers.ResolveAsync(ev) -} - -// GetEventHostname returns the value of the field, resolving if necessary -func (ev *Event) GetEventHostname() string { - return ev.FieldHandlers.ResolveHostname(ev, &ev.BaseEvent) -} - -// GetEventOrigin returns the value of the field, resolving if necessary -func (ev *Event) GetEventOrigin() string { - return ev.BaseEvent.Origin -} - -// GetEventOs returns the value of the field, resolving if necessary -func (ev *Event) GetEventOs() string { - return ev.BaseEvent.Os -} - -// GetEventService returns the value of the field, resolving if necessary -func (ev *Event) GetEventService() string { - return ev.FieldHandlers.ResolveService(ev, &ev.BaseEvent) -} - -// GetEventTimestamp returns the value of the field, resolving if necessary -func (ev *Event) GetEventTimestamp() int { - return ev.FieldHandlers.ResolveEventTimestamp(ev, &ev.BaseEvent) -} - -// GetExecArgs returns the value of the field, resolving if necessary -func (ev *Event) GetExecArgs() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exec.Process) -} - -// GetExecArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetExecArgsFlags() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.Exec.Process) -} - -// GetExecArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) 
GetExecArgsOptions() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.Exec.Process) -} - -// GetExecArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetExecArgsScrubbed() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, ev.Exec.Process) -} - -// GetExecArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetExecArgsTruncated() bool { - if ev.GetEventType().String() != "exec" { - return false - } - if ev.Exec.Process == nil { - return false - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Exec.Process) -} - -// GetExecArgv returns the value of the field, resolving if necessary -func (ev *Event) GetExecArgv() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.Exec.Process) -} - -// GetExecArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetExecArgv0() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Exec.Process) -} - -// GetExecArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetExecArgvScrubbed() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, ev.Exec.Process) -} - -// GetExecAuid returns the value of the field, resolving if necessary -func (ev *Event) GetExecAuid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.Credentials.AUID -} - -// GetExecCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetExecCapEffective() uint64 { - if ev.GetEventType().String() != "exec" { - return uint64(0) - } - if ev.Exec.Process == nil { - return uint64(0) - } - return ev.Exec.Process.Credentials.CapEffective -} - -// GetExecCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetExecCapPermitted() uint64 { - if ev.GetEventType().String() != "exec" { - return uint64(0) - } - if ev.Exec.Process == nil { - return uint64(0) - } - return ev.Exec.Process.Credentials.CapPermitted -} - -// GetExecCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetExecCgroupFileInode() uint64 { - if ev.GetEventType().String() != "exec" { - return uint64(0) - } - if ev.Exec.Process == nil { - return uint64(0) - } - return ev.Exec.Process.CGroup.CGroupFile.Inode -} - -// GetExecCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetExecCgroupFileMountId() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.CGroup.CGroupFile.MountID -} - -// GetExecCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetExecCgroupId() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return 
ev.FieldHandlers.ResolveCGroupID(ev, &ev.Exec.Process.CGroup) -} - -// GetExecCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetExecCgroupManager() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Exec.Process.CGroup) -} - -// GetExecCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExecCgroupVersion() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Exec.Process.CGroup) -} - -// GetExecCmdargv returns the value of the field, resolving if necessary -func (ev *Event) GetExecCmdargv() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.Exec.Process) -} - -// GetExecComm returns the value of the field, resolving if necessary -func (ev *Event) GetExecComm() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.Comm -} - -// GetExecContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetExecContainerId() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.Exec.Process) -} - -// GetExecCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetExecCreatedAt() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exec.Process) -} - -// GetExecEgid returns the value of the field, resolving if necessary -func (ev *Event) GetExecEgid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.Credentials.EGID -} - -// GetExecEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetExecEgroup() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.Credentials.EGroup -} - -// GetExecEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetExecEnvp() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exec.Process) -} - -// GetExecEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetExecEnvs() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exec.Process) -} - -// GetExecEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetExecEnvsTruncated() bool { - if ev.GetEventType().String() != "exec" { - return false - } - if ev.Exec.Process == nil { - return false - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.Exec.Process) -} - -// GetExecEuid returns the value of the field, resolving if necessary -func (ev *Event) GetExecEuid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if 
ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.Credentials.EUID -} - -// GetExecEuser returns the value of the field, resolving if necessary -func (ev *Event) GetExecEuser() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.Credentials.EUser -} - -// GetExecExecTime returns the value of the field, resolving if necessary -func (ev *Event) GetExecExecTime() time.Time { - if ev.GetEventType().String() != "exec" { - return time.Time{} - } - if ev.Exec.Process == nil { - return time.Time{} - } - return ev.Exec.Process.ExecTime -} - -// GetExecExitTime returns the value of the field, resolving if necessary -func (ev *Event) GetExecExitTime() time.Time { - if ev.GetEventType().String() != "exec" { - return time.Time{} - } - if ev.Exec.Process == nil { - return time.Time{} - } - return ev.Exec.Process.ExitTime -} - -// GetExecFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileChangeTime() uint64 { - if ev.GetEventType().String() != "exec" { - return uint64(0) - } - if ev.Exec.Process == nil { - return uint64(0) - } - if !ev.Exec.Process.IsNotKworker() { - return uint64(0) - } - return ev.Exec.Process.FileEvent.FileFields.CTime -} - -// GetExecFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileFilesystem() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exec.Process.FileEvent) -} - -// GetExecFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileGid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - if !ev.Exec.Process.IsNotKworker() { - return uint32(0) - } - return ev.Exec.Process.FileEvent.FileFields.GID -} - -// GetExecFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileGroup() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exec.Process.FileEvent.FileFields) -} - -// GetExecFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileHashes() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - if !ev.Exec.Process.IsNotKworker() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exec.Process.FileEvent) -} - -// GetExecFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileInUpperLayer() bool { - if ev.GetEventType().String() != "exec" { - return false - } - if ev.Exec.Process == nil { - return false - } - if !ev.Exec.Process.IsNotKworker() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exec.Process.FileEvent.FileFields) -} - -// GetExecFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileInode() uint64 { - if ev.GetEventType().String() != "exec" { - return uint64(0) - } - if ev.Exec.Process == nil { - return uint64(0) - } - if !ev.Exec.Process.IsNotKworker() { - return uint64(0) - } - return 
ev.Exec.Process.FileEvent.FileFields.PathKey.Inode -} - -// GetExecFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileMode() uint16 { - if ev.GetEventType().String() != "exec" { - return uint16(0) - } - if ev.Exec.Process == nil { - return uint16(0) - } - if !ev.Exec.Process.IsNotKworker() { - return uint16(0) - } - return ev.Exec.Process.FileEvent.FileFields.Mode -} - -// GetExecFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileModificationTime() uint64 { - if ev.GetEventType().String() != "exec" { - return uint64(0) - } - if ev.Exec.Process == nil { - return uint64(0) - } - if !ev.Exec.Process.IsNotKworker() { - return uint64(0) - } - return ev.Exec.Process.FileEvent.FileFields.MTime -} - -// GetExecFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileMountId() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - if !ev.Exec.Process.IsNotKworker() { - return uint32(0) - } - return ev.Exec.Process.FileEvent.FileFields.PathKey.MountID -} - -// GetExecFileName returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileName() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent) -} - -// GetExecFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileNameLength() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent)) -} - -// GetExecFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetExecFilePackageName() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exec.Process.FileEvent) -} - -// GetExecFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExecFilePackageSourceVersion() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exec.Process.FileEvent) -} - -// GetExecFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExecFilePackageVersion() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exec.Process.FileEvent) -} - -// GetExecFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetExecFilePath() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent) -} - -// GetExecFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetExecFilePathLength() int { - if ev.GetEventType().String() != "exec" 
{ - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent)) -} - -// GetExecFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileRights() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - if !ev.Exec.Process.IsNotKworker() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Exec.Process.FileEvent.FileFields) -} - -// GetExecFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileUid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - if !ev.Exec.Process.IsNotKworker() { - return uint32(0) - } - return ev.Exec.Process.FileEvent.FileFields.UID -} - -// GetExecFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileUser() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exec.Process.FileEvent.FileFields) -} - -// GetExecForkTime returns the value of the field, resolving if necessary -func (ev *Event) GetExecForkTime() time.Time { - if ev.GetEventType().String() != "exec" { - return time.Time{} - } - if ev.Exec.Process == nil { - return time.Time{} - } - return ev.Exec.Process.ForkTime -} - -// GetExecFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetExecFsgid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.Credentials.FSGID -} - -// GetExecFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetExecFsgroup() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.Credentials.FSGroup -} - -// GetExecFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetExecFsuid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.Credentials.FSUID -} - -// GetExecFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetExecFsuser() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.Credentials.FSUser -} - -// GetExecGid returns the value of the field, resolving if necessary -func (ev *Event) GetExecGid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.Credentials.GID -} - -// GetExecGroup returns the value of the field, resolving if necessary -func (ev *Event) GetExecGroup() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.Credentials.Group -} - -// GetExecInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileChangeTime() uint64 { - if ev.GetEventType().String() != "exec" { - return uint64(0) - } - if ev.Exec.Process == nil { - return uint64(0) - } - if !ev.Exec.Process.HasInterpreter() { - return uint64(0) - } - return 
ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.CTime -} - -// GetExecInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileFilesystem() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) -} - -// GetExecInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileGid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - if !ev.Exec.Process.HasInterpreter() { - return uint32(0) - } - return ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.GID -} - -// GetExecInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileGroup() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetExecInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileHashes() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - if !ev.Exec.Process.HasInterpreter() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) -} - -// GetExecInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileInUpperLayer() bool { - if ev.GetEventType().String() != "exec" { - return false - } - if ev.Exec.Process == nil { - return false - } - if !ev.Exec.Process.HasInterpreter() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetExecInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileInode() uint64 { - if ev.GetEventType().String() != "exec" { - return uint64(0) - } - if ev.Exec.Process == nil { - return uint64(0) - } - if !ev.Exec.Process.HasInterpreter() { - return uint64(0) - } - return ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode -} - -// GetExecInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileMode() uint16 { - if ev.GetEventType().String() != "exec" { - return uint16(0) - } - if ev.Exec.Process == nil { - return uint16(0) - } - if !ev.Exec.Process.HasInterpreter() { - return uint16(0) - } - return ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.Mode -} - -// GetExecInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileModificationTime() uint64 { - if ev.GetEventType().String() != "exec" { - return uint64(0) - } - if ev.Exec.Process == nil { - return uint64(0) - } - if !ev.Exec.Process.HasInterpreter() { - return uint64(0) - } - return ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.MTime -} - -// GetExecInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileMountId() uint32 { - if 
ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - if !ev.Exec.Process.HasInterpreter() { - return uint32(0) - } - return ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID -} - -// GetExecInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileName() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) -} - -// GetExecInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileNameLength() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.LinuxBinprm.FileEvent)) -} - -// GetExecInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFilePackageName() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) -} - -// GetExecInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFilePackageSourceVersion() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) -} - -// GetExecInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFilePackageVersion() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) -} - -// GetExecInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFilePath() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent) -} - -// GetExecInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFilePathLength() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.LinuxBinprm.FileEvent)) -} - -// GetExecInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileRights() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - if !ev.Exec.Process.HasInterpreter() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetExecInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) 
GetExecInterpreterFileUid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - if !ev.Exec.Process.HasInterpreter() { - return uint32(0) - } - return ev.Exec.Process.LinuxBinprm.FileEvent.FileFields.UID -} - -// GetExecInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetExecInterpreterFileUser() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - if !ev.Exec.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exec.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetExecIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetExecIsExec() bool { - if ev.GetEventType().String() != "exec" { - return false - } - if ev.Exec.Process == nil { - return false - } - return ev.Exec.Process.IsExec -} - -// GetExecIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetExecIsKworker() bool { - if ev.GetEventType().String() != "exec" { - return false - } - if ev.Exec.Process == nil { - return false - } - return ev.Exec.Process.PIDContext.IsKworker -} - -// GetExecIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetExecIsThread() bool { - if ev.GetEventType().String() != "exec" { - return false - } - if ev.Exec.Process == nil { - return false - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.Exec.Process) -} - -// GetExecPid returns the value of the field, resolving if necessary -func (ev *Event) GetExecPid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.PIDContext.Pid -} - -// GetExecPpid returns the value of the field, resolving if necessary -func (ev *Event) GetExecPpid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.PPid -} - -// GetExecSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetExecSyscallInt1() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Exec.SyscallContext) -} - -// GetExecSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetExecSyscallInt2() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Exec.SyscallContext) -} - -// GetExecSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetExecSyscallInt3() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Exec.SyscallContext) -} - -// GetExecSyscallPath returns the value of the field, resolving if necessary -func (ev *Event) GetExecSyscallPath() string { - if ev.GetEventType().String() != "exec" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Exec.SyscallContext) -} - -// GetExecSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetExecSyscallStr1() string { - if ev.GetEventType().String() != "exec" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Exec.SyscallContext) -} - -// GetExecSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) 
GetExecSyscallStr2() string { - if ev.GetEventType().String() != "exec" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Exec.SyscallContext) -} - -// GetExecSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetExecSyscallStr3() string { - if ev.GetEventType().String() != "exec" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Exec.SyscallContext) -} - -// GetExecTid returns the value of the field, resolving if necessary -func (ev *Event) GetExecTid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.PIDContext.Tid -} - -// GetExecTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetExecTtyName() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.TTYName -} - -// GetExecUid returns the value of the field, resolving if necessary -func (ev *Event) GetExecUid() uint32 { - if ev.GetEventType().String() != "exec" { - return uint32(0) - } - if ev.Exec.Process == nil { - return uint32(0) - } - return ev.Exec.Process.Credentials.UID -} - -// GetExecUser returns the value of the field, resolving if necessary -func (ev *Event) GetExecUser() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.Credentials.User -} - -// GetExecUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetExecUserSessionK8sGroups() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Exec.Process.UserSession) -} - -// GetExecUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetExecUserSessionK8sUid() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Exec.Process.UserSession) -} - -// GetExecUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetExecUserSessionK8sUsername() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Exec.Process.UserSession) -} - -// GetExitArgs returns the value of the field, resolving if necessary -func (ev *Event) GetExitArgs() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.Exit.Process) -} - -// GetExitArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetExitArgsFlags() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.Exit.Process) -} - -// GetExitArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetExitArgsOptions() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.Exit.Process) -} - -// GetExitArgsScrubbed returns the value of the field, resolving if necessary 
-func (ev *Event) GetExitArgsScrubbed() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, ev.Exit.Process) -} - -// GetExitArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetExitArgsTruncated() bool { - if ev.GetEventType().String() != "exit" { - return false - } - if ev.Exit.Process == nil { - return false - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Exit.Process) -} - -// GetExitArgv returns the value of the field, resolving if necessary -func (ev *Event) GetExitArgv() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.Exit.Process) -} - -// GetExitArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetExitArgv0() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Exit.Process) -} - -// GetExitArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetExitArgvScrubbed() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, ev.Exit.Process) -} - -// GetExitAuid returns the value of the field, resolving if necessary -func (ev *Event) GetExitAuid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.Credentials.AUID -} - -// GetExitCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetExitCapEffective() uint64 { - if ev.GetEventType().String() != "exit" { - return uint64(0) - } - if ev.Exit.Process == nil { - return uint64(0) - } - return ev.Exit.Process.Credentials.CapEffective -} - -// GetExitCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetExitCapPermitted() uint64 { - if ev.GetEventType().String() != "exit" { - return uint64(0) - } - if ev.Exit.Process == nil { - return uint64(0) - } - return ev.Exit.Process.Credentials.CapPermitted -} - -// GetExitCause returns the value of the field, resolving if necessary -func (ev *Event) GetExitCause() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - return ev.Exit.Cause -} - -// GetExitCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetExitCgroupFileInode() uint64 { - if ev.GetEventType().String() != "exit" { - return uint64(0) - } - if ev.Exit.Process == nil { - return uint64(0) - } - return ev.Exit.Process.CGroup.CGroupFile.Inode -} - -// GetExitCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetExitCgroupFileMountId() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.CGroup.CGroupFile.MountID -} - -// GetExitCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetExitCgroupId() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.Exit.Process.CGroup) -} - -// GetExitCgroupManager returns the value of the 
field, resolving if necessary -func (ev *Event) GetExitCgroupManager() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Exit.Process.CGroup) -} - -// GetExitCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExitCgroupVersion() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Exit.Process.CGroup) -} - -// GetExitCmdargv returns the value of the field, resolving if necessary -func (ev *Event) GetExitCmdargv() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.Exit.Process) -} - -// GetExitCode returns the value of the field, resolving if necessary -func (ev *Event) GetExitCode() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - return ev.Exit.Code -} - -// GetExitComm returns the value of the field, resolving if necessary -func (ev *Event) GetExitComm() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.Comm -} - -// GetExitContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetExitContainerId() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.Exit.Process) -} - -// GetExitCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetExitCreatedAt() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exit.Process) -} - -// GetExitEgid returns the value of the field, resolving if necessary -func (ev *Event) GetExitEgid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.Credentials.EGID -} - -// GetExitEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetExitEgroup() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.Credentials.EGroup -} - -// GetExitEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetExitEnvp() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exit.Process) -} - -// GetExitEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetExitEnvs() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exit.Process) -} - -// GetExitEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetExitEnvsTruncated() bool { - if ev.GetEventType().String() != "exit" { - return false - } - if ev.Exit.Process == nil { - return false - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.Exit.Process) -} - -// GetExitEuid returns the value of the field, resolving if necessary -func (ev *Event) GetExitEuid() 
uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.Credentials.EUID -} - -// GetExitEuser returns the value of the field, resolving if necessary -func (ev *Event) GetExitEuser() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.Credentials.EUser -} - -// GetExitExecTime returns the value of the field, resolving if necessary -func (ev *Event) GetExitExecTime() time.Time { - if ev.GetEventType().String() != "exit" { - return time.Time{} - } - if ev.Exit.Process == nil { - return time.Time{} - } - return ev.Exit.Process.ExecTime -} - -// GetExitExitTime returns the value of the field, resolving if necessary -func (ev *Event) GetExitExitTime() time.Time { - if ev.GetEventType().String() != "exit" { - return time.Time{} - } - if ev.Exit.Process == nil { - return time.Time{} - } - return ev.Exit.Process.ExitTime -} - -// GetExitFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileChangeTime() uint64 { - if ev.GetEventType().String() != "exit" { - return uint64(0) - } - if ev.Exit.Process == nil { - return uint64(0) - } - if !ev.Exit.Process.IsNotKworker() { - return uint64(0) - } - return ev.Exit.Process.FileEvent.FileFields.CTime -} - -// GetExitFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileFilesystem() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exit.Process.FileEvent) -} - -// GetExitFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileGid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - if !ev.Exit.Process.IsNotKworker() { - return uint32(0) - } - return ev.Exit.Process.FileEvent.FileFields.GID -} - -// GetExitFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileGroup() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exit.Process.FileEvent.FileFields) -} - -// GetExitFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileHashes() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - if !ev.Exit.Process.IsNotKworker() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exit.Process.FileEvent) -} - -// GetExitFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileInUpperLayer() bool { - if ev.GetEventType().String() != "exit" { - return false - } - if ev.Exit.Process == nil { - return false - } - if !ev.Exit.Process.IsNotKworker() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exit.Process.FileEvent.FileFields) -} - -// GetExitFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileInode() uint64 { - if ev.GetEventType().String() != "exit" { - return uint64(0) - } - if ev.Exit.Process == nil { - return uint64(0) - } - if 
!ev.Exit.Process.IsNotKworker() { - return uint64(0) - } - return ev.Exit.Process.FileEvent.FileFields.PathKey.Inode -} - -// GetExitFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileMode() uint16 { - if ev.GetEventType().String() != "exit" { - return uint16(0) - } - if ev.Exit.Process == nil { - return uint16(0) - } - if !ev.Exit.Process.IsNotKworker() { - return uint16(0) - } - return ev.Exit.Process.FileEvent.FileFields.Mode -} - -// GetExitFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileModificationTime() uint64 { - if ev.GetEventType().String() != "exit" { - return uint64(0) - } - if ev.Exit.Process == nil { - return uint64(0) - } - if !ev.Exit.Process.IsNotKworker() { - return uint64(0) - } - return ev.Exit.Process.FileEvent.FileFields.MTime -} - -// GetExitFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileMountId() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - if !ev.Exit.Process.IsNotKworker() { - return uint32(0) - } - return ev.Exit.Process.FileEvent.FileFields.PathKey.MountID -} - -// GetExitFileName returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileName() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent) -} - -// GetExitFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileNameLength() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent)) -} - -// GetExitFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetExitFilePackageName() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exit.Process.FileEvent) -} - -// GetExitFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExitFilePackageSourceVersion() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exit.Process.FileEvent) -} - -// GetExitFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExitFilePackageVersion() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exit.Process.FileEvent) -} - -// GetExitFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetExitFilePath() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent) -} - -// GetExitFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) 
GetExitFilePathLength() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent)) -} - -// GetExitFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileRights() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - if !ev.Exit.Process.IsNotKworker() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Exit.Process.FileEvent.FileFields) -} - -// GetExitFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileUid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - if !ev.Exit.Process.IsNotKworker() { - return uint32(0) - } - return ev.Exit.Process.FileEvent.FileFields.UID -} - -// GetExitFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileUser() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exit.Process.FileEvent.FileFields) -} - -// GetExitForkTime returns the value of the field, resolving if necessary -func (ev *Event) GetExitForkTime() time.Time { - if ev.GetEventType().String() != "exit" { - return time.Time{} - } - if ev.Exit.Process == nil { - return time.Time{} - } - return ev.Exit.Process.ForkTime -} - -// GetExitFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetExitFsgid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.Credentials.FSGID -} - -// GetExitFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetExitFsgroup() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.Credentials.FSGroup -} - -// GetExitFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetExitFsuid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.Credentials.FSUID -} - -// GetExitFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetExitFsuser() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.Credentials.FSUser -} - -// GetExitGid returns the value of the field, resolving if necessary -func (ev *Event) GetExitGid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.Credentials.GID -} - -// GetExitGroup returns the value of the field, resolving if necessary -func (ev *Event) GetExitGroup() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.Credentials.Group -} - -// GetExitInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileChangeTime() uint64 { - if ev.GetEventType().String() != "exit" { - return uint64(0) - } - if ev.Exit.Process == nil { - return uint64(0) - } - if 
!ev.Exit.Process.HasInterpreter() { - return uint64(0) - } - return ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.CTime -} - -// GetExitInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileFilesystem() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) -} - -// GetExitInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileGid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - if !ev.Exit.Process.HasInterpreter() { - return uint32(0) - } - return ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.GID -} - -// GetExitInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileGroup() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetExitInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileHashes() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - if !ev.Exit.Process.HasInterpreter() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) -} - -// GetExitInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileInUpperLayer() bool { - if ev.GetEventType().String() != "exit" { - return false - } - if ev.Exit.Process == nil { - return false - } - if !ev.Exit.Process.HasInterpreter() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetExitInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileInode() uint64 { - if ev.GetEventType().String() != "exit" { - return uint64(0) - } - if ev.Exit.Process == nil { - return uint64(0) - } - if !ev.Exit.Process.HasInterpreter() { - return uint64(0) - } - return ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode -} - -// GetExitInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileMode() uint16 { - if ev.GetEventType().String() != "exit" { - return uint16(0) - } - if ev.Exit.Process == nil { - return uint16(0) - } - if !ev.Exit.Process.HasInterpreter() { - return uint16(0) - } - return ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.Mode -} - -// GetExitInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileModificationTime() uint64 { - if ev.GetEventType().String() != "exit" { - return uint64(0) - } - if ev.Exit.Process == nil { - return uint64(0) - } - if !ev.Exit.Process.HasInterpreter() { - return uint64(0) - } - return ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.MTime -} - -// GetExitInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) 
GetExitInterpreterFileMountId() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - if !ev.Exit.Process.HasInterpreter() { - return uint32(0) - } - return ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID -} - -// GetExitInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileName() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) -} - -// GetExitInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileNameLength() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.LinuxBinprm.FileEvent)) -} - -// GetExitInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFilePackageName() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) -} - -// GetExitInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFilePackageSourceVersion() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) -} - -// GetExitInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFilePackageVersion() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) -} - -// GetExitInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFilePath() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent) -} - -// GetExitInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFilePathLength() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.LinuxBinprm.FileEvent)) -} - -// GetExitInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileRights() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - if !ev.Exit.Process.HasInterpreter() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetExitInterpreterFileUid returns the value of the field, 
resolving if necessary -func (ev *Event) GetExitInterpreterFileUid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - if !ev.Exit.Process.HasInterpreter() { - return uint32(0) - } - return ev.Exit.Process.LinuxBinprm.FileEvent.FileFields.UID -} - -// GetExitInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetExitInterpreterFileUser() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - if !ev.Exit.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Exit.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetExitIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetExitIsExec() bool { - if ev.GetEventType().String() != "exit" { - return false - } - if ev.Exit.Process == nil { - return false - } - return ev.Exit.Process.IsExec -} - -// GetExitIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetExitIsKworker() bool { - if ev.GetEventType().String() != "exit" { - return false - } - if ev.Exit.Process == nil { - return false - } - return ev.Exit.Process.PIDContext.IsKworker -} - -// GetExitIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetExitIsThread() bool { - if ev.GetEventType().String() != "exit" { - return false - } - if ev.Exit.Process == nil { - return false - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.Exit.Process) -} - -// GetExitPid returns the value of the field, resolving if necessary -func (ev *Event) GetExitPid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.PIDContext.Pid -} - -// GetExitPpid returns the value of the field, resolving if necessary -func (ev *Event) GetExitPpid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.PPid -} - -// GetExitTid returns the value of the field, resolving if necessary -func (ev *Event) GetExitTid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.PIDContext.Tid -} - -// GetExitTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetExitTtyName() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.TTYName -} - -// GetExitUid returns the value of the field, resolving if necessary -func (ev *Event) GetExitUid() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - if ev.Exit.Process == nil { - return uint32(0) - } - return ev.Exit.Process.Credentials.UID -} - -// GetExitUser returns the value of the field, resolving if necessary -func (ev *Event) GetExitUser() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.Credentials.User -} - -// GetExitUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetExitUserSessionK8sGroups() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveK8SGroups(ev, 
&ev.Exit.Process.UserSession) -} - -// GetExitUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetExitUserSessionK8sUid() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Exit.Process.UserSession) -} - -// GetExitUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetExitUserSessionK8sUsername() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Exit.Process.UserSession) -} - -// GetImdsAwsIsImdsV2 returns the value of the field, resolving if necessary -func (ev *Event) GetImdsAwsIsImdsV2() bool { - if ev.GetEventType().String() != "imds" { - return false - } - return ev.IMDS.AWS.IsIMDSv2 -} - -// GetImdsAwsSecurityCredentialsType returns the value of the field, resolving if necessary -func (ev *Event) GetImdsAwsSecurityCredentialsType() string { - if ev.GetEventType().String() != "imds" { - return "" - } - return ev.IMDS.AWS.SecurityCredentials.Type -} - -// GetImdsCloudProvider returns the value of the field, resolving if necessary -func (ev *Event) GetImdsCloudProvider() string { - if ev.GetEventType().String() != "imds" { - return "" - } - return ev.IMDS.CloudProvider -} - -// GetImdsHost returns the value of the field, resolving if necessary -func (ev *Event) GetImdsHost() string { - if ev.GetEventType().String() != "imds" { - return "" - } - return ev.IMDS.Host -} - -// GetImdsServer returns the value of the field, resolving if necessary -func (ev *Event) GetImdsServer() string { - if ev.GetEventType().String() != "imds" { - return "" - } - return ev.IMDS.Server -} - -// GetImdsType returns the value of the field, resolving if necessary -func (ev *Event) GetImdsType() string { - if ev.GetEventType().String() != "imds" { - return "" - } - return ev.IMDS.Type -} - -// GetImdsUrl returns the value of the field, resolving if necessary -func (ev *Event) GetImdsUrl() string { - if ev.GetEventType().String() != "imds" { - return "" - } - return ev.IMDS.URL -} - -// GetImdsUserAgent returns the value of the field, resolving if necessary -func (ev *Event) GetImdsUserAgent() string { - if ev.GetEventType().String() != "imds" { - return "" - } - return ev.IMDS.UserAgent -} - -// GetLinkFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileChangeTime() uint64 { - if ev.GetEventType().String() != "link" { - return uint64(0) - } - return ev.Link.Source.FileFields.CTime -} - -// GetLinkFileDestinationChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationChangeTime() uint64 { - if ev.GetEventType().String() != "link" { - return uint64(0) - } - return ev.Link.Target.FileFields.CTime -} - -// GetLinkFileDestinationFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationFilesystem() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Link.Target) -} - -// GetLinkFileDestinationGid returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationGid() uint32 { - if ev.GetEventType().String() != "link" { - return uint32(0) - } - return ev.Link.Target.FileFields.GID -} - -// GetLinkFileDestinationGroup returns the value of the field, resolving if 
necessary -func (ev *Event) GetLinkFileDestinationGroup() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Link.Target.FileFields) -} - -// GetLinkFileDestinationHashes returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationHashes() []string { - if ev.GetEventType().String() != "link" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Link.Target) -} - -// GetLinkFileDestinationInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationInUpperLayer() bool { - if ev.GetEventType().String() != "link" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Link.Target.FileFields) -} - -// GetLinkFileDestinationInode returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationInode() uint64 { - if ev.GetEventType().String() != "link" { - return uint64(0) - } - return ev.Link.Target.FileFields.PathKey.Inode -} - -// GetLinkFileDestinationMode returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationMode() uint16 { - if ev.GetEventType().String() != "link" { - return uint16(0) - } - return ev.Link.Target.FileFields.Mode -} - -// GetLinkFileDestinationModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationModificationTime() uint64 { - if ev.GetEventType().String() != "link" { - return uint64(0) - } - return ev.Link.Target.FileFields.MTime -} - -// GetLinkFileDestinationMountId returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationMountId() uint32 { - if ev.GetEventType().String() != "link" { - return uint32(0) - } - return ev.Link.Target.FileFields.PathKey.MountID -} - -// GetLinkFileDestinationName returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationName() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Link.Target) -} - -// GetLinkFileDestinationNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationNameLength() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Link.Target)) -} - -// GetLinkFileDestinationPackageName returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationPackageName() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Link.Target) -} - -// GetLinkFileDestinationPackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationPackageSourceVersion() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Link.Target) -} - -// GetLinkFileDestinationPackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationPackageVersion() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Link.Target) -} - -// GetLinkFileDestinationPath returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationPath() string { - if ev.GetEventType().String() != "link" { - return "" - } - return 
ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Target) -} - -// GetLinkFileDestinationPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationPathLength() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Target)) -} - -// GetLinkFileDestinationRights returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationRights() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Link.Target.FileFields) -} - -// GetLinkFileDestinationUid returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationUid() uint32 { - if ev.GetEventType().String() != "link" { - return uint32(0) - } - return ev.Link.Target.FileFields.UID -} - -// GetLinkFileDestinationUser returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileDestinationUser() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Link.Target.FileFields) -} - -// GetLinkFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileFilesystem() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Link.Source) -} - -// GetLinkFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileGid() uint32 { - if ev.GetEventType().String() != "link" { - return uint32(0) - } - return ev.Link.Source.FileFields.GID -} - -// GetLinkFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileGroup() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Link.Source.FileFields) -} - -// GetLinkFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileHashes() []string { - if ev.GetEventType().String() != "link" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Link.Source) -} - -// GetLinkFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileInUpperLayer() bool { - if ev.GetEventType().String() != "link" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Link.Source.FileFields) -} - -// GetLinkFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileInode() uint64 { - if ev.GetEventType().String() != "link" { - return uint64(0) - } - return ev.Link.Source.FileFields.PathKey.Inode -} - -// GetLinkFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileMode() uint16 { - if ev.GetEventType().String() != "link" { - return uint16(0) - } - return ev.Link.Source.FileFields.Mode -} - -// GetLinkFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileModificationTime() uint64 { - if ev.GetEventType().String() != "link" { - return uint64(0) - } - return ev.Link.Source.FileFields.MTime -} - -// GetLinkFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileMountId() uint32 { - if ev.GetEventType().String() != "link" { - return uint32(0) - } - return ev.Link.Source.FileFields.PathKey.MountID -} - -// GetLinkFileName returns the value of the field, resolving if necessary -func (ev *Event) 
GetLinkFileName() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Link.Source) -} - -// GetLinkFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileNameLength() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Link.Source)) -} - -// GetLinkFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFilePackageName() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Link.Source) -} - -// GetLinkFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFilePackageSourceVersion() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Link.Source) -} - -// GetLinkFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFilePackageVersion() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Link.Source) -} - -// GetLinkFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFilePath() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Source) -} - -// GetLinkFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFilePathLength() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Link.Source)) -} - -// GetLinkFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileRights() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Link.Source.FileFields) -} - -// GetLinkFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileUid() uint32 { - if ev.GetEventType().String() != "link" { - return uint32(0) - } - return ev.Link.Source.FileFields.UID -} - -// GetLinkFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetLinkFileUser() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Link.Source.FileFields) -} - -// GetLinkRetval returns the value of the field, resolving if necessary -func (ev *Event) GetLinkRetval() int64 { - if ev.GetEventType().String() != "link" { - return int64(0) - } - return ev.Link.SyscallEvent.Retval -} - -// GetLinkSyscallDestinationPath returns the value of the field, resolving if necessary -func (ev *Event) GetLinkSyscallDestinationPath() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Link.SyscallContext) -} - -// GetLinkSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetLinkSyscallInt1() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Link.SyscallContext) -} - -// GetLinkSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetLinkSyscallInt2() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, 
&ev.Link.SyscallContext) -} - -// GetLinkSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetLinkSyscallInt3() int { - if ev.GetEventType().String() != "link" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Link.SyscallContext) -} - -// GetLinkSyscallPath returns the value of the field, resolving if necessary -func (ev *Event) GetLinkSyscallPath() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Link.SyscallContext) -} - -// GetLinkSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetLinkSyscallStr1() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Link.SyscallContext) -} - -// GetLinkSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) GetLinkSyscallStr2() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Link.SyscallContext) -} - -// GetLinkSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetLinkSyscallStr3() string { - if ev.GetEventType().String() != "link" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Link.SyscallContext) -} - -// GetLoadModuleArgs returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleArgs() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.FieldHandlers.ResolveModuleArgs(ev, &ev.LoadModule) -} - -// GetLoadModuleArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleArgsTruncated() bool { - if ev.GetEventType().String() != "load_module" { - return false - } - return ev.LoadModule.ArgsTruncated -} - -// GetLoadModuleArgv returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleArgv() []string { - if ev.GetEventType().String() != "load_module" { - return []string{} - } - return ev.FieldHandlers.ResolveModuleArgv(ev, &ev.LoadModule) -} - -// GetLoadModuleFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileChangeTime() uint64 { - if ev.GetEventType().String() != "load_module" { - return uint64(0) - } - return ev.LoadModule.File.FileFields.CTime -} - -// GetLoadModuleFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileFilesystem() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.LoadModule.File) -} - -// GetLoadModuleFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileGid() uint32 { - if ev.GetEventType().String() != "load_module" { - return uint32(0) - } - return ev.LoadModule.File.FileFields.GID -} - -// GetLoadModuleFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileGroup() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.LoadModule.File.FileFields) -} - -// GetLoadModuleFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileHashes() []string { - if ev.GetEventType().String() != "load_module" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.LoadModule.File) 
-} - -// GetLoadModuleFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileInUpperLayer() bool { - if ev.GetEventType().String() != "load_module" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.LoadModule.File.FileFields) -} - -// GetLoadModuleFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileInode() uint64 { - if ev.GetEventType().String() != "load_module" { - return uint64(0) - } - return ev.LoadModule.File.FileFields.PathKey.Inode -} - -// GetLoadModuleFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileMode() uint16 { - if ev.GetEventType().String() != "load_module" { - return uint16(0) - } - return ev.LoadModule.File.FileFields.Mode -} - -// GetLoadModuleFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileModificationTime() uint64 { - if ev.GetEventType().String() != "load_module" { - return uint64(0) - } - return ev.LoadModule.File.FileFields.MTime -} - -// GetLoadModuleFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileMountId() uint32 { - if ev.GetEventType().String() != "load_module" { - return uint32(0) - } - return ev.LoadModule.File.FileFields.PathKey.MountID -} - -// GetLoadModuleFileName returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileName() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.LoadModule.File) -} - -// GetLoadModuleFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileNameLength() int { - if ev.GetEventType().String() != "load_module" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.LoadModule.File)) -} - -// GetLoadModuleFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFilePackageName() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.LoadModule.File) -} - -// GetLoadModuleFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFilePackageSourceVersion() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.LoadModule.File) -} - -// GetLoadModuleFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFilePackageVersion() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.LoadModule.File) -} - -// GetLoadModuleFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFilePath() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.LoadModule.File) -} - -// GetLoadModuleFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFilePathLength() int { - if ev.GetEventType().String() != "load_module" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.LoadModule.File)) -} - -// GetLoadModuleFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileRights() int { - if 
ev.GetEventType().String() != "load_module" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.LoadModule.File.FileFields) -} - -// GetLoadModuleFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileUid() uint32 { - if ev.GetEventType().String() != "load_module" { - return uint32(0) - } - return ev.LoadModule.File.FileFields.UID -} - -// GetLoadModuleFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleFileUser() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.LoadModule.File.FileFields) -} - -// GetLoadModuleLoadedFromMemory returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleLoadedFromMemory() bool { - if ev.GetEventType().String() != "load_module" { - return false - } - return ev.LoadModule.LoadedFromMemory -} - -// GetLoadModuleName returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleName() string { - if ev.GetEventType().String() != "load_module" { - return "" - } - return ev.LoadModule.Name -} - -// GetLoadModuleRetval returns the value of the field, resolving if necessary -func (ev *Event) GetLoadModuleRetval() int64 { - if ev.GetEventType().String() != "load_module" { - return int64(0) - } - return ev.LoadModule.SyscallEvent.Retval -} - -// GetMkdirFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileChangeTime() uint64 { - if ev.GetEventType().String() != "mkdir" { - return uint64(0) - } - return ev.Mkdir.File.FileFields.CTime -} - -// GetMkdirFileDestinationMode returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileDestinationMode() uint32 { - if ev.GetEventType().String() != "mkdir" { - return uint32(0) - } - return ev.Mkdir.Mode -} - -// GetMkdirFileDestinationRights returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileDestinationRights() uint32 { - if ev.GetEventType().String() != "mkdir" { - return uint32(0) - } - return ev.Mkdir.Mode -} - -// GetMkdirFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileFilesystem() string { - if ev.GetEventType().String() != "mkdir" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Mkdir.File) -} - -// GetMkdirFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileGid() uint32 { - if ev.GetEventType().String() != "mkdir" { - return uint32(0) - } - return ev.Mkdir.File.FileFields.GID -} - -// GetMkdirFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileGroup() string { - if ev.GetEventType().String() != "mkdir" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Mkdir.File.FileFields) -} - -// GetMkdirFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileHashes() []string { - if ev.GetEventType().String() != "mkdir" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Mkdir.File) -} - -// GetMkdirFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileInUpperLayer() bool { - if ev.GetEventType().String() != "mkdir" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Mkdir.File.FileFields) -} - -// GetMkdirFileInode returns the value of the field, resolving if necessary 
-func (ev *Event) GetMkdirFileInode() uint64 { - if ev.GetEventType().String() != "mkdir" { - return uint64(0) - } - return ev.Mkdir.File.FileFields.PathKey.Inode -} - -// GetMkdirFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileMode() uint16 { - if ev.GetEventType().String() != "mkdir" { - return uint16(0) - } - return ev.Mkdir.File.FileFields.Mode -} - -// GetMkdirFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileModificationTime() uint64 { - if ev.GetEventType().String() != "mkdir" { - return uint64(0) - } - return ev.Mkdir.File.FileFields.MTime -} - -// GetMkdirFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileMountId() uint32 { - if ev.GetEventType().String() != "mkdir" { - return uint32(0) - } - return ev.Mkdir.File.FileFields.PathKey.MountID -} - -// GetMkdirFileName returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileName() string { - if ev.GetEventType().String() != "mkdir" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Mkdir.File) -} - -// GetMkdirFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileNameLength() int { - if ev.GetEventType().String() != "mkdir" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Mkdir.File)) -} - -// GetMkdirFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFilePackageName() string { - if ev.GetEventType().String() != "mkdir" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Mkdir.File) -} - -// GetMkdirFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFilePackageSourceVersion() string { - if ev.GetEventType().String() != "mkdir" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Mkdir.File) -} - -// GetMkdirFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFilePackageVersion() string { - if ev.GetEventType().String() != "mkdir" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Mkdir.File) -} - -// GetMkdirFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFilePath() string { - if ev.GetEventType().String() != "mkdir" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Mkdir.File) -} - -// GetMkdirFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFilePathLength() int { - if ev.GetEventType().String() != "mkdir" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Mkdir.File)) -} - -// GetMkdirFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileRights() int { - if ev.GetEventType().String() != "mkdir" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Mkdir.File.FileFields) -} - -// GetMkdirFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileUid() uint32 { - if ev.GetEventType().String() != "mkdir" { - return uint32(0) - } - return ev.Mkdir.File.FileFields.UID -} - -// GetMkdirFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirFileUser() string { - if ev.GetEventType().String() != "mkdir" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Mkdir.File.FileFields) -} - -// 
GetMkdirRetval returns the value of the field, resolving if necessary -func (ev *Event) GetMkdirRetval() int64 { - if ev.GetEventType().String() != "mkdir" { - return int64(0) - } - return ev.Mkdir.SyscallEvent.Retval -} - -// GetMmapFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileChangeTime() uint64 { - if ev.GetEventType().String() != "mmap" { - return uint64(0) - } - return ev.MMap.File.FileFields.CTime -} - -// GetMmapFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileFilesystem() string { - if ev.GetEventType().String() != "mmap" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.MMap.File) -} - -// GetMmapFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileGid() uint32 { - if ev.GetEventType().String() != "mmap" { - return uint32(0) - } - return ev.MMap.File.FileFields.GID -} - -// GetMmapFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileGroup() string { - if ev.GetEventType().String() != "mmap" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.MMap.File.FileFields) -} - -// GetMmapFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileHashes() []string { - if ev.GetEventType().String() != "mmap" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.MMap.File) -} - -// GetMmapFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileInUpperLayer() bool { - if ev.GetEventType().String() != "mmap" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.MMap.File.FileFields) -} - -// GetMmapFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileInode() uint64 { - if ev.GetEventType().String() != "mmap" { - return uint64(0) - } - return ev.MMap.File.FileFields.PathKey.Inode -} - -// GetMmapFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileMode() uint16 { - if ev.GetEventType().String() != "mmap" { - return uint16(0) - } - return ev.MMap.File.FileFields.Mode -} - -// GetMmapFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileModificationTime() uint64 { - if ev.GetEventType().String() != "mmap" { - return uint64(0) - } - return ev.MMap.File.FileFields.MTime -} - -// GetMmapFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileMountId() uint32 { - if ev.GetEventType().String() != "mmap" { - return uint32(0) - } - return ev.MMap.File.FileFields.PathKey.MountID -} - -// GetMmapFileName returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileName() string { - if ev.GetEventType().String() != "mmap" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.MMap.File) -} - -// GetMmapFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileNameLength() int { - if ev.GetEventType().String() != "mmap" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.MMap.File)) -} - -// GetMmapFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFilePackageName() string { - if ev.GetEventType().String() != "mmap" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.MMap.File) -} - -// 
GetMmapFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFilePackageSourceVersion() string { - if ev.GetEventType().String() != "mmap" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.MMap.File) -} - -// GetMmapFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFilePackageVersion() string { - if ev.GetEventType().String() != "mmap" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.MMap.File) -} - -// GetMmapFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFilePath() string { - if ev.GetEventType().String() != "mmap" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.MMap.File) -} - -// GetMmapFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFilePathLength() int { - if ev.GetEventType().String() != "mmap" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.MMap.File)) -} - -// GetMmapFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileRights() int { - if ev.GetEventType().String() != "mmap" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.MMap.File.FileFields) -} - -// GetMmapFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileUid() uint32 { - if ev.GetEventType().String() != "mmap" { - return uint32(0) - } - return ev.MMap.File.FileFields.UID -} - -// GetMmapFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFileUser() string { - if ev.GetEventType().String() != "mmap" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.MMap.File.FileFields) -} - -// GetMmapFlags returns the value of the field, resolving if necessary -func (ev *Event) GetMmapFlags() uint64 { - if ev.GetEventType().String() != "mmap" { - return uint64(0) - } - return ev.MMap.Flags -} - -// GetMmapProtection returns the value of the field, resolving if necessary -func (ev *Event) GetMmapProtection() uint64 { - if ev.GetEventType().String() != "mmap" { - return uint64(0) - } - return ev.MMap.Protection -} - -// GetMmapRetval returns the value of the field, resolving if necessary -func (ev *Event) GetMmapRetval() int64 { - if ev.GetEventType().String() != "mmap" { - return int64(0) - } - return ev.MMap.SyscallEvent.Retval -} - -// GetMountFsType returns the value of the field, resolving if necessary -func (ev *Event) GetMountFsType() string { - if ev.GetEventType().String() != "mount" { - return "" - } - return ev.Mount.Mount.FSType -} - -// GetMountMountpointPath returns the value of the field, resolving if necessary -func (ev *Event) GetMountMountpointPath() string { - if ev.GetEventType().String() != "mount" { - return "" - } - return ev.FieldHandlers.ResolveMountPointPath(ev, &ev.Mount) -} - -// GetMountRetval returns the value of the field, resolving if necessary -func (ev *Event) GetMountRetval() int64 { - if ev.GetEventType().String() != "mount" { - return int64(0) - } - return ev.Mount.SyscallEvent.Retval -} - -// GetMountRootPath returns the value of the field, resolving if necessary -func (ev *Event) GetMountRootPath() string { - if ev.GetEventType().String() != "mount" { - return "" - } - return ev.FieldHandlers.ResolveMountRootPath(ev, &ev.Mount) -} - -// GetMountSourcePath returns the value of the field, resolving if necessary -func (ev *Event) GetMountSourcePath() string { - if 
ev.GetEventType().String() != "mount" { - return "" - } - return ev.FieldHandlers.ResolveMountSourcePath(ev, &ev.Mount) -} - -// GetMountSyscallFsType returns the value of the field, resolving if necessary -func (ev *Event) GetMountSyscallFsType() string { - if ev.GetEventType().String() != "mount" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Mount.SyscallContext) -} - -// GetMountSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetMountSyscallInt1() int { - if ev.GetEventType().String() != "mount" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Mount.SyscallContext) -} - -// GetMountSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetMountSyscallInt2() int { - if ev.GetEventType().String() != "mount" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Mount.SyscallContext) -} - -// GetMountSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetMountSyscallInt3() int { - if ev.GetEventType().String() != "mount" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Mount.SyscallContext) -} - -// GetMountSyscallMountpointPath returns the value of the field, resolving if necessary -func (ev *Event) GetMountSyscallMountpointPath() string { - if ev.GetEventType().String() != "mount" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Mount.SyscallContext) -} - -// GetMountSyscallSourcePath returns the value of the field, resolving if necessary -func (ev *Event) GetMountSyscallSourcePath() string { - if ev.GetEventType().String() != "mount" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Mount.SyscallContext) -} - -// GetMountSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetMountSyscallStr1() string { - if ev.GetEventType().String() != "mount" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Mount.SyscallContext) -} - -// GetMountSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) GetMountSyscallStr2() string { - if ev.GetEventType().String() != "mount" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Mount.SyscallContext) -} - -// GetMountSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetMountSyscallStr3() string { - if ev.GetEventType().String() != "mount" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Mount.SyscallContext) -} - -// GetMprotectReqProtection returns the value of the field, resolving if necessary -func (ev *Event) GetMprotectReqProtection() int { - if ev.GetEventType().String() != "mprotect" { - return 0 - } - return ev.MProtect.ReqProtection -} - -// GetMprotectRetval returns the value of the field, resolving if necessary -func (ev *Event) GetMprotectRetval() int64 { - if ev.GetEventType().String() != "mprotect" { - return int64(0) - } - return ev.MProtect.SyscallEvent.Retval -} - -// GetMprotectVmProtection returns the value of the field, resolving if necessary -func (ev *Event) GetMprotectVmProtection() int { - if ev.GetEventType().String() != "mprotect" { - return 0 - } - return ev.MProtect.VMProtection -} - -// GetNetworkDestinationIp returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkDestinationIp() net.IPNet { - return ev.NetworkContext.Destination.IPNet -} - -// 
GetNetworkDestinationIsPublic returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkDestinationIsPublic() bool { - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Destination) -} - -// GetNetworkDestinationPort returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkDestinationPort() uint16 { - return ev.NetworkContext.Destination.Port -} - -// GetNetworkDeviceIfname returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkDeviceIfname() string { - return ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.NetworkContext.Device) -} - -// GetNetworkL3Protocol returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkL3Protocol() uint16 { - return ev.NetworkContext.L3Protocol -} - -// GetNetworkL4Protocol returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkL4Protocol() uint16 { - return ev.NetworkContext.L4Protocol -} - -// GetNetworkSize returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkSize() uint32 { - return ev.NetworkContext.Size -} - -// GetNetworkSourceIp returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkSourceIp() net.IPNet { - return ev.NetworkContext.Source.IPNet -} - -// GetNetworkSourceIsPublic returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkSourceIsPublic() bool { - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.NetworkContext.Source) -} - -// GetNetworkSourcePort returns the value of the field, resolving if necessary -func (ev *Event) GetNetworkSourcePort() uint16 { - return ev.NetworkContext.Source.Port -} - -// GetOndemandArg1Str returns the value of the field, resolving if necessary -func (ev *Event) GetOndemandArg1Str() string { - if ev.GetEventType().String() != "ondemand" { - return "" - } - return ev.FieldHandlers.ResolveOnDemandArg1Str(ev, &ev.OnDemand) -} - -// GetOndemandArg1Uint returns the value of the field, resolving if necessary -func (ev *Event) GetOndemandArg1Uint() int { - if ev.GetEventType().String() != "ondemand" { - return 0 - } - return ev.FieldHandlers.ResolveOnDemandArg1Uint(ev, &ev.OnDemand) -} - -// GetOndemandArg2Str returns the value of the field, resolving if necessary -func (ev *Event) GetOndemandArg2Str() string { - if ev.GetEventType().String() != "ondemand" { - return "" - } - return ev.FieldHandlers.ResolveOnDemandArg2Str(ev, &ev.OnDemand) -} - -// GetOndemandArg2Uint returns the value of the field, resolving if necessary -func (ev *Event) GetOndemandArg2Uint() int { - if ev.GetEventType().String() != "ondemand" { - return 0 - } - return ev.FieldHandlers.ResolveOnDemandArg2Uint(ev, &ev.OnDemand) -} - -// GetOndemandArg3Str returns the value of the field, resolving if necessary -func (ev *Event) GetOndemandArg3Str() string { - if ev.GetEventType().String() != "ondemand" { - return "" - } - return ev.FieldHandlers.ResolveOnDemandArg3Str(ev, &ev.OnDemand) -} - -// GetOndemandArg3Uint returns the value of the field, resolving if necessary -func (ev *Event) GetOndemandArg3Uint() int { - if ev.GetEventType().String() != "ondemand" { - return 0 - } - return ev.FieldHandlers.ResolveOnDemandArg3Uint(ev, &ev.OnDemand) -} - -// GetOndemandArg4Str returns the value of the field, resolving if necessary -func (ev *Event) GetOndemandArg4Str() string { - if ev.GetEventType().String() != "ondemand" { - return "" - } - return ev.FieldHandlers.ResolveOnDemandArg4Str(ev, &ev.OnDemand) -} - -// 
GetOndemandArg4Uint returns the value of the field, resolving if necessary -func (ev *Event) GetOndemandArg4Uint() int { - if ev.GetEventType().String() != "ondemand" { - return 0 - } - return ev.FieldHandlers.ResolveOnDemandArg4Uint(ev, &ev.OnDemand) -} - -// GetOndemandName returns the value of the field, resolving if necessary -func (ev *Event) GetOndemandName() string { - if ev.GetEventType().String() != "ondemand" { - return "" - } - return ev.FieldHandlers.ResolveOnDemandName(ev, &ev.OnDemand) -} - -// GetOpenFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileChangeTime() uint64 { - if ev.GetEventType().String() != "open" { - return uint64(0) - } - return ev.Open.File.FileFields.CTime -} - -// GetOpenFileDestinationMode returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileDestinationMode() uint32 { - if ev.GetEventType().String() != "open" { - return uint32(0) - } - return ev.Open.Mode -} - -// GetOpenFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileFilesystem() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Open.File) -} - -// GetOpenFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileGid() uint32 { - if ev.GetEventType().String() != "open" { - return uint32(0) - } - return ev.Open.File.FileFields.GID -} - -// GetOpenFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileGroup() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Open.File.FileFields) -} - -// GetOpenFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileHashes() []string { - if ev.GetEventType().String() != "open" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Open.File) -} - -// GetOpenFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileInUpperLayer() bool { - if ev.GetEventType().String() != "open" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Open.File.FileFields) -} - -// GetOpenFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileInode() uint64 { - if ev.GetEventType().String() != "open" { - return uint64(0) - } - return ev.Open.File.FileFields.PathKey.Inode -} - -// GetOpenFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileMode() uint16 { - if ev.GetEventType().String() != "open" { - return uint16(0) - } - return ev.Open.File.FileFields.Mode -} - -// GetOpenFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileModificationTime() uint64 { - if ev.GetEventType().String() != "open" { - return uint64(0) - } - return ev.Open.File.FileFields.MTime -} - -// GetOpenFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileMountId() uint32 { - if ev.GetEventType().String() != "open" { - return uint32(0) - } - return ev.Open.File.FileFields.PathKey.MountID -} - -// GetOpenFileName returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileName() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Open.File) -} - -// GetOpenFileNameLength 
returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileNameLength() int { - if ev.GetEventType().String() != "open" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Open.File)) -} - -// GetOpenFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFilePackageName() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Open.File) -} - -// GetOpenFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFilePackageSourceVersion() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Open.File) -} - -// GetOpenFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFilePackageVersion() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Open.File) -} - -// GetOpenFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFilePath() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Open.File) -} - -// GetOpenFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFilePathLength() int { - if ev.GetEventType().String() != "open" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Open.File)) -} - -// GetOpenFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileRights() int { - if ev.GetEventType().String() != "open" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Open.File.FileFields) -} - -// GetOpenFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileUid() uint32 { - if ev.GetEventType().String() != "open" { - return uint32(0) - } - return ev.Open.File.FileFields.UID -} - -// GetOpenFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFileUser() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Open.File.FileFields) -} - -// GetOpenFlags returns the value of the field, resolving if necessary -func (ev *Event) GetOpenFlags() uint32 { - if ev.GetEventType().String() != "open" { - return uint32(0) - } - return ev.Open.Flags -} - -// GetOpenRetval returns the value of the field, resolving if necessary -func (ev *Event) GetOpenRetval() int64 { - if ev.GetEventType().String() != "open" { - return int64(0) - } - return ev.Open.SyscallEvent.Retval -} - -// GetOpenSyscallFlags returns the value of the field, resolving if necessary -func (ev *Event) GetOpenSyscallFlags() int { - if ev.GetEventType().String() != "open" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Open.SyscallContext) -} - -// GetOpenSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetOpenSyscallInt1() int { - if ev.GetEventType().String() != "open" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Open.SyscallContext) -} - -// GetOpenSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetOpenSyscallInt2() int { - if ev.GetEventType().String() != "open" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Open.SyscallContext) -} - -// 
GetOpenSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetOpenSyscallInt3() int { - if ev.GetEventType().String() != "open" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Open.SyscallContext) -} - -// GetOpenSyscallMode returns the value of the field, resolving if necessary -func (ev *Event) GetOpenSyscallMode() int { - if ev.GetEventType().String() != "open" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Open.SyscallContext) -} - -// GetOpenSyscallPath returns the value of the field, resolving if necessary -func (ev *Event) GetOpenSyscallPath() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Open.SyscallContext) -} - -// GetOpenSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetOpenSyscallStr1() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Open.SyscallContext) -} - -// GetOpenSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) GetOpenSyscallStr2() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Open.SyscallContext) -} - -// GetOpenSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetOpenSyscallStr3() string { - if ev.GetEventType().String() != "open" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Open.SyscallContext) -} - -// GetPacketDestinationIp returns the value of the field, resolving if necessary -func (ev *Event) GetPacketDestinationIp() net.IPNet { - if ev.GetEventType().String() != "packet" { - return net.IPNet{} - } - return ev.RawPacket.NetworkContext.Destination.IPNet -} - -// GetPacketDestinationIsPublic returns the value of the field, resolving if necessary -func (ev *Event) GetPacketDestinationIsPublic() bool { - if ev.GetEventType().String() != "packet" { - return false - } - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Destination) -} - -// GetPacketDestinationPort returns the value of the field, resolving if necessary -func (ev *Event) GetPacketDestinationPort() uint16 { - if ev.GetEventType().String() != "packet" { - return uint16(0) - } - return ev.RawPacket.NetworkContext.Destination.Port -} - -// GetPacketDeviceIfname returns the value of the field, resolving if necessary -func (ev *Event) GetPacketDeviceIfname() string { - if ev.GetEventType().String() != "packet" { - return "" - } - return ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.RawPacket.NetworkContext.Device) -} - -// GetPacketFilter returns the value of the field, resolving if necessary -func (ev *Event) GetPacketFilter() string { - if ev.GetEventType().String() != "packet" { - return "" - } - return ev.RawPacket.Filter -} - -// GetPacketL3Protocol returns the value of the field, resolving if necessary -func (ev *Event) GetPacketL3Protocol() uint16 { - if ev.GetEventType().String() != "packet" { - return uint16(0) - } - return ev.RawPacket.NetworkContext.L3Protocol -} - -// GetPacketL4Protocol returns the value of the field, resolving if necessary -func (ev *Event) GetPacketL4Protocol() uint16 { - if ev.GetEventType().String() != "packet" { - return uint16(0) - } - return ev.RawPacket.NetworkContext.L4Protocol -} - -// GetPacketSize returns the value of the field, resolving if necessary -func (ev 
*Event) GetPacketSize() uint32 { - if ev.GetEventType().String() != "packet" { - return uint32(0) - } - return ev.RawPacket.NetworkContext.Size -} - -// GetPacketSourceIp returns the value of the field, resolving if necessary -func (ev *Event) GetPacketSourceIp() net.IPNet { - if ev.GetEventType().String() != "packet" { - return net.IPNet{} - } - return ev.RawPacket.NetworkContext.Source.IPNet -} - -// GetPacketSourceIsPublic returns the value of the field, resolving if necessary -func (ev *Event) GetPacketSourceIsPublic() bool { - if ev.GetEventType().String() != "packet" { - return false - } - return ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.RawPacket.NetworkContext.Source) -} - -// GetPacketSourcePort returns the value of the field, resolving if necessary -func (ev *Event) GetPacketSourcePort() uint16 { - if ev.GetEventType().String() != "packet" { - return uint16(0) - } - return ev.RawPacket.NetworkContext.Source.Port -} - -// GetPacketTlsVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPacketTlsVersion() uint16 { - if ev.GetEventType().String() != "packet" { - return uint16(0) - } - return ev.RawPacket.TLSContext.Version -} - -// GetProcessAncestorsArgs returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsArgs() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsArgsFlags() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsArgsOptions() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsArgsScrubbed() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsArgsTruncated() []bool { - if ev.BaseEvent.ProcessContext == nil { - return []bool{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsArgv returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsArgv() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsArgv0() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsArgvScrubbed() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsAuid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsAuid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.AUID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCapEffective() []uint64 { - if ev.BaseEvent.ProcessContext == nil { - return []uint64{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.CapEffective - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCapPermitted() []uint64 { - if ev.BaseEvent.ProcessContext == nil { - return []uint64{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.CapPermitted - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCgroupFileInode() []uint64 { - if ev.BaseEvent.ProcessContext == nil { - return []uint64{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.CGroup.CGroupFile.Inode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCgroupFileMountId() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.CGroup.CGroupFile.MountID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCgroupId() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - 
ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCgroupManager() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCgroupVersion() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveCGroupVersion(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCmdargv returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCmdargv() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessCmdArgv(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsComm returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsComm() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Comm - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsContainerId() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCreatedAt() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsEgid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsEgid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EGID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsEgroup() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EGroup - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsEnvp() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - 
element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsEnvs() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsEnvsTruncated() []bool { - if ev.BaseEvent.ProcessContext == nil { - return []bool{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsEuid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsEuid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EUID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsEuser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsEuser() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EUser - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileChangeTime() []uint64 { - if ev.BaseEvent.ProcessContext == nil { - return []uint64{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.CTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileFilesystem() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if 
ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileGid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.GID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileGroup() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileHashes() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileInUpperLayer() []bool { - if ev.BaseEvent.ProcessContext == nil { - return []bool{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileInode() []uint64 { - if ev.BaseEvent.ProcessContext == nil { - return []uint64{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileMode() []uint16 { - if ev.BaseEvent.ProcessContext == nil { - return []uint16{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint16{} - } - var values []uint16 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.Mode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileModificationTime() []uint64 { - if ev.BaseEvent.ProcessContext == nil { - return []uint64{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.MTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileMountId() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileName() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values 
[]string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileNameLength() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFilePackageName() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFilePackageSourceVersion() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFilePackageVersion() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFilePath() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFilePath(ev, 
&element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFilePathLength() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileRights() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileUid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.UID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileUser() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFsgid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSGID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFsgroup() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor 
== nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSGroup - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFsuid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSUID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFsuser() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSUser - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsGid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsGid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.GID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsGroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsGroup() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.Group - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileChangeTime() []uint64 { - if ev.BaseEvent.ProcessContext == nil { - return []uint64{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileFilesystem returns the value of the field, resolving if necessary -func 
(ev *Event) GetProcessAncestorsInterpreterFileFilesystem() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileGid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileGroup() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileHashes() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileInUpperLayer() []bool { - if ev.BaseEvent.ProcessContext == nil { - return []bool{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileInode() []uint64 { - if ev.BaseEvent.ProcessContext == nil { - return []uint64{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileMode() []uint16 { - if ev.BaseEvent.ProcessContext == nil { - return []uint16{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint16{} - } - var values []uint16 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileModificationTime() []uint64 { - if ev.BaseEvent.ProcessContext == nil { - return []uint64{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileMountId() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) 
GetProcessAncestorsInterpreterFileName() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileNameLength() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFilePackageName() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFilePackageSourceVersion() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFilePackageVersion() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFilePath() []string { - if 
ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFilePathLength() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileRights() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileUid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsInterpreterFileUser() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsIsExec() []bool { - if ev.BaseEvent.ProcessContext == nil { - return []bool{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := 
&ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.IsExec - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsIsKworker() []bool { - if ev.BaseEvent.ProcessContext == nil { - return []bool{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PIDContext.IsKworker - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsIsThread() []bool { - if ev.BaseEvent.ProcessContext == nil { - return []bool{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return 0 - } - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - return iterator.Len(ctx) -} - -// GetProcessAncestorsPid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsPid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PIDContext.Pid - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsPpid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsPpid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PPid - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsTid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsTid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PIDContext.Tid - 
values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsTtyName() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.TTYName - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsUid() []uint32 { - if ev.BaseEvent.ProcessContext == nil { - return []uint32{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.UID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsUser() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.User - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsUserSessionK8sGroups() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsUserSessionK8sUid() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsUserSessionK8sUsername() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessArgs returns the value of the field, resolving if necessary -func (ev *Event) GetProcessArgs() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgs(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetProcessArgsFlags() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetProcessArgsOptions() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetProcessArgsScrubbed() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetProcessArgsTruncated() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessArgv returns the value of the field, resolving if necessary -func (ev *Event) GetProcessArgv() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetProcessArgv0() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetProcessArgvScrubbed() []string { - if ev.BaseEvent.ProcessContext == 
nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessAuid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAuid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.Credentials.AUID -} - -// GetProcessCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCapEffective() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Process.Credentials.CapEffective -} - -// GetProcessCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCapPermitted() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Process.Credentials.CapPermitted -} - -// GetProcessCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCgroupFileInode() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Process.CGroup.CGroupFile.Inode -} - -// GetProcessCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCgroupFileMountId() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.CGroup.CGroupFile.MountID -} - -// GetProcessCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCgroupId() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.BaseEvent.ProcessContext.Process.CGroup) -} - -// GetProcessCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCgroupManager() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.BaseEvent.ProcessContext.Process.CGroup) -} - -// GetProcessCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCgroupVersion() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.BaseEvent.ProcessContext.Process.CGroup) -} - -// GetProcessCmdargv returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCmdargv() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessCmdArgv(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessComm returns the value of the field, resolving if necessary -func (ev *Event) GetProcessComm() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.Comm -} - -// GetProcessContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessContainerId() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCreatedAt() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessEgid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessEgid() 
uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.Credentials.EGID -} - -// GetProcessEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessEgroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.Credentials.EGroup -} - -// GetProcessEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetProcessEnvp() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetProcessEnvs() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetProcessEnvsTruncated() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessEuid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessEuid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.Credentials.EUID -} - -// GetProcessEuser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessEuser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.Credentials.EUser -} - -// GetProcessExecTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessExecTime() time.Time { - if ev.BaseEvent.ProcessContext == nil { - return time.Time{} - } - return ev.BaseEvent.ProcessContext.Process.ExecTime -} - -// GetProcessExitTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessExitTime() time.Time { - if ev.BaseEvent.ProcessContext == nil { - return time.Time{} - } - return ev.BaseEvent.ProcessContext.Process.ExitTime -} - -// GetProcessFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileChangeTime() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.CTime -} - -// GetProcessFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileFilesystem() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) -} - -// GetProcessFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileGid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.GID -} - -// GetProcessFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileGroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if 
!ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) -} - -// GetProcessFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileHashes() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) -} - -// GetProcessFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileInUpperLayer() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) -} - -// GetProcessFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileInode() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode -} - -// GetProcessFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileMode() uint16 { - if ev.BaseEvent.ProcessContext == nil { - return uint16(0) - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return uint16(0) - } - return ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.Mode -} - -// GetProcessFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileModificationTime() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.MTime -} - -// GetProcessFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileMountId() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID -} - -// GetProcessFileName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) -} - -// GetProcessFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileNameLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent)) -} - -// GetProcessFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFilePackageName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) -} - -// GetProcessFilePackageSourceVersion returns the value of the field, resolving if 
necessary -func (ev *Event) GetProcessFilePackageSourceVersion() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) -} - -// GetProcessFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFilePackageVersion() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) -} - -// GetProcessFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFilePath() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) -} - -// GetProcessFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFilePathLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent)) -} - -// GetProcessFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileRights() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) -} - -// GetProcessFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileUid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields.UID -} - -// GetProcessFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileUser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent.FileFields) -} - -// GetProcessForkTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessForkTime() time.Time { - if ev.BaseEvent.ProcessContext == nil { - return time.Time{} - } - return ev.BaseEvent.ProcessContext.Process.ForkTime -} - -// GetProcessFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFsgid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.Credentials.FSGID -} - -// GetProcessFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFsgroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.Credentials.FSGroup -} - -// GetProcessFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFsuid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.Credentials.FSUID -} - -// GetProcessFsuser returns the value of the field, resolving if necessary -func (ev *Event) 
GetProcessFsuser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.Credentials.FSUser -} - -// GetProcessGid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessGid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.Credentials.GID -} - -// GetProcessGroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessGroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.Credentials.Group -} - -// GetProcessInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileChangeTime() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime -} - -// GetProcessInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileFilesystem() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) -} - -// GetProcessInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileGid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID -} - -// GetProcessInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileGroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetProcessInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileHashes() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) -} - -// GetProcessInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileInUpperLayer() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetProcessInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileInode() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode -} - -// GetProcessInterpreterFileMode returns 
the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileMode() uint16 { - if ev.BaseEvent.ProcessContext == nil { - return uint16(0) - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return uint16(0) - } - return ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode -} - -// GetProcessInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileModificationTime() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime -} - -// GetProcessInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileMountId() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID -} - -// GetProcessInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) -} - -// GetProcessInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileNameLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent)) -} - -// GetProcessInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFilePackageName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) -} - -// GetProcessInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFilePackageSourceVersion() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) -} - -// GetProcessInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFilePackageVersion() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) -} - -// GetProcessInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFilePath() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) -} - -// 
GetProcessInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFilePathLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent)) -} - -// GetProcessInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileRights() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetProcessInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileUid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID -} - -// GetProcessInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessInterpreterFileUser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetProcessIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetProcessIsExec() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - return ev.BaseEvent.ProcessContext.Process.IsExec -} - -// GetProcessIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetProcessIsKworker() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - return ev.BaseEvent.ProcessContext.Process.PIDContext.IsKworker -} - -// GetProcessIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetProcessIsThread() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessParentArgs returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentArgs() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentArgsFlags() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentArgsOptions() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - return 
ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentArgsScrubbed() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentArgsTruncated() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return false - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return false - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentArgv returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentArgv() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentArgv0() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentArgvScrubbed() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentAuid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentAuid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.AUID -} - -// GetProcessParentCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCapEffective() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.CapEffective -} - -// GetProcessParentCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCapPermitted() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.CapPermitted -} - -// 
GetProcessParentCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCgroupFileInode() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.Inode -} - -// GetProcessParentCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCgroupFileMountId() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.CGroup.CGroupFile.MountID -} - -// GetProcessParentCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCgroupId() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup) -} - -// GetProcessParentCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCgroupManager() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup) -} - -// GetProcessParentCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCgroupVersion() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0 - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.BaseEvent.ProcessContext.Parent.CGroup) -} - -// GetProcessParentCmdargv returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCmdargv() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentComm returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentComm() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.Comm -} - -// GetProcessParentContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentContainerId() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCreatedAt() int { - if 
ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentEgid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentEgid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.EGID -} - -// GetProcessParentEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentEgroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.EGroup -} - -// GetProcessParentEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentEnvp() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentEnvs() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentEnvsTruncated() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return false - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return false - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentEuid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentEuid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.EUID -} - -// GetProcessParentEuser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentEuser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.EUser -} - -// GetProcessParentFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileChangeTime() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint64(0) - } - if 
!ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.CTime -} - -// GetProcessParentFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileFilesystem() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) -} - -// GetProcessParentFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileGid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.GID -} - -// GetProcessParentFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileGroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) -} - -// GetProcessParentFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileHashes() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) -} - -// GetProcessParentFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileInUpperLayer() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return false - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return false - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) -} - -// GetProcessParentFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileInode() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.Inode -} - -// GetProcessParentFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileMode() uint16 { - if ev.BaseEvent.ProcessContext == nil { - return uint16(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 
uint16(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint16(0) - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return uint16(0) - } - return ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.Mode -} - -// GetProcessParentFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileModificationTime() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.MTime -} - -// GetProcessParentFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileMountId() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.PathKey.MountID -} - -// GetProcessParentFileName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) -} - -// GetProcessParentFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileNameLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent)) -} - -// GetProcessParentFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFilePackageName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) -} - -// GetProcessParentFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFilePackageSourceVersion() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) -} - -// GetProcessParentFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFilePackageVersion() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - 
return "" - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) -} - -// GetProcessParentFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFilePath() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) -} - -// GetProcessParentFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFilePathLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent)) -} - -// GetProcessParentFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileRights() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0 - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) -} - -// GetProcessParentFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileUid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields.UID -} - -// GetProcessParentFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileUser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent.FileFields) -} - -// GetProcessParentFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFsgid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.FSGID -} - -// GetProcessParentFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFsgroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.FSGroup -} - -// GetProcessParentFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFsuid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - 
return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.FSUID -} - -// GetProcessParentFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFsuser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.FSUser -} - -// GetProcessParentGid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentGid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.GID -} - -// GetProcessParentGroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentGroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.Group -} - -// GetProcessParentInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileChangeTime() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.CTime -} - -// GetProcessParentInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileFilesystem() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) -} - -// GetProcessParentInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileGid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.GID -} - -// GetProcessParentInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileGroup() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, 
&ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) -} - -// GetProcessParentInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileHashes() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) -} - -// GetProcessParentInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileInUpperLayer() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return false - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return false - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) -} - -// GetProcessParentInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileInode() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode -} - -// GetProcessParentInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileMode() uint16 { - if ev.BaseEvent.ProcessContext == nil { - return uint16(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint16(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint16(0) - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return uint16(0) - } - return ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.Mode -} - -// GetProcessParentInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileModificationTime() uint64 { - if ev.BaseEvent.ProcessContext == nil { - return uint64(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint64(0) - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return uint64(0) - } - return ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.MTime -} - -// GetProcessParentInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileMountId() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID -} - -// GetProcessParentInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) 
GetProcessParentInterpreterFileName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) -} - -// GetProcessParentInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileNameLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent)) -} - -// GetProcessParentInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFilePackageName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) -} - -// GetProcessParentInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFilePackageSourceVersion() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) -} - -// GetProcessParentInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFilePackageVersion() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) -} - -// GetProcessParentInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFilePath() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) -} - -// GetProcessParentInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFilePathLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent)) -} - -// GetProcessParentInterpreterFileRights returns the value of the field, resolving if necessary 
-func (ev *Event) GetProcessParentInterpreterFileRights() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0 - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) -} - -// GetProcessParentInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileUid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields.UID -} - -// GetProcessParentInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentInterpreterFileUser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent.FileFields) -} - -// GetProcessParentIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentIsExec() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return false - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return false - } - return ev.BaseEvent.ProcessContext.Parent.IsExec -} - -// GetProcessParentIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentIsKworker() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return false - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return false - } - return ev.BaseEvent.ProcessContext.Parent.PIDContext.IsKworker -} - -// GetProcessParentIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentIsThread() bool { - if ev.BaseEvent.ProcessContext == nil { - return false - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return false - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return false - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentPid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentPid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid -} - -// GetProcessParentPpid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentPpid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.PPid -} - -// 
GetProcessParentTid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentTid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.PIDContext.Tid -} - -// GetProcessParentTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentTtyName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.TTYName -} - -// GetProcessParentUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentUid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return uint32(0) - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.UID -} - -// GetProcessParentUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentUser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.Credentials.User -} - -// GetProcessParentUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentUserSessionK8sGroups() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession) -} - -// GetProcessParentUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentUserSessionK8sUid() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession) -} - -// GetProcessParentUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentUserSessionK8sUsername() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.BaseEvent.ProcessContext.Parent.UserSession) -} - -// GetProcessPid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessPid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.PIDContext.Pid -} - -// GetProcessPpid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessPpid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.PPid -} - -// GetProcessTid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessTid() uint32 { - if 
ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.PIDContext.Tid -} - -// GetProcessTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessTtyName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.TTYName -} - -// GetProcessUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessUid() uint32 { - if ev.BaseEvent.ProcessContext == nil { - return uint32(0) - } - return ev.BaseEvent.ProcessContext.Process.Credentials.UID -} - -// GetProcessUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessUser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.Credentials.User -} - -// GetProcessUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetProcessUserSessionK8sGroups() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.BaseEvent.ProcessContext.Process.UserSession) -} - -// GetProcessUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessUserSessionK8sUid() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.BaseEvent.ProcessContext.Process.UserSession) -} - -// GetProcessUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetProcessUserSessionK8sUsername() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.BaseEvent.ProcessContext.Process.UserSession) -} - -// GetPtraceRequest returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceRequest() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - return ev.PTrace.Request -} - -// GetPtraceRetval returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceRetval() int64 { - if ev.GetEventType().String() != "ptrace" { - return int64(0) - } - return ev.PTrace.SyscallEvent.Retval -} - -// GetPtraceTraceeAncestorsArgs returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsArgs() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgs(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsArgsFlags() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process) - 
values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsArgsOptions() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsArgsScrubbed() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsArgsTruncated() []bool { - if ev.GetEventType().String() != "ptrace" { - return []bool{} - } - if ev.PTrace.Tracee == nil { - return []bool{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsArgv returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsArgv() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsArgv0() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsArgvScrubbed() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsAuid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsAuid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.AUID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsCapEffective() []uint64 { - if ev.GetEventType().String() != "ptrace" { - return []uint64{} - } - if ev.PTrace.Tracee == nil { - return []uint64{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.CapEffective - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsCapPermitted() []uint64 { - if ev.GetEventType().String() != "ptrace" { - return []uint64{} - } - if ev.PTrace.Tracee == nil { - return []uint64{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.CapPermitted - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsCgroupFileInode returns the value of the field, 
resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsCgroupFileInode() []uint64 { - if ev.GetEventType().String() != "ptrace" { - return []uint64{} - } - if ev.PTrace.Tracee == nil { - return []uint64{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.CGroup.CGroupFile.Inode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsCgroupFileMountId() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.CGroup.CGroupFile.MountID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsCgroupId() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsCgroupManager() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsCgroupVersion() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveCGroupVersion(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsCmdargv returns the value of the field, resolving if necessary -func (ev *Event) 
GetPtraceTraceeAncestorsCmdargv() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessCmdArgv(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsComm returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsComm() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Comm - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsContainerId() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsCreatedAt() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsEgid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsEgid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EGID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsEgroup() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return 
[]string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EGroup - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsEnvp() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsEnvs() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsEnvsTruncated() []bool { - if ev.GetEventType().String() != "ptrace" { - return []bool{} - } - if ev.PTrace.Tracee == nil { - return []bool{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsEuid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsEuid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EUID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsEuser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsEuser() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := 
&ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EUser - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileChangeTime() []uint64 { - if ev.GetEventType().String() != "ptrace" { - return []uint64{} - } - if ev.PTrace.Tracee == nil { - return []uint64{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.CTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileFilesystem() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileGid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.GID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileGroup() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileHashes() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - 
element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileInUpperLayer() []bool { - if ev.GetEventType().String() != "ptrace" { - return []bool{} - } - if ev.PTrace.Tracee == nil { - return []bool{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileInode() []uint64 { - if ev.GetEventType().String() != "ptrace" { - return []uint64{} - } - if ev.PTrace.Tracee == nil { - return []uint64{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileMode() []uint16 { - if ev.GetEventType().String() != "ptrace" { - return []uint16{} - } - if ev.PTrace.Tracee == nil { - return []uint16{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint16{} - } - var values []uint16 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.Mode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileModificationTime() []uint64 { - if ev.GetEventType().String() != "ptrace" { - return []uint64{} - } - if ev.PTrace.Tracee == nil { - return []uint64{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.MTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileMountId() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - 
result := element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileName() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileNameLength() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFilePackageName() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFilePackageSourceVersion() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFilePackageVersion() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := 
(*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFilePath() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFilePathLength() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileRights() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileUid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.UID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFileUser() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := 
ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFsgid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSGID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFsgroup() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSGroup - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFsuid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSUID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsFsuser() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSUser - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsGid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsGid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.GID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsGroup returns the value of 
the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsGroup() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.Group - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileChangeTime() []uint64 { - if ev.GetEventType().String() != "ptrace" { - return []uint64{} - } - if ev.PTrace.Tracee == nil { - return []uint64{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileFilesystem() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileGid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileGroup() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// 
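// Note (illustrative, not part of the patch): the removed Get*Ancestors* accessors above all
// share one generated shape — guard on the "ptrace" event type, bail out when Tracee or its
// Ancestor is nil, then walk the ancestor chain with ProcessAncestorsIterator and collect one
// value per ancestor. The sketch below is a minimal, self-contained rendering of that shape;
// ancestorNode and ptraceEvent are simplified stand-ins invented for illustration, not the real
// Event / ProcessCacheEntry / ProcessAncestorsIterator types from secl/model.
package main

import "fmt"

type ancestorNode struct {
	Comm   string
	Parent *ancestorNode
}

type ptraceEvent struct {
	EventType string
	Tracee    *ancestorNode
}

// ancestorsComm mirrors the generated pattern: type guard, nil guards,
// then a loop that appends one value per ancestor.
func ancestorsComm(ev *ptraceEvent) []string {
	if ev.EventType != "ptrace" {
		return []string{}
	}
	if ev.Tracee == nil || ev.Tracee.Parent == nil {
		return []string{}
	}
	var values []string
	for node := ev.Tracee.Parent; node != nil; node = node.Parent {
		values = append(values, node.Comm)
	}
	return values
}

func main() {
	grandParent := &ancestorNode{Comm: "systemd"}
	parent := &ancestorNode{Comm: "bash", Parent: grandParent}
	ev := &ptraceEvent{EventType: "ptrace", Tracee: &ancestorNode{Comm: "cat", Parent: parent}}
	fmt.Println(ancestorsComm(ev)) // [bash systemd]
}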
GetPtraceTraceeAncestorsInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileHashes() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileInUpperLayer() []bool { - if ev.GetEventType().String() != "ptrace" { - return []bool{} - } - if ev.PTrace.Tracee == nil { - return []bool{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileInode() []uint64 { - if ev.GetEventType().String() != "ptrace" { - return []uint64{} - } - if ev.PTrace.Tracee == nil { - return []uint64{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileMode() []uint16 { - if ev.GetEventType().String() != "ptrace" { - return []uint16{} - } - if ev.PTrace.Tracee == nil { - return []uint16{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint16{} - } - var values []uint16 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileModificationTime() []uint64 { - if ev.GetEventType().String() != "ptrace" { - return []uint64{} - } - if ev.PTrace.Tracee == nil { - return []uint64{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := 
element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileMountId() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileName() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileNameLength() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFilePackageName() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFilePackageSourceVersion() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := 
eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFilePackageVersion() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFilePath() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFilePathLength() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileRights() []int { - if ev.GetEventType().String() != "ptrace" { - return []int{} - } - if ev.PTrace.Tracee == nil { - return []int{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileUid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return 
[]uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsInterpreterFileUser() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsIsExec() []bool { - if ev.GetEventType().String() != "ptrace" { - return []bool{} - } - if ev.PTrace.Tracee == nil { - return []bool{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.IsExec - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsIsKworker() []bool { - if ev.GetEventType().String() != "ptrace" { - return []bool{} - } - if ev.PTrace.Tracee == nil { - return []bool{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PIDContext.IsKworker - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsIsThread() []bool { - if ev.GetEventType().String() != "ptrace" { - return []bool{} - } - if ev.PTrace.Tracee == nil { - return []bool{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Ancestor == nil { - return 0 - } - ctx := 
eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - return iterator.Len(ctx) -} - -// GetPtraceTraceeAncestorsPid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsPid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PIDContext.Pid - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsPpid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsPpid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PPid - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsTid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsTid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PIDContext.Tid - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsTtyName() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.TTYName - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsUid() []uint32 { - if ev.GetEventType().String() != "ptrace" { - return []uint32{} - } - if ev.PTrace.Tracee == nil { - return []uint32{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.UID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsUser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsUser() []string { - if ev.GetEventType().String() != 
"ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.User - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsUserSessionK8sGroups() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsUserSessionK8sUid() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeAncestorsUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAncestorsUserSessionK8sUsername() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetPtraceTraceeArgs returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeArgs() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgs(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeArgsFlags() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeArgsOptions() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if 
ev.PTrace.Tracee == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeArgsScrubbed() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeArgsTruncated() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeArgv returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeArgv() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeArgv0() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeArgvScrubbed() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeAuid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeAuid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - return ev.PTrace.Tracee.Process.Credentials.AUID -} - -// GetPtraceTraceeCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeCapEffective() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - return ev.PTrace.Tracee.Process.Credentials.CapEffective -} - -// GetPtraceTraceeCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeCapPermitted() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - return ev.PTrace.Tracee.Process.Credentials.CapPermitted -} - -// GetPtraceTraceeCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeCgroupFileInode() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - return ev.PTrace.Tracee.Process.CGroup.CGroupFile.Inode -} - -// GetPtraceTraceeCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeCgroupFileMountId() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - return ev.PTrace.Tracee.Process.CGroup.CGroupFile.MountID -} - -// GetPtraceTraceeCgroupId returns the value of the field, 
resolving if necessary -func (ev *Event) GetPtraceTraceeCgroupId() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.PTrace.Tracee.Process.CGroup) -} - -// GetPtraceTraceeCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeCgroupManager() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.PTrace.Tracee.Process.CGroup) -} - -// GetPtraceTraceeCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeCgroupVersion() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.PTrace.Tracee.Process.CGroup) -} - -// GetPtraceTraceeCmdargv returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeCmdargv() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessCmdArgv(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeComm returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeComm() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.PTrace.Tracee.Process.Comm -} - -// GetPtraceTraceeContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeContainerId() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeCreatedAt() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeEgid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeEgid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - return ev.PTrace.Tracee.Process.Credentials.EGID -} - -// GetPtraceTraceeEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeEgroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.PTrace.Tracee.Process.Credentials.EGroup -} - -// GetPtraceTraceeEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeEnvp() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeEnvs() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.PTrace.Tracee.Process) -} - -// 
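// Note (illustrative, not part of the patch): the scalar GetPtraceTracee* accessors above come in
// two flavors — fields read directly off the process struct (e.g. Credentials.EGID) and fields
// routed through a resolver (e.g. ResolveProcessEnvs) — both behind the same "ptrace"/nil guards
// with a zero value as the fallback. The sketch below shows both flavors; process, tracee, event
// and the resolveEnvs parameter are hypothetical stand-ins, not the real FieldHandlers interface.
package main

import "fmt"

type process struct {
	EGID    uint32
	rawEnvs []string
}

type tracee struct{ Process process }

type event struct {
	EventType string
	Tracee    *tracee
}

// egid reads the field directly, returning the zero value when a guard fails.
func egid(ev *event) uint32 {
	if ev.EventType != "ptrace" || ev.Tracee == nil {
		return 0
	}
	return ev.Tracee.Process.EGID
}

// envs goes through a caller-supplied resolver, mirroring the Resolve* calls in the generated code.
func envs(ev *event, resolveEnvs func(*process) []string) []string {
	if ev.EventType != "ptrace" || ev.Tracee == nil {
		return []string{}
	}
	return resolveEnvs(&ev.Tracee.Process)
}

func main() {
	ev := &event{EventType: "ptrace", Tracee: &tracee{Process: process{EGID: 1000, rawEnvs: []string{"PATH=/usr/bin"}}}}
	fmt.Println(egid(ev))                                                    // 1000
	fmt.Println(envs(ev, func(p *process) []string { return p.rawEnvs }))   // [PATH=/usr/bin]
}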
GetPtraceTraceeEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeEnvsTruncated() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeEuid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeEuid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - return ev.PTrace.Tracee.Process.Credentials.EUID -} - -// GetPtraceTraceeEuser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeEuser() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.PTrace.Tracee.Process.Credentials.EUser -} - -// GetPtraceTraceeExecTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeExecTime() time.Time { - if ev.GetEventType().String() != "ptrace" { - return time.Time{} - } - if ev.PTrace.Tracee == nil { - return time.Time{} - } - return ev.PTrace.Tracee.Process.ExecTime -} - -// GetPtraceTraceeExitTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeExitTime() time.Time { - if ev.GetEventType().String() != "ptrace" { - return time.Time{} - } - if ev.PTrace.Tracee == nil { - return time.Time{} - } - return ev.PTrace.Tracee.Process.ExitTime -} - -// GetPtraceTraceeFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileChangeTime() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return uint64(0) - } - return ev.PTrace.Tracee.Process.FileEvent.FileFields.CTime -} - -// GetPtraceTraceeFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileFilesystem() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Process.FileEvent) -} - -// GetPtraceTraceeFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileGid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return uint32(0) - } - return ev.PTrace.Tracee.Process.FileEvent.FileFields.GID -} - -// GetPtraceTraceeFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileGroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) -} - -// GetPtraceTraceeFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileHashes() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return []string{} - } - 
return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Process.FileEvent) -} - -// GetPtraceTraceeFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileInUpperLayer() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) -} - -// GetPtraceTraceeFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileInode() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return uint64(0) - } - return ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.Inode -} - -// GetPtraceTraceeFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileMode() uint16 { - if ev.GetEventType().String() != "ptrace" { - return uint16(0) - } - if ev.PTrace.Tracee == nil { - return uint16(0) - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return uint16(0) - } - return ev.PTrace.Tracee.Process.FileEvent.FileFields.Mode -} - -// GetPtraceTraceeFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileModificationTime() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return uint64(0) - } - return ev.PTrace.Tracee.Process.FileEvent.FileFields.MTime -} - -// GetPtraceTraceeFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileMountId() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return uint32(0) - } - return ev.PTrace.Tracee.Process.FileEvent.FileFields.PathKey.MountID -} - -// GetPtraceTraceeFileName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileName() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.FileEvent) -} - -// GetPtraceTraceeFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileNameLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.FileEvent)) -} - -// GetPtraceTraceeFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFilePackageName() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Process.FileEvent) -} - -// GetPtraceTraceeFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFilePackageSourceVersion() string { - if ev.GetEventType().String() != "ptrace" { - 
return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Process.FileEvent) -} - -// GetPtraceTraceeFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFilePackageVersion() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Process.FileEvent) -} - -// GetPtraceTraceeFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFilePath() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent) -} - -// GetPtraceTraceeFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFilePathLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent)) -} - -// GetPtraceTraceeFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileRights() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) -} - -// GetPtraceTraceeFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileUid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return uint32(0) - } - return ev.PTrace.Tracee.Process.FileEvent.FileFields.UID -} - -// GetPtraceTraceeFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFileUser() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Process.FileEvent.FileFields) -} - -// GetPtraceTraceeForkTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeForkTime() time.Time { - if ev.GetEventType().String() != "ptrace" { - return time.Time{} - } - if ev.PTrace.Tracee == nil { - return time.Time{} - } - return ev.PTrace.Tracee.Process.ForkTime -} - -// GetPtraceTraceeFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFsgid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - return ev.PTrace.Tracee.Process.Credentials.FSGID -} - -// GetPtraceTraceeFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFsgroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.PTrace.Tracee.Process.Credentials.FSGroup -} - -// GetPtraceTraceeFsuid 
returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFsuid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - return ev.PTrace.Tracee.Process.Credentials.FSUID -} - -// GetPtraceTraceeFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeFsuser() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.PTrace.Tracee.Process.Credentials.FSUser -} - -// GetPtraceTraceeGid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeGid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - return ev.PTrace.Tracee.Process.Credentials.GID -} - -// GetPtraceTraceeGroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeGroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - return ev.PTrace.Tracee.Process.Credentials.Group -} - -// GetPtraceTraceeInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileChangeTime() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return uint64(0) - } - return ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.CTime -} - -// GetPtraceTraceeInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileFilesystem() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileGid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return uint32(0) - } - return ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.GID -} - -// GetPtraceTraceeInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileGroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetPtraceTraceeInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileHashes() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) 
GetPtraceTraceeInterpreterFileInUpperLayer() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetPtraceTraceeInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileInode() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return uint64(0) - } - return ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode -} - -// GetPtraceTraceeInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileMode() uint16 { - if ev.GetEventType().String() != "ptrace" { - return uint16(0) - } - if ev.PTrace.Tracee == nil { - return uint16(0) - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return uint16(0) - } - return ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.Mode -} - -// GetPtraceTraceeInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileModificationTime() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return uint64(0) - } - return ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.MTime -} - -// GetPtraceTraceeInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileMountId() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return uint32(0) - } - return ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID -} - -// GetPtraceTraceeInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileName() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileNameLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent)) -} - -// GetPtraceTraceeInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFilePackageName() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) 
GetPtraceTraceeInterpreterFilePackageSourceVersion() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFilePackageVersion() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFilePath() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFilePathLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent)) -} - -// GetPtraceTraceeInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileRights() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetPtraceTraceeInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileUid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return uint32(0) - } - return ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields.UID -} - -// GetPtraceTraceeInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeInterpreterFileUser() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if !ev.PTrace.Tracee.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetPtraceTraceeIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeIsExec() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - return ev.PTrace.Tracee.Process.IsExec -} - -// GetPtraceTraceeIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeIsKworker() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - return ev.PTrace.Tracee.Process.PIDContext.IsKworker -} - -// 
GetPtraceTraceeIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeIsThread() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, &ev.PTrace.Tracee.Process) -} - -// GetPtraceTraceeParentArgs returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentArgs() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentArgsFlags() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if !ev.PTrace.Tracee.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentArgsOptions() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if !ev.PTrace.Tracee.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentArgsScrubbed() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentArgsTruncated() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - if ev.PTrace.Tracee.Parent == nil { - return false - } - if !ev.PTrace.Tracee.HasParent() { - return false - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentArgv returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentArgv() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if !ev.PTrace.Tracee.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentArgv0() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, 
ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentArgvScrubbed() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if !ev.PTrace.Tracee.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentAuid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentAuid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.Credentials.AUID -} - -// GetPtraceTraceeParentCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentCapEffective() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint64(0) - } - return ev.PTrace.Tracee.Parent.Credentials.CapEffective -} - -// GetPtraceTraceeParentCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentCapPermitted() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint64(0) - } - return ev.PTrace.Tracee.Parent.Credentials.CapPermitted -} - -// GetPtraceTraceeParentCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentCgroupFileInode() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint64(0) - } - return ev.PTrace.Tracee.Parent.CGroup.CGroupFile.Inode -} - -// GetPtraceTraceeParentCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentCgroupFileMountId() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.CGroup.CGroupFile.MountID -} - -// GetPtraceTraceeParentCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentCgroupId() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.PTrace.Tracee.Parent.CGroup) -} - -// GetPtraceTraceeParentCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentCgroupManager() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if 
ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.PTrace.Tracee.Parent.CGroup) -} - -// GetPtraceTraceeParentCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentCgroupVersion() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - if !ev.PTrace.Tracee.HasParent() { - return 0 - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.PTrace.Tracee.Parent.CGroup) -} - -// GetPtraceTraceeParentCmdargv returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentCmdargv() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if !ev.PTrace.Tracee.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentComm returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentComm() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.PTrace.Tracee.Parent.Comm -} - -// GetPtraceTraceeParentContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentContainerId() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentCreatedAt() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - if !ev.PTrace.Tracee.HasParent() { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentEgid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentEgid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.Credentials.EGID -} - -// GetPtraceTraceeParentEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentEgroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.PTrace.Tracee.Parent.Credentials.EGroup -} - -// GetPtraceTraceeParentEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentEnvp() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if 
ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if !ev.PTrace.Tracee.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentEnvs() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if !ev.PTrace.Tracee.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentEnvsTruncated() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - if ev.PTrace.Tracee.Parent == nil { - return false - } - if !ev.PTrace.Tracee.HasParent() { - return false - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentEuid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentEuid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.Credentials.EUID -} - -// GetPtraceTraceeParentEuser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentEuser() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.PTrace.Tracee.Parent.Credentials.EUser -} - -// GetPtraceTraceeParentFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileChangeTime() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint64(0) - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return uint64(0) - } - return ev.PTrace.Tracee.Parent.FileEvent.FileFields.CTime -} - -// GetPtraceTraceeParentFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileFilesystem() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Parent.FileEvent) -} - -// GetPtraceTraceeParentFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileGid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return uint32(0) - } - return 
ev.PTrace.Tracee.Parent.FileEvent.FileFields.GID -} - -// GetPtraceTraceeParentFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileGroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) -} - -// GetPtraceTraceeParentFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileHashes() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if !ev.PTrace.Tracee.HasParent() { - return []string{} - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Parent.FileEvent) -} - -// GetPtraceTraceeParentFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileInUpperLayer() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - if ev.PTrace.Tracee.Parent == nil { - return false - } - if !ev.PTrace.Tracee.HasParent() { - return false - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) -} - -// GetPtraceTraceeParentFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileInode() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint64(0) - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return uint64(0) - } - return ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.Inode -} - -// GetPtraceTraceeParentFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileMode() uint16 { - if ev.GetEventType().String() != "ptrace" { - return uint16(0) - } - if ev.PTrace.Tracee == nil { - return uint16(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint16(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint16(0) - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return uint16(0) - } - return ev.PTrace.Tracee.Parent.FileEvent.FileFields.Mode -} - -// GetPtraceTraceeParentFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileModificationTime() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint64(0) - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return uint64(0) - } - return ev.PTrace.Tracee.Parent.FileEvent.FileFields.MTime -} - -// GetPtraceTraceeParentFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileMountId() uint32 { - if ev.GetEventType().String() != "ptrace" { - return 
uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.FileEvent.FileFields.PathKey.MountID -} - -// GetPtraceTraceeParentFileName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileName() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.FileEvent) -} - -// GetPtraceTraceeParentFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileNameLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.FileEvent)) -} - -// GetPtraceTraceeParentFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFilePackageName() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Parent.FileEvent) -} - -// GetPtraceTraceeParentFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFilePackageSourceVersion() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Parent.FileEvent) -} - -// GetPtraceTraceeParentFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFilePackageVersion() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Parent.FileEvent) -} - -// GetPtraceTraceeParentFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFilePath() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent) -} - -// GetPtraceTraceeParentFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) 
GetPtraceTraceeParentFilePathLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent)) -} - -// GetPtraceTraceeParentFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileRights() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - if !ev.PTrace.Tracee.HasParent() { - return 0 - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) -} - -// GetPtraceTraceeParentFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileUid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.FileEvent.FileFields.UID -} - -// GetPtraceTraceeParentFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFileUser() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Parent.FileEvent.FileFields) -} - -// GetPtraceTraceeParentFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFsgid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.Credentials.FSGID -} - -// GetPtraceTraceeParentFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFsgroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.PTrace.Tracee.Parent.Credentials.FSGroup -} - -// GetPtraceTraceeParentFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFsuid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.Credentials.FSUID -} - -// GetPtraceTraceeParentFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentFsuser() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return 
ev.PTrace.Tracee.Parent.Credentials.FSUser -} - -// GetPtraceTraceeParentGid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentGid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.Credentials.GID -} - -// GetPtraceTraceeParentGroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentGroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.PTrace.Tracee.Parent.Credentials.Group -} - -// GetPtraceTraceeParentInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileChangeTime() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint64(0) - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return uint64(0) - } - return ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.CTime -} - -// GetPtraceTraceeParentInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileFilesystem() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeParentInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileGid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.GID -} - -// GetPtraceTraceeParentInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileGroup() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) -} - -// GetPtraceTraceeParentInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileHashes() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if 
!ev.PTrace.Tracee.HasParent() { - return []string{} - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeParentInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileInUpperLayer() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - if ev.PTrace.Tracee.Parent == nil { - return false - } - if !ev.PTrace.Tracee.HasParent() { - return false - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) -} - -// GetPtraceTraceeParentInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileInode() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint64(0) - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return uint64(0) - } - return ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode -} - -// GetPtraceTraceeParentInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileMode() uint16 { - if ev.GetEventType().String() != "ptrace" { - return uint16(0) - } - if ev.PTrace.Tracee == nil { - return uint16(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint16(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint16(0) - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return uint16(0) - } - return ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.Mode -} - -// GetPtraceTraceeParentInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileModificationTime() uint64 { - if ev.GetEventType().String() != "ptrace" { - return uint64(0) - } - if ev.PTrace.Tracee == nil { - return uint64(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint64(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint64(0) - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return uint64(0) - } - return ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.MTime -} - -// GetPtraceTraceeParentInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileMountId() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID -} - -// GetPtraceTraceeParentInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileName() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if 
!ev.PTrace.Tracee.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeParentInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileNameLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent)) -} - -// GetPtraceTraceeParentInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFilePackageName() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeParentInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFilePackageSourceVersion() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeParentInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFilePackageVersion() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeParentInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFilePath() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) -} - -// GetPtraceTraceeParentInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFilePathLength() int { - if ev.GetEventType().String() != "ptrace" { - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent)) -} - -// GetPtraceTraceeParentInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileRights() int { - if ev.GetEventType().String() != "ptrace" 
{ - return 0 - } - if ev.PTrace.Tracee == nil { - return 0 - } - if ev.PTrace.Tracee.Parent == nil { - return 0 - } - if !ev.PTrace.Tracee.HasParent() { - return 0 - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) -} - -// GetPtraceTraceeParentInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileUid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields.UID -} - -// GetPtraceTraceeParentInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentInterpreterFileUser() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - if !ev.PTrace.Tracee.Parent.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent.FileFields) -} - -// GetPtraceTraceeParentIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentIsExec() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - if ev.PTrace.Tracee.Parent == nil { - return false - } - if !ev.PTrace.Tracee.HasParent() { - return false - } - return ev.PTrace.Tracee.Parent.IsExec -} - -// GetPtraceTraceeParentIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentIsKworker() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - if ev.PTrace.Tracee.Parent == nil { - return false - } - if !ev.PTrace.Tracee.HasParent() { - return false - } - return ev.PTrace.Tracee.Parent.PIDContext.IsKworker -} - -// GetPtraceTraceeParentIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentIsThread() bool { - if ev.GetEventType().String() != "ptrace" { - return false - } - if ev.PTrace.Tracee == nil { - return false - } - if ev.PTrace.Tracee.Parent == nil { - return false - } - if !ev.PTrace.Tracee.HasParent() { - return false - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.PTrace.Tracee.Parent) -} - -// GetPtraceTraceeParentPid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentPid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.PIDContext.Pid -} - -// GetPtraceTraceeParentPpid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentPpid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if 
!ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.PPid -} - -// GetPtraceTraceeParentTid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentTid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.PIDContext.Tid -} - -// GetPtraceTraceeParentTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentTtyName() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.PTrace.Tracee.Parent.TTYName -} - -// GetPtraceTraceeParentUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentUid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - if ev.PTrace.Tracee.Parent == nil { - return uint32(0) - } - if !ev.PTrace.Tracee.HasParent() { - return uint32(0) - } - return ev.PTrace.Tracee.Parent.Credentials.UID -} - -// GetPtraceTraceeParentUser returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentUser() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.PTrace.Tracee.Parent.Credentials.User -} - -// GetPtraceTraceeParentUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentUserSessionK8sGroups() []string { - if ev.GetEventType().String() != "ptrace" { - return []string{} - } - if ev.PTrace.Tracee == nil { - return []string{} - } - if ev.PTrace.Tracee.Parent == nil { - return []string{} - } - if !ev.PTrace.Tracee.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.PTrace.Tracee.Parent.UserSession) -} - -// GetPtraceTraceeParentUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentUserSessionK8sUid() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.PTrace.Tracee.Parent.UserSession) -} - -// GetPtraceTraceeParentUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceeParentUserSessionK8sUsername() string { - if ev.GetEventType().String() != "ptrace" { - return "" - } - if ev.PTrace.Tracee == nil { - return "" - } - if ev.PTrace.Tracee.Parent == nil { - return "" - } - if !ev.PTrace.Tracee.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.PTrace.Tracee.Parent.UserSession) -} - -// GetPtraceTraceePid returns the value of the field, resolving if necessary -func (ev *Event) GetPtraceTraceePid() uint32 { - if ev.GetEventType().String() != "ptrace" { - return uint32(0) - } - if ev.PTrace.Tracee == nil { - return uint32(0) - } - return ev.PTrace.Tracee.Process.PIDContext.Pid -} 
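Aside (not part of the diff): the removed getters above and below all follow one generated guard-clause shape -- verify the event type, nil-check each optional pointer in the chain (Tracee, Parent, interpreter), and fall back to the field type's zero value before returning the raw or handler-resolved value. The sketch below is a minimal, hypothetical illustration of that shape; the struct names here are simplified stand-ins, not the real secl model types or field handlers.

// sketch.go -- illustrative only; Parent/Tracee/Event are hypothetical stand-ins for the real model types.
package main

import "fmt"

type Parent struct{ Pid uint32 }

type Tracee struct{ Parent *Parent }

type Event struct {
	Kind   string
	Tracee *Tracee
}

// GetTraceeParentPid mirrors the generated pattern: any failed precondition yields the zero value.
func (ev *Event) GetTraceeParentPid() uint32 {
	if ev.Kind != "ptrace" { // wrong event type
		return uint32(0)
	}
	if ev.Tracee == nil || ev.Tracee.Parent == nil { // missing ancestry
		return uint32(0)
	}
	return ev.Tracee.Parent.Pid
}

func main() {
	ev := &Event{Kind: "ptrace", Tracee: &Tracee{Parent: &Parent{Pid: 42}}}
	fmt.Println(ev.GetTraceeParentPid())                     // 42
	fmt.Println((&Event{Kind: "open"}).GetTraceeParentPid()) // 0
}

Returning zero values instead of errors keeps call sites simple, which appears to be the intent of these generated accessors.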
-
-// GetPtraceTraceePpid returns the value of the field, resolving if necessary
-func (ev *Event) GetPtraceTraceePpid() uint32 {
-	if ev.GetEventType().String() != "ptrace" {
-		return uint32(0)
-	}
-	if ev.PTrace.Tracee == nil {
-		return uint32(0)
-	}
-	return ev.PTrace.Tracee.Process.PPid
-}
-
-// GetPtraceTraceeTid returns the value of the field, resolving if necessary
-func (ev *Event) GetPtraceTraceeTid() uint32 {
-	if ev.GetEventType().String() != "ptrace" {
-		return uint32(0)
-	}
-	if ev.PTrace.Tracee == nil {
-		return uint32(0)
-	}
-	return ev.PTrace.Tracee.Process.PIDContext.Tid
-}
-
-// GetPtraceTraceeTtyName returns the value of the field, resolving if necessary
-func (ev *Event) GetPtraceTraceeTtyName() string {
-	if ev.GetEventType().String() != "ptrace" {
-		return ""
-	}
-	if ev.PTrace.Tracee == nil {
-		return ""
-	}
-	return ev.PTrace.Tracee.Process.TTYName
-}
-
-// GetPtraceTraceeUid returns the value of the field, resolving if necessary
-func (ev *Event) GetPtraceTraceeUid() uint32 {
-	if ev.GetEventType().String() != "ptrace" {
-		return uint32(0)
-	}
-	if ev.PTrace.Tracee == nil {
-		return uint32(0)
-	}
-	return ev.PTrace.Tracee.Process.Credentials.UID
-}
-
-// GetPtraceTraceeUser returns the value of the field, resolving if necessary
-func (ev *Event) GetPtraceTraceeUser() string {
-	if ev.GetEventType().String() != "ptrace" {
-		return ""
-	}
-	if ev.PTrace.Tracee == nil {
-		return ""
-	}
-	return ev.PTrace.Tracee.Process.Credentials.User
-}
-
-// GetPtraceTraceeUserSessionK8sGroups returns the value of the field, resolving if necessary
-func (ev *Event) GetPtraceTraceeUserSessionK8sGroups() []string {
-	if ev.GetEventType().String() != "ptrace" {
-		return []string{}
-	}
-	if ev.PTrace.Tracee == nil {
-		return []string{}
-	}
-	return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.PTrace.Tracee.Process.UserSession)
-}
-
-// GetPtraceTraceeUserSessionK8sUid returns the value of the field, resolving if necessary
-func (ev *Event) GetPtraceTraceeUserSessionK8sUid() string {
-	if ev.GetEventType().String() != "ptrace" {
-		return ""
-	}
-	if ev.PTrace.Tracee == nil {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveK8SUID(ev, &ev.PTrace.Tracee.Process.UserSession)
-}
-
-// GetPtraceTraceeUserSessionK8sUsername returns the value of the field, resolving if necessary
-func (ev *Event) GetPtraceTraceeUserSessionK8sUsername() string {
-	if ev.GetEventType().String() != "ptrace" {
-		return ""
-	}
-	if ev.PTrace.Tracee == nil {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.PTrace.Tracee.Process.UserSession)
-}
-
-// GetRemovexattrFileChangeTime returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileChangeTime() uint64 {
-	if ev.GetEventType().String() != "removexattr" {
-		return uint64(0)
-	}
-	return ev.RemoveXAttr.File.FileFields.CTime
-}
-
-// GetRemovexattrFileDestinationName returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileDestinationName() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveXAttrName(ev, &ev.RemoveXAttr)
-}
-
-// GetRemovexattrFileDestinationNamespace returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileDestinationNamespace() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveXAttrNamespace(ev, &ev.RemoveXAttr)
-}
-
-// GetRemovexattrFileFilesystem returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileFilesystem() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.RemoveXAttr.File)
-}
-
-// GetRemovexattrFileGid returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileGid() uint32 {
-	if ev.GetEventType().String() != "removexattr" {
-		return uint32(0)
-	}
-	return ev.RemoveXAttr.File.FileFields.GID
-}
-
-// GetRemovexattrFileGroup returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileGroup() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.RemoveXAttr.File.FileFields)
-}
-
-// GetRemovexattrFileHashes returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileHashes() []string {
-	if ev.GetEventType().String() != "removexattr" {
-		return []string{}
-	}
-	return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.RemoveXAttr.File)
-}
-
-// GetRemovexattrFileInUpperLayer returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileInUpperLayer() bool {
-	if ev.GetEventType().String() != "removexattr" {
-		return false
-	}
-	return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.RemoveXAttr.File.FileFields)
-}
-
-// GetRemovexattrFileInode returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileInode() uint64 {
-	if ev.GetEventType().String() != "removexattr" {
-		return uint64(0)
-	}
-	return ev.RemoveXAttr.File.FileFields.PathKey.Inode
-}
-
-// GetRemovexattrFileMode returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileMode() uint16 {
-	if ev.GetEventType().String() != "removexattr" {
-		return uint16(0)
-	}
-	return ev.RemoveXAttr.File.FileFields.Mode
-}
-
-// GetRemovexattrFileModificationTime returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileModificationTime() uint64 {
-	if ev.GetEventType().String() != "removexattr" {
-		return uint64(0)
-	}
-	return ev.RemoveXAttr.File.FileFields.MTime
-}
-
-// GetRemovexattrFileMountId returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileMountId() uint32 {
-	if ev.GetEventType().String() != "removexattr" {
-		return uint32(0)
-	}
-	return ev.RemoveXAttr.File.FileFields.PathKey.MountID
-}
-
-// GetRemovexattrFileName returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileName() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveFileBasename(ev, &ev.RemoveXAttr.File)
-}
-
-// GetRemovexattrFileNameLength returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileNameLength() int {
-	if ev.GetEventType().String() != "removexattr" {
-		return 0
-	}
-	return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.RemoveXAttr.File))
-}
-
-// GetRemovexattrFilePackageName returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFilePackageName() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolvePackageName(ev, &ev.RemoveXAttr.File)
-}
-
-// GetRemovexattrFilePackageSourceVersion returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFilePackageSourceVersion() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.RemoveXAttr.File)
-}
-
-// GetRemovexattrFilePackageVersion returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFilePackageVersion() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.RemoveXAttr.File)
-}
-
-// GetRemovexattrFilePath returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFilePath() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveFilePath(ev, &ev.RemoveXAttr.File)
-}
-
-// GetRemovexattrFilePathLength returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFilePathLength() int {
-	if ev.GetEventType().String() != "removexattr" {
-		return 0
-	}
-	return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.RemoveXAttr.File))
-}
-
-// GetRemovexattrFileRights returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileRights() int {
-	if ev.GetEventType().String() != "removexattr" {
-		return 0
-	}
-	return ev.FieldHandlers.ResolveRights(ev, &ev.RemoveXAttr.File.FileFields)
-}
-
-// GetRemovexattrFileUid returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileUid() uint32 {
-	if ev.GetEventType().String() != "removexattr" {
-		return uint32(0)
-	}
-	return ev.RemoveXAttr.File.FileFields.UID
-}
-
-// GetRemovexattrFileUser returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrFileUser() string {
-	if ev.GetEventType().String() != "removexattr" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.RemoveXAttr.File.FileFields)
-}
-
-// GetRemovexattrRetval returns the value of the field, resolving if necessary
-func (ev *Event) GetRemovexattrRetval() int64 {
-	if ev.GetEventType().String() != "removexattr" {
-		return int64(0)
-	}
-	return ev.RemoveXAttr.SyscallEvent.Retval
-}
-
-// GetRenameFileChangeTime returns the value of the field, resolving if necessary
-func (ev *Event) GetRenameFileChangeTime() uint64 {
-	if ev.GetEventType().String() != "rename" {
-		return uint64(0)
-	}
-	return ev.Rename.Old.FileFields.CTime
-}
-
-// GetRenameFileDestinationChangeTime returns the value of the field, resolving if necessary
-func (ev *Event) GetRenameFileDestinationChangeTime() uint64 {
-	if ev.GetEventType().String() != "rename" {
-		return uint64(0)
-	}
-	return ev.Rename.New.FileFields.CTime
-}
-
-// GetRenameFileDestinationFilesystem returns the value of the field, resolving if necessary
-func (ev *Event) GetRenameFileDestinationFilesystem() string {
-	if ev.GetEventType().String() != "rename" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Rename.New)
-}
-
-// GetRenameFileDestinationGid returns the value of the field, resolving if necessary
-func (ev *Event) GetRenameFileDestinationGid() uint32 {
-	if ev.GetEventType().String() != "rename" {
-		return uint32(0)
-	}
-	return ev.Rename.New.FileFields.GID
-}
-
-// GetRenameFileDestinationGroup returns the value of the field, resolving if necessary
-func (ev *Event) GetRenameFileDestinationGroup() string {
-	if ev.GetEventType().String() != "rename" {
-		return ""
-	}
-	return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Rename.New.FileFields)
-}
-
-// GetRenameFileDestinationHashes returns the value of the field, resolving if necessary
-func (ev *Event) GetRenameFileDestinationHashes() []string {
-	if
ev.GetEventType().String() != "rename" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Rename.New) -} - -// GetRenameFileDestinationInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationInUpperLayer() bool { - if ev.GetEventType().String() != "rename" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Rename.New.FileFields) -} - -// GetRenameFileDestinationInode returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationInode() uint64 { - if ev.GetEventType().String() != "rename" { - return uint64(0) - } - return ev.Rename.New.FileFields.PathKey.Inode -} - -// GetRenameFileDestinationMode returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationMode() uint16 { - if ev.GetEventType().String() != "rename" { - return uint16(0) - } - return ev.Rename.New.FileFields.Mode -} - -// GetRenameFileDestinationModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationModificationTime() uint64 { - if ev.GetEventType().String() != "rename" { - return uint64(0) - } - return ev.Rename.New.FileFields.MTime -} - -// GetRenameFileDestinationMountId returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationMountId() uint32 { - if ev.GetEventType().String() != "rename" { - return uint32(0) - } - return ev.Rename.New.FileFields.PathKey.MountID -} - -// GetRenameFileDestinationName returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationName() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rename.New) -} - -// GetRenameFileDestinationNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationNameLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rename.New)) -} - -// GetRenameFileDestinationPackageName returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationPackageName() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Rename.New) -} - -// GetRenameFileDestinationPackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationPackageSourceVersion() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Rename.New) -} - -// GetRenameFileDestinationPackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationPackageVersion() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Rename.New) -} - -// GetRenameFileDestinationPath returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationPath() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.New) -} - -// GetRenameFileDestinationPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationPathLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return 
len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.New)) -} - -// GetRenameFileDestinationRights returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationRights() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Rename.New.FileFields) -} - -// GetRenameFileDestinationUid returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationUid() uint32 { - if ev.GetEventType().String() != "rename" { - return uint32(0) - } - return ev.Rename.New.FileFields.UID -} - -// GetRenameFileDestinationUser returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationUser() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Rename.New.FileFields) -} - -// GetRenameFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileFilesystem() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Rename.Old) -} - -// GetRenameFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileGid() uint32 { - if ev.GetEventType().String() != "rename" { - return uint32(0) - } - return ev.Rename.Old.FileFields.GID -} - -// GetRenameFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileGroup() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Rename.Old.FileFields) -} - -// GetRenameFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileHashes() []string { - if ev.GetEventType().String() != "rename" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Rename.Old) -} - -// GetRenameFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileInUpperLayer() bool { - if ev.GetEventType().String() != "rename" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Rename.Old.FileFields) -} - -// GetRenameFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileInode() uint64 { - if ev.GetEventType().String() != "rename" { - return uint64(0) - } - return ev.Rename.Old.FileFields.PathKey.Inode -} - -// GetRenameFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileMode() uint16 { - if ev.GetEventType().String() != "rename" { - return uint16(0) - } - return ev.Rename.Old.FileFields.Mode -} - -// GetRenameFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileModificationTime() uint64 { - if ev.GetEventType().String() != "rename" { - return uint64(0) - } - return ev.Rename.Old.FileFields.MTime -} - -// GetRenameFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileMountId() uint32 { - if ev.GetEventType().String() != "rename" { - return uint32(0) - } - return ev.Rename.Old.FileFields.PathKey.MountID -} - -// GetRenameFileName returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileName() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rename.Old) -} - -// GetRenameFileNameLength returns the value of the 
field, resolving if necessary -func (ev *Event) GetRenameFileNameLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rename.Old)) -} - -// GetRenameFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFilePackageName() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Rename.Old) -} - -// GetRenameFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFilePackageSourceVersion() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Rename.Old) -} - -// GetRenameFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFilePackageVersion() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Rename.Old) -} - -// GetRenameFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFilePath() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.Old) -} - -// GetRenameFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFilePathLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.Old)) -} - -// GetRenameFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileRights() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Rename.Old.FileFields) -} - -// GetRenameFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileUid() uint32 { - if ev.GetEventType().String() != "rename" { - return uint32(0) - } - return ev.Rename.Old.FileFields.UID -} - -// GetRenameFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileUser() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Rename.Old.FileFields) -} - -// GetRenameRetval returns the value of the field, resolving if necessary -func (ev *Event) GetRenameRetval() int64 { - if ev.GetEventType().String() != "rename" { - return int64(0) - } - return ev.Rename.SyscallEvent.Retval -} - -// GetRenameSyscallDestinationPath returns the value of the field, resolving if necessary -func (ev *Event) GetRenameSyscallDestinationPath() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Rename.SyscallContext) -} - -// GetRenameSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetRenameSyscallInt1() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Rename.SyscallContext) -} - -// GetRenameSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetRenameSyscallInt2() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Rename.SyscallContext) -} - -// GetRenameSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetRenameSyscallInt3() 
int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Rename.SyscallContext) -} - -// GetRenameSyscallPath returns the value of the field, resolving if necessary -func (ev *Event) GetRenameSyscallPath() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Rename.SyscallContext) -} - -// GetRenameSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetRenameSyscallStr1() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Rename.SyscallContext) -} - -// GetRenameSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) GetRenameSyscallStr2() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Rename.SyscallContext) -} - -// GetRenameSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetRenameSyscallStr3() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Rename.SyscallContext) -} - -// GetRmdirFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileChangeTime() uint64 { - if ev.GetEventType().String() != "rmdir" { - return uint64(0) - } - return ev.Rmdir.File.FileFields.CTime -} - -// GetRmdirFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileFilesystem() string { - if ev.GetEventType().String() != "rmdir" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Rmdir.File) -} - -// GetRmdirFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileGid() uint32 { - if ev.GetEventType().String() != "rmdir" { - return uint32(0) - } - return ev.Rmdir.File.FileFields.GID -} - -// GetRmdirFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileGroup() string { - if ev.GetEventType().String() != "rmdir" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Rmdir.File.FileFields) -} - -// GetRmdirFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileHashes() []string { - if ev.GetEventType().String() != "rmdir" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Rmdir.File) -} - -// GetRmdirFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileInUpperLayer() bool { - if ev.GetEventType().String() != "rmdir" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Rmdir.File.FileFields) -} - -// GetRmdirFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileInode() uint64 { - if ev.GetEventType().String() != "rmdir" { - return uint64(0) - } - return ev.Rmdir.File.FileFields.PathKey.Inode -} - -// GetRmdirFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileMode() uint16 { - if ev.GetEventType().String() != "rmdir" { - return uint16(0) - } - return ev.Rmdir.File.FileFields.Mode -} - -// GetRmdirFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileModificationTime() uint64 { - if ev.GetEventType().String() != "rmdir" { - return uint64(0) - } - 
return ev.Rmdir.File.FileFields.MTime -} - -// GetRmdirFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileMountId() uint32 { - if ev.GetEventType().String() != "rmdir" { - return uint32(0) - } - return ev.Rmdir.File.FileFields.PathKey.MountID -} - -// GetRmdirFileName returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileName() string { - if ev.GetEventType().String() != "rmdir" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rmdir.File) -} - -// GetRmdirFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileNameLength() int { - if ev.GetEventType().String() != "rmdir" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Rmdir.File)) -} - -// GetRmdirFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFilePackageName() string { - if ev.GetEventType().String() != "rmdir" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Rmdir.File) -} - -// GetRmdirFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFilePackageSourceVersion() string { - if ev.GetEventType().String() != "rmdir" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Rmdir.File) -} - -// GetRmdirFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFilePackageVersion() string { - if ev.GetEventType().String() != "rmdir" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Rmdir.File) -} - -// GetRmdirFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFilePath() string { - if ev.GetEventType().String() != "rmdir" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rmdir.File) -} - -// GetRmdirFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFilePathLength() int { - if ev.GetEventType().String() != "rmdir" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Rmdir.File)) -} - -// GetRmdirFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileRights() int { - if ev.GetEventType().String() != "rmdir" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Rmdir.File.FileFields) -} - -// GetRmdirFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileUid() uint32 { - if ev.GetEventType().String() != "rmdir" { - return uint32(0) - } - return ev.Rmdir.File.FileFields.UID -} - -// GetRmdirFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirFileUser() string { - if ev.GetEventType().String() != "rmdir" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Rmdir.File.FileFields) -} - -// GetRmdirRetval returns the value of the field, resolving if necessary -func (ev *Event) GetRmdirRetval() int64 { - if ev.GetEventType().String() != "rmdir" { - return int64(0) - } - return ev.Rmdir.SyscallEvent.Retval -} - -// GetSelinuxBoolName returns the value of the field, resolving if necessary -func (ev *Event) GetSelinuxBoolName() string { - if ev.GetEventType().String() != "selinux" { - return "" - } - return ev.FieldHandlers.ResolveSELinuxBoolName(ev, &ev.SELinux) -} - -// GetSelinuxBoolState returns the value of the field, resolving if necessary -func (ev *Event) GetSelinuxBoolState() string { - if 
ev.GetEventType().String() != "selinux" { - return "" - } - return ev.SELinux.BoolChangeValue -} - -// GetSelinuxBoolCommitState returns the value of the field, resolving if necessary -func (ev *Event) GetSelinuxBoolCommitState() bool { - if ev.GetEventType().String() != "selinux" { - return false - } - return ev.SELinux.BoolCommitValue -} - -// GetSelinuxEnforceStatus returns the value of the field, resolving if necessary -func (ev *Event) GetSelinuxEnforceStatus() string { - if ev.GetEventType().String() != "selinux" { - return "" - } - return ev.SELinux.EnforceStatus -} - -// GetSetgidEgid returns the value of the field, resolving if necessary -func (ev *Event) GetSetgidEgid() uint32 { - if ev.GetEventType().String() != "setgid" { - return uint32(0) - } - return ev.SetGID.EGID -} - -// GetSetgidEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetSetgidEgroup() string { - if ev.GetEventType().String() != "setgid" { - return "" - } - return ev.FieldHandlers.ResolveSetgidEGroup(ev, &ev.SetGID) -} - -// GetSetgidFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetSetgidFsgid() uint32 { - if ev.GetEventType().String() != "setgid" { - return uint32(0) - } - return ev.SetGID.FSGID -} - -// GetSetgidFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetSetgidFsgroup() string { - if ev.GetEventType().String() != "setgid" { - return "" - } - return ev.FieldHandlers.ResolveSetgidFSGroup(ev, &ev.SetGID) -} - -// GetSetgidGid returns the value of the field, resolving if necessary -func (ev *Event) GetSetgidGid() uint32 { - if ev.GetEventType().String() != "setgid" { - return uint32(0) - } - return ev.SetGID.GID -} - -// GetSetgidGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSetgidGroup() string { - if ev.GetEventType().String() != "setgid" { - return "" - } - return ev.FieldHandlers.ResolveSetgidGroup(ev, &ev.SetGID) -} - -// GetSetuidEuid returns the value of the field, resolving if necessary -func (ev *Event) GetSetuidEuid() uint32 { - if ev.GetEventType().String() != "setuid" { - return uint32(0) - } - return ev.SetUID.EUID -} - -// GetSetuidEuser returns the value of the field, resolving if necessary -func (ev *Event) GetSetuidEuser() string { - if ev.GetEventType().String() != "setuid" { - return "" - } - return ev.FieldHandlers.ResolveSetuidEUser(ev, &ev.SetUID) -} - -// GetSetuidFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetSetuidFsuid() uint32 { - if ev.GetEventType().String() != "setuid" { - return uint32(0) - } - return ev.SetUID.FSUID -} - -// GetSetuidFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetSetuidFsuser() string { - if ev.GetEventType().String() != "setuid" { - return "" - } - return ev.FieldHandlers.ResolveSetuidFSUser(ev, &ev.SetUID) -} - -// GetSetuidUid returns the value of the field, resolving if necessary -func (ev *Event) GetSetuidUid() uint32 { - if ev.GetEventType().String() != "setuid" { - return uint32(0) - } - return ev.SetUID.UID -} - -// GetSetuidUser returns the value of the field, resolving if necessary -func (ev *Event) GetSetuidUser() string { - if ev.GetEventType().String() != "setuid" { - return "" - } - return ev.FieldHandlers.ResolveSetuidUser(ev, &ev.SetUID) -} - -// GetSetxattrFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileChangeTime() uint64 { - if ev.GetEventType().String() != "setxattr" { - return 
uint64(0) - } - return ev.SetXAttr.File.FileFields.CTime -} - -// GetSetxattrFileDestinationName returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileDestinationName() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolveXAttrName(ev, &ev.SetXAttr) -} - -// GetSetxattrFileDestinationNamespace returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileDestinationNamespace() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolveXAttrNamespace(ev, &ev.SetXAttr) -} - -// GetSetxattrFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileFilesystem() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.SetXAttr.File) -} - -// GetSetxattrFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileGid() uint32 { - if ev.GetEventType().String() != "setxattr" { - return uint32(0) - } - return ev.SetXAttr.File.FileFields.GID -} - -// GetSetxattrFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileGroup() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.SetXAttr.File.FileFields) -} - -// GetSetxattrFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileHashes() []string { - if ev.GetEventType().String() != "setxattr" { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.SetXAttr.File) -} - -// GetSetxattrFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileInUpperLayer() bool { - if ev.GetEventType().String() != "setxattr" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.SetXAttr.File.FileFields) -} - -// GetSetxattrFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileInode() uint64 { - if ev.GetEventType().String() != "setxattr" { - return uint64(0) - } - return ev.SetXAttr.File.FileFields.PathKey.Inode -} - -// GetSetxattrFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileMode() uint16 { - if ev.GetEventType().String() != "setxattr" { - return uint16(0) - } - return ev.SetXAttr.File.FileFields.Mode -} - -// GetSetxattrFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileModificationTime() uint64 { - if ev.GetEventType().String() != "setxattr" { - return uint64(0) - } - return ev.SetXAttr.File.FileFields.MTime -} - -// GetSetxattrFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileMountId() uint32 { - if ev.GetEventType().String() != "setxattr" { - return uint32(0) - } - return ev.SetXAttr.File.FileFields.PathKey.MountID -} - -// GetSetxattrFileName returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileName() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.SetXAttr.File) -} - -// GetSetxattrFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileNameLength() int { - if ev.GetEventType().String() != "setxattr" { - return 0 - } - return 
len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.SetXAttr.File)) -} - -// GetSetxattrFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFilePackageName() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.SetXAttr.File) -} - -// GetSetxattrFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFilePackageSourceVersion() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.SetXAttr.File) -} - -// GetSetxattrFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFilePackageVersion() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.SetXAttr.File) -} - -// GetSetxattrFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFilePath() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.SetXAttr.File) -} - -// GetSetxattrFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFilePathLength() int { - if ev.GetEventType().String() != "setxattr" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.SetXAttr.File)) -} - -// GetSetxattrFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileRights() int { - if ev.GetEventType().String() != "setxattr" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.SetXAttr.File.FileFields) -} - -// GetSetxattrFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileUid() uint32 { - if ev.GetEventType().String() != "setxattr" { - return uint32(0) - } - return ev.SetXAttr.File.FileFields.UID -} - -// GetSetxattrFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrFileUser() string { - if ev.GetEventType().String() != "setxattr" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.SetXAttr.File.FileFields) -} - -// GetSetxattrRetval returns the value of the field, resolving if necessary -func (ev *Event) GetSetxattrRetval() int64 { - if ev.GetEventType().String() != "setxattr" { - return int64(0) - } - return ev.SetXAttr.SyscallEvent.Retval -} - -// GetSignalPid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalPid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - return ev.Signal.PID -} - -// GetSignalRetval returns the value of the field, resolving if necessary -func (ev *Event) GetSignalRetval() int64 { - if ev.GetEventType().String() != "signal" { - return int64(0) - } - return ev.Signal.SyscallEvent.Retval -} - -// GetSignalTargetAncestorsArgs returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsArgs() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgs(ev, 
&element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsArgsFlags() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsFlags(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsArgsOptions() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsOptions(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsArgsScrubbed() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsArgsTruncated() []bool { - if ev.GetEventType().String() != "signal" { - return []bool{} - } - if ev.Signal.Target == nil { - return []bool{} - } - if ev.Signal.Target.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsArgv returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsArgv() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgv(ev, &element.ProcessContext.Process) - values = 
append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsArgv0() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgv0(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsArgvScrubbed() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsAuid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsAuid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.AUID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsCapEffective() []uint64 { - if ev.GetEventType().String() != "signal" { - return []uint64{} - } - if ev.Signal.Target == nil { - return []uint64{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.CapEffective - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsCapPermitted() []uint64 { - if ev.GetEventType().String() != "signal" { - return []uint64{} - } - if ev.Signal.Target == nil { - return []uint64{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.CapPermitted - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsCgroupFileInode returns 
the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsCgroupFileInode() []uint64 { - if ev.GetEventType().String() != "signal" { - return []uint64{} - } - if ev.Signal.Target == nil { - return []uint64{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.CGroup.CGroupFile.Inode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsCgroupFileMountId() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.CGroup.CGroupFile.MountID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsCgroupId() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveCGroupID(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsCgroupManager() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveCGroupManager(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsCgroupVersion() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveCGroupVersion(ev, &element.ProcessContext.Process.CGroup) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsCmdargv returns the value of the field, resolving if necessary -func (ev *Event) 
GetSignalTargetAncestorsCmdargv() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessCmdArgv(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsComm returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsComm() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Comm - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsContainerId() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessContainerID(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsCreatedAt() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsEgid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsEgid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EGID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsEgroup() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return 
[]string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EGroup - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsEnvp() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsEnvs() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsEnvsTruncated() []bool { - if ev.GetEventType().String() != "signal" { - return []bool{} - } - if ev.Signal.Target == nil { - return []bool{} - } - if ev.Signal.Target.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsEuid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsEuid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EUID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsEuser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsEuser() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := 
&ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.EUser - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileChangeTime() []uint64 { - if ev.GetEventType().String() != "signal" { - return []uint64{} - } - if ev.Signal.Target == nil { - return []uint64{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.CTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileFilesystem() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileGid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.GID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileGroup() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileHashes() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - 
element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileInUpperLayer() []bool { - if ev.GetEventType().String() != "signal" { - return []bool{} - } - if ev.Signal.Target == nil { - return []bool{} - } - if ev.Signal.Target.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileInode() []uint64 { - if ev.GetEventType().String() != "signal" { - return []uint64{} - } - if ev.Signal.Target == nil { - return []uint64{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.PathKey.Inode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileMode() []uint16 { - if ev.GetEventType().String() != "signal" { - return []uint16{} - } - if ev.Signal.Target == nil { - return []uint16{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint16{} - } - var values []uint16 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.Mode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileModificationTime() []uint64 { - if ev.GetEventType().String() != "signal" { - return []uint64{} - } - if ev.Signal.Target == nil { - return []uint64{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.MTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileMountId() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - 
result := element.ProcessContext.Process.FileEvent.FileFields.PathKey.MountID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileName() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileNameLength() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFilePackageName() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFilePackageSourceVersion() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFilePackageVersion() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := 
(*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFilePath() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFilePathLength() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileRights() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileUid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.FileEvent.FileFields.UID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFileUser() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := 
ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFsgid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSGID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFsgroup() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSGroup - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFsuid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSUID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsFsuser() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.FSUser - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsGid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsGid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.GID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsGroup returns the value of 
the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsGroup() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.Group - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileChangeTime() []uint64 { - if ev.GetEventType().String() != "signal" { - return []uint64{} - } - if ev.Signal.Target == nil { - return []uint64{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.CTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileFilesystem() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFilesystem(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileGid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.GID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileGroup() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsGroup(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// 
GetSignalTargetAncestorsInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileHashes() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveHashesFromEvent(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileInUpperLayer() []bool { - if ev.GetEventType().String() != "signal" { - return []bool{} - } - if ev.Signal.Target == nil { - return []bool{} - } - if ev.Signal.Target.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileInode() []uint64 { - if ev.GetEventType().String() != "signal" { - return []uint64{} - } - if ev.Signal.Target == nil { - return []uint64{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileMode() []uint16 { - if ev.GetEventType().String() != "signal" { - return []uint16{} - } - if ev.Signal.Target == nil { - return []uint16{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint16{} - } - var values []uint16 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.Mode - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileModificationTime() []uint64 { - if ev.GetEventType().String() != "signal" { - return []uint64{} - } - if ev.Signal.Target == nil { - return []uint64{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint64{} - } - var values []uint64 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := 
element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.MTime - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileMountId() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileName() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileNameLength() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFilePackageName() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageName(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFilePackageSourceVersion() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := 
eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageSourceVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFilePackageVersion() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolvePackageVersion(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFilePath() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFilePathLength() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileRights() []int { - if ev.GetEventType().String() != "signal" { - return []int{} - } - if ev.Signal.Target == nil { - return []int{} - } - if ev.Signal.Target.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveRights(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileUid() []uint32 { - if ev.GetEventType().String() != "signal" { - return 
[]uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields.UID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsInterpreterFileUser() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileFieldsUser(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent.FileFields) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsIsExec() []bool { - if ev.GetEventType().String() != "signal" { - return []bool{} - } - if ev.Signal.Target == nil { - return []bool{} - } - if ev.Signal.Target.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.IsExec - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsIsKworker() []bool { - if ev.GetEventType().String() != "signal" { - return []bool{} - } - if ev.Signal.Target == nil { - return []bool{} - } - if ev.Signal.Target.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PIDContext.IsKworker - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsIsThread() []bool { - if ev.GetEventType().String() != "signal" { - return []bool{} - } - if ev.Signal.Target == nil { - return []bool{} - } - if ev.Signal.Target.Ancestor == nil { - return []bool{} - } - var values []bool - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessIsThread(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - if ev.Signal.Target.Ancestor == nil { - return 0 - } - ctx := 
eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - return iterator.Len(ctx) -} - -// GetSignalTargetAncestorsPid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsPid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PIDContext.Pid - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsPpid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsPpid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PPid - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsTid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsTid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PIDContext.Tid - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsTtyName() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.TTYName - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsUid() []uint32 { - if ev.GetEventType().String() != "signal" { - return []uint32{} - } - if ev.Signal.Target == nil { - return []uint32{} - } - if ev.Signal.Target.Ancestor == nil { - return []uint32{} - } - var values []uint32 - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.UID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsUser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsUser() []string { - if ev.GetEventType().String() != 
"signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.Credentials.User - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsUserSessionK8sGroups() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveK8SGroups(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsUserSessionK8sUid() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveK8SUID(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetAncestorsUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAncestorsUserSessionK8sUsername() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveK8SUsername(ev, &element.ProcessContext.Process.UserSession) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetSignalTargetArgs returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetArgs() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgs(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetArgsFlags() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetArgsOptions() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if 
ev.Signal.Target == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetArgsScrubbed() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetArgsTruncated() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetArgv returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetArgv() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgv(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetArgv0() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessArgv0(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetArgvScrubbed() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetAuid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetAuid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - return ev.Signal.Target.Process.Credentials.AUID -} - -// GetSignalTargetCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetCapEffective() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - return ev.Signal.Target.Process.Credentials.CapEffective -} - -// GetSignalTargetCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetCapPermitted() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - return ev.Signal.Target.Process.Credentials.CapPermitted -} - -// GetSignalTargetCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetCgroupFileInode() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - return ev.Signal.Target.Process.CGroup.CGroupFile.Inode -} - -// GetSignalTargetCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetCgroupFileMountId() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - return ev.Signal.Target.Process.CGroup.CGroupFile.MountID -} - -// GetSignalTargetCgroupId returns the value of the field, 
resolving if necessary -func (ev *Event) GetSignalTargetCgroupId() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.Signal.Target.Process.CGroup) -} - -// GetSignalTargetCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetCgroupManager() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Signal.Target.Process.CGroup) -} - -// GetSignalTargetCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetCgroupVersion() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Signal.Target.Process.CGroup) -} - -// GetSignalTargetCmdargv returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetCmdargv() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessCmdArgv(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetComm returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetComm() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.Signal.Target.Process.Comm -} - -// GetSignalTargetContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetContainerId() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetCreatedAt() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetEgid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetEgid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - return ev.Signal.Target.Process.Credentials.EGID -} - -// GetSignalTargetEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetEgroup() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.Signal.Target.Process.Credentials.EGroup -} - -// GetSignalTargetEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetEnvp() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetEnvs() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.Signal.Target.Process) -} - -// 
GetSignalTargetEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetEnvsTruncated() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetEuid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetEuid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - return ev.Signal.Target.Process.Credentials.EUID -} - -// GetSignalTargetEuser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetEuser() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.Signal.Target.Process.Credentials.EUser -} - -// GetSignalTargetExecTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetExecTime() time.Time { - if ev.GetEventType().String() != "signal" { - return time.Time{} - } - if ev.Signal.Target == nil { - return time.Time{} - } - return ev.Signal.Target.Process.ExecTime -} - -// GetSignalTargetExitTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetExitTime() time.Time { - if ev.GetEventType().String() != "signal" { - return time.Time{} - } - if ev.Signal.Target == nil { - return time.Time{} - } - return ev.Signal.Target.Process.ExitTime -} - -// GetSignalTargetFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileChangeTime() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if !ev.Signal.Target.Process.IsNotKworker() { - return uint64(0) - } - return ev.Signal.Target.Process.FileEvent.FileFields.CTime -} - -// GetSignalTargetFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileFilesystem() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Process.FileEvent) -} - -// GetSignalTargetFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileGid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if !ev.Signal.Target.Process.IsNotKworker() { - return uint32(0) - } - return ev.Signal.Target.Process.FileEvent.FileFields.GID -} - -// GetSignalTargetFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileGroup() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Process.FileEvent.FileFields) -} - -// GetSignalTargetFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileHashes() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if !ev.Signal.Target.Process.IsNotKworker() { - return []string{} - } - 
return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Process.FileEvent) -} - -// GetSignalTargetFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileInUpperLayer() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - if !ev.Signal.Target.Process.IsNotKworker() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Process.FileEvent.FileFields) -} - -// GetSignalTargetFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileInode() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if !ev.Signal.Target.Process.IsNotKworker() { - return uint64(0) - } - return ev.Signal.Target.Process.FileEvent.FileFields.PathKey.Inode -} - -// GetSignalTargetFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileMode() uint16 { - if ev.GetEventType().String() != "signal" { - return uint16(0) - } - if ev.Signal.Target == nil { - return uint16(0) - } - if !ev.Signal.Target.Process.IsNotKworker() { - return uint16(0) - } - return ev.Signal.Target.Process.FileEvent.FileFields.Mode -} - -// GetSignalTargetFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileModificationTime() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if !ev.Signal.Target.Process.IsNotKworker() { - return uint64(0) - } - return ev.Signal.Target.Process.FileEvent.FileFields.MTime -} - -// GetSignalTargetFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileMountId() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if !ev.Signal.Target.Process.IsNotKworker() { - return uint32(0) - } - return ev.Signal.Target.Process.FileEvent.FileFields.PathKey.MountID -} - -// GetSignalTargetFileName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileName() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.FileEvent) -} - -// GetSignalTargetFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileNameLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.FileEvent)) -} - -// GetSignalTargetFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFilePackageName() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Process.FileEvent) -} - -// GetSignalTargetFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFilePackageSourceVersion() string { - if ev.GetEventType().String() != "signal" { - 
return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Process.FileEvent) -} - -// GetSignalTargetFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFilePackageVersion() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Process.FileEvent) -} - -// GetSignalTargetFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFilePath() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent) -} - -// GetSignalTargetFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFilePathLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent)) -} - -// GetSignalTargetFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileRights() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - if !ev.Signal.Target.Process.IsNotKworker() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Process.FileEvent.FileFields) -} - -// GetSignalTargetFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileUid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if !ev.Signal.Target.Process.IsNotKworker() { - return uint32(0) - } - return ev.Signal.Target.Process.FileEvent.FileFields.UID -} - -// GetSignalTargetFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFileUser() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.IsNotKworker() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Process.FileEvent.FileFields) -} - -// GetSignalTargetForkTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetForkTime() time.Time { - if ev.GetEventType().String() != "signal" { - return time.Time{} - } - if ev.Signal.Target == nil { - return time.Time{} - } - return ev.Signal.Target.Process.ForkTime -} - -// GetSignalTargetFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFsgid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - return ev.Signal.Target.Process.Credentials.FSGID -} - -// GetSignalTargetFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFsgroup() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.Signal.Target.Process.Credentials.FSGroup -} - -// GetSignalTargetFsuid 
returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFsuid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - return ev.Signal.Target.Process.Credentials.FSUID -} - -// GetSignalTargetFsuser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetFsuser() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.Signal.Target.Process.Credentials.FSUser -} - -// GetSignalTargetGid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetGid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - return ev.Signal.Target.Process.Credentials.GID -} - -// GetSignalTargetGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetGroup() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - return ev.Signal.Target.Process.Credentials.Group -} - -// GetSignalTargetInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileChangeTime() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if !ev.Signal.Target.Process.HasInterpreter() { - return uint64(0) - } - return ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.CTime -} - -// GetSignalTargetInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileFilesystem() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) -} - -// GetSignalTargetInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileGid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if !ev.Signal.Target.Process.HasInterpreter() { - return uint32(0) - } - return ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.GID -} - -// GetSignalTargetInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileGroup() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetSignalTargetInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileHashes() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if !ev.Signal.Target.Process.HasInterpreter() { - return []string{} - } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) -} - -// GetSignalTargetInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) 
GetSignalTargetInterpreterFileInUpperLayer() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - if !ev.Signal.Target.Process.HasInterpreter() { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetSignalTargetInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileInode() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if !ev.Signal.Target.Process.HasInterpreter() { - return uint64(0) - } - return ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.Inode -} - -// GetSignalTargetInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileMode() uint16 { - if ev.GetEventType().String() != "signal" { - return uint16(0) - } - if ev.Signal.Target == nil { - return uint16(0) - } - if !ev.Signal.Target.Process.HasInterpreter() { - return uint16(0) - } - return ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.Mode -} - -// GetSignalTargetInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileModificationTime() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if !ev.Signal.Target.Process.HasInterpreter() { - return uint64(0) - } - return ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.MTime -} - -// GetSignalTargetInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileMountId() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if !ev.Signal.Target.Process.HasInterpreter() { - return uint32(0) - } - return ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.PathKey.MountID -} - -// GetSignalTargetInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileName() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) -} - -// GetSignalTargetInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileNameLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent)) -} - -// GetSignalTargetInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFilePackageName() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) -} - -// GetSignalTargetInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) 
GetSignalTargetInterpreterFilePackageSourceVersion() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) -} - -// GetSignalTargetInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFilePackageVersion() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) -} - -// GetSignalTargetInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFilePath() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) -} - -// GetSignalTargetInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFilePathLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent)) -} - -// GetSignalTargetInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileRights() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - if !ev.Signal.Target.Process.HasInterpreter() { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetSignalTargetInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileUid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if !ev.Signal.Target.Process.HasInterpreter() { - return uint32(0) - } - return ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields.UID -} - -// GetSignalTargetInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetInterpreterFileUser() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if !ev.Signal.Target.Process.HasInterpreter() { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent.FileFields) -} - -// GetSignalTargetIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetIsExec() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - return ev.Signal.Target.Process.IsExec -} - -// GetSignalTargetIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetIsKworker() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - return ev.Signal.Target.Process.PIDContext.IsKworker -} - -// 
GetSignalTargetIsThread returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetIsThread() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - return ev.FieldHandlers.ResolveProcessIsThread(ev, &ev.Signal.Target.Process) -} - -// GetSignalTargetParentArgs returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentArgs() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { - return "" - } - if !ev.Signal.Target.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessArgs(ev, ev.Signal.Target.Parent) -} - -// GetSignalTargetParentArgsFlags returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentArgsFlags() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Parent == nil { - return []string{} - } - if !ev.Signal.Target.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsFlags(ev, ev.Signal.Target.Parent) -} - -// GetSignalTargetParentArgsOptions returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentArgsOptions() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Parent == nil { - return []string{} - } - if !ev.Signal.Target.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessArgsOptions(ev, ev.Signal.Target.Parent) -} - -// GetSignalTargetParentArgsScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentArgsScrubbed() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { - return "" - } - if !ev.Signal.Target.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessArgsScrubbed(ev, ev.Signal.Target.Parent) -} - -// GetSignalTargetParentArgsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentArgsTruncated() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - if ev.Signal.Target.Parent == nil { - return false - } - if !ev.Signal.Target.HasParent() { - return false - } - return ev.FieldHandlers.ResolveProcessArgsTruncated(ev, ev.Signal.Target.Parent) -} - -// GetSignalTargetParentArgv returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentArgv() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Parent == nil { +// GetProcessAncestorsCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsCmdargv() []string { + if ev.BaseEvent.ProcessContext == nil { return []string{} } - if !ev.Signal.Target.HasParent() { + if ev.BaseEvent.ProcessContext.Ancestor == nil { return []string{} } - return ev.FieldHandlers.ResolveProcessArgv(ev, ev.Signal.Target.Parent) -} - -// GetSignalTargetParentArgv0 returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentArgv0() string { - if ev.GetEventType().String() != "signal" 
{ - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { - return "" - } - if !ev.Signal.Target.HasParent() { - return "" + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveProcessCmdArgv(ev, &element.ProcessContext.Process) + values = append(values, result...) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveProcessArgv0(ev, ev.Signal.Target.Parent) + return values } -// GetSignalTargetParentArgvScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentArgvScrubbed() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Parent == nil { +// GetProcessAncestorsEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsEnvp() []string { + if ev.BaseEvent.ProcessContext == nil { return []string{} } - if !ev.Signal.Target.HasParent() { + if ev.BaseEvent.ProcessContext.Ancestor == nil { return []string{} } - return ev.FieldHandlers.ResolveProcessArgvScrubbed(ev, ev.Signal.Target.Parent) -} - -// GetSignalTargetParentAuid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentAuid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if ev.Signal.Target.Parent == nil { - return uint32(0) - } - if !ev.Signal.Target.HasParent() { - return uint32(0) - } - return ev.Signal.Target.Parent.Credentials.AUID -} - -// GetSignalTargetParentCapEffective returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentCapEffective() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if ev.Signal.Target.Parent == nil { - return uint64(0) - } - if !ev.Signal.Target.HasParent() { - return uint64(0) - } - return ev.Signal.Target.Parent.Credentials.CapEffective -} - -// GetSignalTargetParentCapPermitted returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentCapPermitted() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if ev.Signal.Target.Parent == nil { - return uint64(0) - } - if !ev.Signal.Target.HasParent() { - return uint64(0) - } - return ev.Signal.Target.Parent.Credentials.CapPermitted -} - -// GetSignalTargetParentCgroupFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentCgroupFileInode() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if ev.Signal.Target.Parent == nil { - return uint64(0) - } - if !ev.Signal.Target.HasParent() { - return uint64(0) - } - return ev.Signal.Target.Parent.CGroup.CGroupFile.Inode -} - -// GetSignalTargetParentCgroupFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentCgroupFileMountId() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if ev.Signal.Target.Parent == nil { - return uint32(0) - } - if !ev.Signal.Target.HasParent() { - return uint32(0) - } 
- return ev.Signal.Target.Parent.CGroup.CGroupFile.MountID -} - -// GetSignalTargetParentCgroupId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentCgroupId() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { - return "" - } - if !ev.Signal.Target.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveCGroupID(ev, &ev.Signal.Target.Parent.CGroup) -} - -// GetSignalTargetParentCgroupManager returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentCgroupManager() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { - return "" - } - if !ev.Signal.Target.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveCGroupManager(ev, &ev.Signal.Target.Parent.CGroup) -} - -// GetSignalTargetParentCgroupVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentCgroupVersion() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - if ev.Signal.Target.Parent == nil { - return 0 - } - if !ev.Signal.Target.HasParent() { - return 0 + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) + values = append(values, result...) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveCGroupVersion(ev, &ev.Signal.Target.Parent.CGroup) + return values } -// GetSignalTargetParentCmdargv returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentCmdargv() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Parent == nil { +// GetProcessAncestorsFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsFilePath() []string { + if ev.BaseEvent.ProcessContext == nil { return []string{} } - if !ev.Signal.Target.HasParent() { + if ev.BaseEvent.ProcessContext.Ancestor == nil { return []string{} } - return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.Signal.Target.Parent) -} - -// GetSignalTargetParentComm returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentComm() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { - return "" - } - if !ev.Signal.Target.HasParent() { - return "" - } - return ev.Signal.Target.Parent.Comm -} - -// GetSignalTargetParentContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentContainerId() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { - return "" - } - if !ev.Signal.Target.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessContainerID(ev, ev.Signal.Target.Parent) -} - -// GetSignalTargetParentCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentCreatedAt() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if 
ev.Signal.Target == nil { - return 0 - } - if ev.Signal.Target.Parent == nil { - return 0 - } - if !ev.Signal.Target.HasParent() { - return 0 + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Signal.Target.Parent) + return values } -// GetSignalTargetParentEgid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentEgid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) +// GetProcessAncestorsFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsFilePathLength() []int { + if ev.BaseEvent.ProcessContext == nil { + return []int{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.BaseEvent.ProcessContext.Ancestor == nil { + return []int{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + var values []int + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.EGID + return values } -// GetSignalTargetParentEgroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentEgroup() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" +// GetProcessAncestorsGid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsGid() []uint32 { + if ev.BaseEvent.ProcessContext == nil { + return []uint32{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.BaseEvent.ProcessContext.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return "" + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.GID + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.EGroup + return values } -// GetSignalTargetParentEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentEnvp() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { +// GetProcessAncestorsGroup returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsGroup() []string { + if ev.BaseEvent.ProcessContext == nil { return []string{} } - if ev.Signal.Target.Parent == nil { + if ev.BaseEvent.ProcessContext.Ancestor == nil { return []string{} } - if !ev.Signal.Target.HasParent() { - return []string{} + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.Group + values = append(values, result) + ptr = iterator.Next(ctx) } - return 
ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Signal.Target.Parent) + return values } -// GetSignalTargetParentEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentEnvs() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { +// GetProcessAncestorsInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsInterpreterFilePath() []string { + if ev.BaseEvent.ProcessContext == nil { return []string{} } - if ev.Signal.Target.Parent == nil { + if ev.BaseEvent.ProcessContext.Ancestor == nil { return []string{} } - if !ev.Signal.Target.HasParent() { - return []string{} + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Signal.Target.Parent) + return values } -// GetSignalTargetParentEnvsTruncated returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentEnvsTruncated() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false +// GetProcessAncestorsInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsInterpreterFilePathLength() []int { + if ev.BaseEvent.ProcessContext == nil { + return []int{} } - if ev.Signal.Target.Parent == nil { - return false + if ev.BaseEvent.ProcessContext.Ancestor == nil { + return []int{} } - if !ev.Signal.Target.HasParent() { - return false + var values []int + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveProcessEnvsTruncated(ev, ev.Signal.Target.Parent) + return values } -// GetSignalTargetParentEuid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentEuid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) +// GetProcessAncestorsPid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsPid() []uint32 { + if ev.BaseEvent.ProcessContext == nil { + return []uint32{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.BaseEvent.ProcessContext.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.PIDContext.Pid + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.EUID + return values } -// GetSignalTargetParentEuser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentEuser() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" 
+// GetProcessAncestorsPpid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsPpid() []uint32 { + if ev.BaseEvent.ProcessContext == nil { + return []uint32{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.BaseEvent.ProcessContext.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return "" + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.PPid + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.EUser + return values } -// GetSignalTargetParentFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileChangeTime() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if ev.Signal.Target.Parent == nil { - return uint64(0) +// GetProcessAncestorsUid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsUid() []uint32 { + if ev.BaseEvent.ProcessContext == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return uint64(0) + if ev.BaseEvent.ProcessContext.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.Parent.IsNotKworker() { - return uint64(0) + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.UID + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.FileEvent.FileFields.CTime + return values } -// GetSignalTargetParentFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileFilesystem() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { - return "" +// GetProcessAncestorsUser returns the value of the field, resolving if necessary +func (ev *Event) GetProcessAncestorsUser() []string { + if ev.BaseEvent.ProcessContext == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return "" + if ev.BaseEvent.ProcessContext.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "" + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.User + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Parent.FileEvent) + return values } -// GetSignalTargetParentFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileGid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if ev.Signal.Target.Parent == nil { - return uint32(0) - } - if !ev.Signal.Target.HasParent() { - return uint32(0) - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return uint32(0) +// GetProcessCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetProcessCmdargv() []string { + if 
ev.BaseEvent.ProcessContext == nil { + return []string{} } - return ev.Signal.Target.Parent.FileEvent.FileFields.GID + return ev.FieldHandlers.ResolveProcessCmdArgv(ev, &ev.BaseEvent.ProcessContext.Process) } -// GetSignalTargetParentFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileGroup() string { - if ev.GetEventType().String() != "signal" { - return "" +// GetProcessEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetProcessEnvp() []string { + if ev.BaseEvent.ProcessContext == nil { + return []string{} } - if ev.Signal.Target == nil { - return "" + return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.BaseEvent.ProcessContext.Process) +} + +// GetProcessExecTime returns the value of the field, resolving if necessary +func (ev *Event) GetProcessExecTime() time.Time { + if ev.BaseEvent.ProcessContext == nil { + return time.Time{} } - if ev.Signal.Target.Parent == nil { - return "" + return ev.BaseEvent.ProcessContext.Process.ExecTime +} + +// GetProcessExitTime returns the value of the field, resolving if necessary +func (ev *Event) GetProcessExitTime() time.Time { + if ev.BaseEvent.ProcessContext == nil { + return time.Time{} } - if !ev.Signal.Target.HasParent() { + return ev.BaseEvent.ProcessContext.Process.ExitTime +} + +// GetProcessFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetProcessFilePath() string { + if ev.BaseEvent.ProcessContext == nil { return "" } - if !ev.Signal.Target.Parent.IsNotKworker() { + if !ev.BaseEvent.ProcessContext.Process.IsNotKworker() { return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) } -// GetSignalTargetParentFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileHashes() []string { - if ev.GetEventType().String() != "signal" { - return []string{} - } - if ev.Signal.Target == nil { - return []string{} - } - if ev.Signal.Target.Parent == nil { - return []string{} - } - if !ev.Signal.Target.HasParent() { - return []string{} +// GetProcessFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetProcessFilePathLength() int { + if ev.BaseEvent.ProcessContext == nil { + return 0 } - if !ev.Signal.Target.Parent.IsNotKworker() { - return []string{} + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent)) +} + +// GetProcessForkTime returns the value of the field, resolving if necessary +func (ev *Event) GetProcessForkTime() time.Time { + if ev.BaseEvent.ProcessContext == nil { + return time.Time{} } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Parent.FileEvent) + return ev.BaseEvent.ProcessContext.Process.ForkTime } -// GetSignalTargetParentFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileInUpperLayer() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - if ev.Signal.Target.Parent == nil { - return false - } - if !ev.Signal.Target.HasParent() { - return false - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return false +// GetProcessGid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessGid() uint32 { + if ev.BaseEvent.ProcessContext == nil { + return uint32(0) } - 
return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) + return ev.BaseEvent.ProcessContext.Process.Credentials.GID } -// GetSignalTargetParentFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileInode() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if ev.Signal.Target.Parent == nil { - return uint64(0) - } - if !ev.Signal.Target.HasParent() { - return uint64(0) - } - if !ev.Signal.Target.Parent.IsNotKworker() { - return uint64(0) +// GetProcessGroup returns the value of the field, resolving if necessary +func (ev *Event) GetProcessGroup() string { + if ev.BaseEvent.ProcessContext == nil { + return "" } - return ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.Inode + return ev.BaseEvent.ProcessContext.Process.Credentials.Group } -// GetSignalTargetParentFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileMode() uint16 { - if ev.GetEventType().String() != "signal" { - return uint16(0) - } - if ev.Signal.Target == nil { - return uint16(0) - } - if ev.Signal.Target.Parent == nil { - return uint16(0) - } - if !ev.Signal.Target.HasParent() { - return uint16(0) +// GetProcessInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetProcessInterpreterFilePath() string { + if ev.BaseEvent.ProcessContext == nil { + return "" } - if !ev.Signal.Target.Parent.IsNotKworker() { - return uint16(0) + if !ev.BaseEvent.ProcessContext.Process.HasInterpreter() { + return "" } - return ev.Signal.Target.Parent.FileEvent.FileFields.Mode + return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent) } -// GetSignalTargetParentFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileModificationTime() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) +// GetProcessInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetProcessInterpreterFilePathLength() int { + if ev.BaseEvent.ProcessContext == nil { + return 0 } - if ev.Signal.Target.Parent == nil { - return uint64(0) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.LinuxBinprm.FileEvent)) +} + +// GetProcessParentCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentCmdargv() []string { + if ev.BaseEvent.ProcessContext == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return uint64(0) + if ev.BaseEvent.ProcessContext.Parent == nil { + return []string{} } - if !ev.Signal.Target.Parent.IsNotKworker() { - return uint64(0) + if !ev.BaseEvent.ProcessContext.HasParent() { + return []string{} } - return ev.Signal.Target.Parent.FileEvent.FileFields.MTime + return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.BaseEvent.ProcessContext.Parent) } -// GetSignalTargetParentFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileMountId() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) - } - if ev.Signal.Target.Parent == nil { - return uint32(0) +// GetProcessParentEnvp returns the value of the field, resolving if necessary +func (ev 
*Event) GetProcessParentEnvp() []string { + if ev.BaseEvent.ProcessContext == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + if ev.BaseEvent.ProcessContext.Parent == nil { + return []string{} } - if !ev.Signal.Target.Parent.IsNotKworker() { - return uint32(0) + if !ev.BaseEvent.ProcessContext.HasParent() { + return []string{} } - return ev.Signal.Target.Parent.FileEvent.FileFields.PathKey.MountID + return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent) } -// GetSignalTargetParentFileName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileName() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { +// GetProcessParentFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentFilePath() string { + if ev.BaseEvent.ProcessContext == nil { return "" } - if ev.Signal.Target.Parent == nil { + if ev.BaseEvent.ProcessContext.Parent == nil { return "" } - if !ev.Signal.Target.HasParent() { + if !ev.BaseEvent.ProcessContext.HasParent() { return "" } - if !ev.Signal.Target.Parent.IsNotKworker() { + if !ev.BaseEvent.ProcessContext.Parent.IsNotKworker() { return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) } -// GetSignalTargetParentFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileNameLength() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { +// GetProcessParentFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentFilePathLength() int { + if ev.BaseEvent.ProcessContext == nil { return 0 } - if ev.Signal.Target.Parent == nil { + if ev.BaseEvent.ProcessContext.Parent == nil { return 0 } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.FileEvent)) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent)) } -// GetSignalTargetParentFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFilePackageName() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { - return "" +// GetProcessParentGid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentGid() uint32 { + if ev.BaseEvent.ProcessContext == nil { + return uint32(0) } - if !ev.Signal.Target.HasParent() { - return "" + if ev.BaseEvent.ProcessContext.Parent == nil { + return uint32(0) } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "" + if !ev.BaseEvent.ProcessContext.HasParent() { + return uint32(0) } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Parent.FileEvent) + return ev.BaseEvent.ProcessContext.Parent.Credentials.GID } -// GetSignalTargetParentFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFilePackageSourceVersion() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" - } - if ev.Signal.Target.Parent == nil { +// GetProcessParentGroup returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentGroup() string 
{ + if ev.BaseEvent.ProcessContext == nil { return "" } - if !ev.Signal.Target.HasParent() { + if ev.BaseEvent.ProcessContext.Parent == nil { return "" } - if !ev.Signal.Target.Parent.IsNotKworker() { + if !ev.BaseEvent.ProcessContext.HasParent() { return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Parent.FileEvent) + return ev.BaseEvent.ProcessContext.Parent.Credentials.Group } -// GetSignalTargetParentFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFilePackageVersion() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { +// GetProcessParentInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentInterpreterFilePath() string { + if ev.BaseEvent.ProcessContext == nil { return "" } - if ev.Signal.Target.Parent == nil { + if ev.BaseEvent.ProcessContext.Parent == nil { return "" } - if !ev.Signal.Target.HasParent() { + if !ev.BaseEvent.ProcessContext.HasParent() { return "" } - if !ev.Signal.Target.Parent.IsNotKworker() { + if !ev.BaseEvent.ProcessContext.Parent.HasInterpreter() { return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Parent.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent) } -// GetSignalTargetParentFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFilePath() string { - if ev.GetEventType().String() != "signal" { - return "" +// GetProcessParentInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentInterpreterFilePathLength() int { + if ev.BaseEvent.ProcessContext == nil { + return 0 } - if ev.Signal.Target == nil { - return "" + if ev.BaseEvent.ProcessContext.Parent == nil { + return 0 } - if ev.Signal.Target.Parent == nil { - return "" + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.LinuxBinprm.FileEvent)) +} + +// GetProcessParentPid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentPid() uint32 { + if ev.BaseEvent.ProcessContext == nil { + return uint32(0) } - if !ev.Signal.Target.HasParent() { - return "" + if ev.BaseEvent.ProcessContext.Parent == nil { + return uint32(0) } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "" + if !ev.BaseEvent.ProcessContext.HasParent() { + return uint32(0) } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent) + return ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid } -// GetSignalTargetParentFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFilePathLength() int { - if ev.GetEventType().String() != "signal" { - return 0 +// GetProcessParentPpid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentPpid() uint32 { + if ev.BaseEvent.ProcessContext == nil { + return uint32(0) } - if ev.Signal.Target == nil { - return 0 + if ev.BaseEvent.ProcessContext.Parent == nil { + return uint32(0) } - if ev.Signal.Target.Parent == nil { - return 0 + if !ev.BaseEvent.ProcessContext.HasParent() { + return uint32(0) } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent)) + return ev.BaseEvent.ProcessContext.Parent.PPid } -// GetSignalTargetParentFileRights returns the value of the field, resolving if necessary 
-func (ev *Event) GetSignalTargetParentFileRights() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 - } - if ev.Signal.Target.Parent == nil { - return 0 +// GetProcessParentUid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentUid() uint32 { + if ev.BaseEvent.ProcessContext == nil { + return uint32(0) } - if !ev.Signal.Target.HasParent() { - return 0 + if ev.BaseEvent.ProcessContext.Parent == nil { + return uint32(0) } - if !ev.Signal.Target.Parent.IsNotKworker() { - return 0 + if !ev.BaseEvent.ProcessContext.HasParent() { + return uint32(0) } - return ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) + return ev.BaseEvent.ProcessContext.Parent.Credentials.UID } -// GetSignalTargetParentFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileUid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) +// GetProcessParentUser returns the value of the field, resolving if necessary +func (ev *Event) GetProcessParentUser() string { + if ev.BaseEvent.ProcessContext == nil { + return "" } - if ev.Signal.Target == nil { - return uint32(0) + if ev.BaseEvent.ProcessContext.Parent == nil { + return "" } - if ev.Signal.Target.Parent == nil { + if !ev.BaseEvent.ProcessContext.HasParent() { + return "" + } + return ev.BaseEvent.ProcessContext.Parent.Credentials.User +} + +// GetProcessPid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessPid() uint32 { + if ev.BaseEvent.ProcessContext == nil { return uint32(0) } - if !ev.Signal.Target.HasParent() { + return ev.BaseEvent.ProcessContext.Process.PIDContext.Pid +} + +// GetProcessPpid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessPpid() uint32 { + if ev.BaseEvent.ProcessContext == nil { return uint32(0) } - if !ev.Signal.Target.Parent.IsNotKworker() { + return ev.BaseEvent.ProcessContext.Process.PPid +} + +// GetProcessUid returns the value of the field, resolving if necessary +func (ev *Event) GetProcessUid() uint32 { + if ev.BaseEvent.ProcessContext == nil { return uint32(0) } - return ev.Signal.Target.Parent.FileEvent.FileFields.UID + return ev.BaseEvent.ProcessContext.Process.Credentials.UID } -// GetSignalTargetParentFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFileUser() string { - if ev.GetEventType().String() != "signal" { +// GetProcessUser returns the value of the field, resolving if necessary +func (ev *Event) GetProcessUser() string { + if ev.BaseEvent.ProcessContext == nil { return "" } - if ev.Signal.Target == nil { - return "" + return ev.BaseEvent.ProcessContext.Process.Credentials.User +} + +// GetPtraceTraceeAncestorsCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsCmdargv() []string { + if ev.GetEventType().String() != "ptrace" { + return []string{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.PTrace.Tracee == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return "" + if ev.PTrace.Tracee.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.Parent.IsNotKworker() { - return "" + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveProcessCmdArgv(ev, 
&element.ProcessContext.Process) + values = append(values, result...) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Parent.FileEvent.FileFields) + return values } -// GetSignalTargetParentFsgid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFsgid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) +// GetPtraceTraceeAncestorsEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsEnvp() []string { + if ev.GetEventType().String() != "ptrace" { + return []string{} } - if ev.Signal.Target == nil { - return uint32(0) + if ev.PTrace.Tracee == nil { + return []string{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.PTrace.Tracee.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) + values = append(values, result...) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.FSGID + return values } -// GetSignalTargetParentFsgroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFsgroup() string { - if ev.GetEventType().String() != "signal" { - return "" +// GetPtraceTraceeAncestorsFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsFilePath() []string { + if ev.GetEventType().String() != "ptrace" { + return []string{} } - if ev.Signal.Target == nil { - return "" + if ev.PTrace.Tracee == nil { + return []string{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.PTrace.Tracee.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return "" + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.FSGroup + return values } -// GetSignalTargetParentFsuid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFsuid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) +// GetPtraceTraceeAncestorsFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsFilePathLength() []int { + if ev.GetEventType().String() != "ptrace" { + return []int{} } - if ev.Signal.Target == nil { - return uint32(0) + if ev.PTrace.Tracee == nil { + return []int{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.PTrace.Tracee.Ancestor == nil { + return []int{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + var values []int + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.FSUID + return values } -// GetSignalTargetParentFsuser 
returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentFsuser() string { - if ev.GetEventType().String() != "signal" { - return "" +// GetPtraceTraceeAncestorsGid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsGid() []uint32 { + if ev.GetEventType().String() != "ptrace" { + return []uint32{} } - if ev.Signal.Target == nil { - return "" + if ev.PTrace.Tracee == nil { + return []uint32{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.PTrace.Tracee.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return "" + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.GID + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.FSUser + return values } -// GetSignalTargetParentGid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentGid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) +// GetPtraceTraceeAncestorsGroup returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsGroup() []string { + if ev.GetEventType().String() != "ptrace" { + return []string{} } - if ev.Signal.Target == nil { - return uint32(0) + if ev.PTrace.Tracee == nil { + return []string{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.PTrace.Tracee.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.Group + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.GID + return values } -// GetSignalTargetParentGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentGroup() string { - if ev.GetEventType().String() != "signal" { - return "" +// GetPtraceTraceeAncestorsInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsInterpreterFilePath() []string { + if ev.GetEventType().String() != "ptrace" { + return []string{} } - if ev.Signal.Target == nil { - return "" + if ev.PTrace.Tracee == nil { + return []string{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.PTrace.Tracee.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return "" + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.Group + return values } -// GetSignalTargetParentInterpreterFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileChangeTime() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) +// 
GetPtraceTraceeAncestorsInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsInterpreterFilePathLength() []int { + if ev.GetEventType().String() != "ptrace" { + return []int{} } - if ev.Signal.Target.Parent == nil { - return uint64(0) + if ev.PTrace.Tracee == nil { + return []int{} } - if !ev.Signal.Target.HasParent() { - return uint64(0) + if ev.PTrace.Tracee.Ancestor == nil { + return []int{} } - if !ev.Signal.Target.Parent.HasInterpreter() { - return uint64(0) + var values []int + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.CTime + return values } -// GetSignalTargetParentInterpreterFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileFilesystem() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" +// GetPtraceTraceeAncestorsPid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsPid() []uint32 { + if ev.GetEventType().String() != "ptrace" { + return []uint32{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.PTrace.Tracee == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return "" + if ev.PTrace.Tracee.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "" + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.PIDContext.Pid + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) + return values } -// GetSignalTargetParentInterpreterFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileGid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) +// GetPtraceTraceeAncestorsPpid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsPpid() []uint32 { + if ev.GetEventType().String() != "ptrace" { + return []uint32{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.PTrace.Tracee == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + if ev.PTrace.Tracee.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.Parent.HasInterpreter() { - return uint32(0) + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.PPid + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.GID + return values } -// GetSignalTargetParentInterpreterFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileGroup() string { - if ev.GetEventType().String() 
!= "signal" { - return "" - } - if ev.Signal.Target == nil { - return "" +// GetPtraceTraceeAncestorsUid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsUid() []uint32 { + if ev.GetEventType().String() != "ptrace" { + return []uint32{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.PTrace.Tracee == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return "" + if ev.PTrace.Tracee.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "" + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.UID + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) + return values } -// GetSignalTargetParentInterpreterFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileHashes() []string { - if ev.GetEventType().String() != "signal" { +// GetPtraceTraceeAncestorsUser returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeAncestorsUser() []string { + if ev.GetEventType().String() != "ptrace" { return []string{} } - if ev.Signal.Target == nil { + if ev.PTrace.Tracee == nil { return []string{} } - if ev.Signal.Target.Parent == nil { + if ev.PTrace.Tracee.Ancestor == nil { return []string{} } - if !ev.Signal.Target.HasParent() { + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.User + values = append(values, result) + ptr = iterator.Next(ctx) + } + return values +} + +// GetPtraceTraceeCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeCmdargv() []string { + if ev.GetEventType().String() != "ptrace" { return []string{} } - if !ev.Signal.Target.Parent.HasInterpreter() { + if ev.PTrace.Tracee == nil { return []string{} } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveProcessCmdArgv(ev, &ev.PTrace.Tracee.Process) } -// GetSignalTargetParentInterpreterFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileInUpperLayer() bool { - if ev.GetEventType().String() != "signal" { - return false - } - if ev.Signal.Target == nil { - return false - } - if ev.Signal.Target.Parent == nil { - return false - } - if !ev.Signal.Target.HasParent() { - return false +// GetPtraceTraceeEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeEnvp() []string { + if ev.GetEventType().String() != "ptrace" { + return []string{} } - if !ev.Signal.Target.Parent.HasInterpreter() { - return false + if ev.PTrace.Tracee == nil { + return []string{} } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) + return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.PTrace.Tracee.Process) } -// GetSignalTargetParentInterpreterFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileInode() uint64 { - if 
ev.GetEventType().String() != "signal" { - return uint64(0) - } - if ev.Signal.Target == nil { - return uint64(0) - } - if ev.Signal.Target.Parent == nil { - return uint64(0) - } - if !ev.Signal.Target.HasParent() { - return uint64(0) +// GetPtraceTraceeExecTime returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeExecTime() time.Time { + if ev.GetEventType().String() != "ptrace" { + return time.Time{} } - if !ev.Signal.Target.Parent.HasInterpreter() { - return uint64(0) + if ev.PTrace.Tracee == nil { + return time.Time{} } - return ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.Inode + return ev.PTrace.Tracee.Process.ExecTime } -// GetSignalTargetParentInterpreterFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileMode() uint16 { - if ev.GetEventType().String() != "signal" { - return uint16(0) - } - if ev.Signal.Target == nil { - return uint16(0) - } - if ev.Signal.Target.Parent == nil { - return uint16(0) - } - if !ev.Signal.Target.HasParent() { - return uint16(0) +// GetPtraceTraceeExitTime returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeExitTime() time.Time { + if ev.GetEventType().String() != "ptrace" { + return time.Time{} } - if !ev.Signal.Target.Parent.HasInterpreter() { - return uint16(0) + if ev.PTrace.Tracee == nil { + return time.Time{} } - return ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.Mode + return ev.PTrace.Tracee.Process.ExitTime } -// GetSignalTargetParentInterpreterFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileModificationTime() uint64 { - if ev.GetEventType().String() != "signal" { - return uint64(0) +// GetPtraceTraceeFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeFilePath() string { + if ev.GetEventType().String() != "ptrace" { + return "" } - if ev.Signal.Target == nil { - return uint64(0) + if ev.PTrace.Tracee == nil { + return "" } - if ev.Signal.Target.Parent == nil { - return uint64(0) + if !ev.PTrace.Tracee.Process.IsNotKworker() { + return "" } - if !ev.Signal.Target.HasParent() { - return uint64(0) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent) +} + +// GetPtraceTraceeFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeFilePathLength() int { + if ev.GetEventType().String() != "ptrace" { + return 0 } - if !ev.Signal.Target.Parent.HasInterpreter() { - return uint64(0) + if ev.PTrace.Tracee == nil { + return 0 } - return ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.MTime + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.FileEvent)) } -// GetSignalTargetParentInterpreterFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileMountId() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) - } - if ev.Signal.Target == nil { - return uint32(0) +// GetPtraceTraceeForkTime returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeForkTime() time.Time { + if ev.GetEventType().String() != "ptrace" { + return time.Time{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.PTrace.Tracee == nil { + return time.Time{} } - if !ev.Signal.Target.HasParent() { + return ev.PTrace.Tracee.Process.ForkTime +} + +// 
GetPtraceTraceeGid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeGid() uint32 { + if ev.GetEventType().String() != "ptrace" { return uint32(0) } - if !ev.Signal.Target.Parent.HasInterpreter() { + if ev.PTrace.Tracee == nil { return uint32(0) } - return ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.PathKey.MountID + return ev.PTrace.Tracee.Process.Credentials.GID } -// GetSignalTargetParentInterpreterFileName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileName() string { - if ev.GetEventType().String() != "signal" { +// GetPtraceTraceeGroup returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeGroup() string { + if ev.GetEventType().String() != "ptrace" { return "" } - if ev.Signal.Target == nil { + if ev.PTrace.Tracee == nil { return "" } - if ev.Signal.Target.Parent == nil { + return ev.PTrace.Tracee.Process.Credentials.Group +} + +// GetPtraceTraceeInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeInterpreterFilePath() string { + if ev.GetEventType().String() != "ptrace" { return "" } - if !ev.Signal.Target.HasParent() { + if ev.PTrace.Tracee == nil { return "" } - if !ev.Signal.Target.Parent.HasInterpreter() { + if !ev.PTrace.Tracee.Process.HasInterpreter() { return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent) } -// GetSignalTargetParentInterpreterFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileNameLength() int { - if ev.GetEventType().String() != "signal" { +// GetPtraceTraceeInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeInterpreterFilePathLength() int { + if ev.GetEventType().String() != "ptrace" { return 0 } - if ev.Signal.Target == nil { + if ev.PTrace.Tracee == nil { return 0 } - if ev.Signal.Target.Parent == nil { - return 0 + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Process.LinuxBinprm.FileEvent)) +} + +// GetPtraceTraceeParentCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentCmdargv() []string { + if ev.GetEventType().String() != "ptrace" { + return []string{} + } + if ev.PTrace.Tracee == nil { + return []string{} + } + if ev.PTrace.Tracee.Parent == nil { + return []string{} + } + if !ev.PTrace.Tracee.HasParent() { + return []string{} } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent)) + return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.PTrace.Tracee.Parent) } -// GetSignalTargetParentInterpreterFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFilePackageName() string { - if ev.GetEventType().String() != "signal" { +// GetPtraceTraceeParentEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentEnvp() []string { + if ev.GetEventType().String() != "ptrace" { + return []string{} + } + if ev.PTrace.Tracee == nil { + return []string{} + } + if ev.PTrace.Tracee.Parent == nil { + return []string{} + } + if !ev.PTrace.Tracee.HasParent() { + return []string{} + } + return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.PTrace.Tracee.Parent) +} + +// 
GetPtraceTraceeParentFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentFilePath() string { + if ev.GetEventType().String() != "ptrace" { return "" } - if ev.Signal.Target == nil { + if ev.PTrace.Tracee == nil { return "" } - if ev.Signal.Target.Parent == nil { + if ev.PTrace.Tracee.Parent == nil { return "" } - if !ev.Signal.Target.HasParent() { + if !ev.PTrace.Tracee.HasParent() { return "" } - if !ev.Signal.Target.Parent.HasInterpreter() { + if !ev.PTrace.Tracee.Parent.IsNotKworker() { return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent) } -// GetSignalTargetParentInterpreterFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFilePackageSourceVersion() string { - if ev.GetEventType().String() != "signal" { - return "" +// GetPtraceTraceeParentFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentFilePathLength() int { + if ev.GetEventType().String() != "ptrace" { + return 0 } - if ev.Signal.Target == nil { - return "" + if ev.PTrace.Tracee == nil { + return 0 + } + if ev.PTrace.Tracee.Parent == nil { + return 0 + } + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.FileEvent)) +} + +// GetPtraceTraceeParentGid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentGid() uint32 { + if ev.GetEventType().String() != "ptrace" { + return uint32(0) } - if ev.Signal.Target.Parent == nil { - return "" + if ev.PTrace.Tracee == nil { + return uint32(0) } - if !ev.Signal.Target.HasParent() { - return "" + if ev.PTrace.Tracee.Parent == nil { + return uint32(0) } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "" + if !ev.PTrace.Tracee.HasParent() { + return uint32(0) } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) + return ev.PTrace.Tracee.Parent.Credentials.GID } -// GetSignalTargetParentInterpreterFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFilePackageVersion() string { - if ev.GetEventType().String() != "signal" { - return "" - } - if ev.Signal.Target == nil { +// GetPtraceTraceeParentGroup returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentGroup() string { + if ev.GetEventType().String() != "ptrace" { return "" } - if ev.Signal.Target.Parent == nil { + if ev.PTrace.Tracee == nil { return "" } - if !ev.Signal.Target.HasParent() { + if ev.PTrace.Tracee.Parent == nil { return "" } - if !ev.Signal.Target.Parent.HasInterpreter() { + if !ev.PTrace.Tracee.HasParent() { return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) + return ev.PTrace.Tracee.Parent.Credentials.Group } -// GetSignalTargetParentInterpreterFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFilePath() string { - if ev.GetEventType().String() != "signal" { +// GetPtraceTraceeParentInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentInterpreterFilePath() string { + if ev.GetEventType().String() != "ptrace" { return "" } - if ev.Signal.Target == nil { + if ev.PTrace.Tracee == nil { return 
"" } - if ev.Signal.Target.Parent == nil { + if ev.PTrace.Tracee.Parent == nil { return "" } - if !ev.Signal.Target.HasParent() { + if !ev.PTrace.Tracee.HasParent() { return "" } - if !ev.Signal.Target.Parent.HasInterpreter() { + if !ev.PTrace.Tracee.Parent.HasInterpreter() { return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent) } -// GetSignalTargetParentInterpreterFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFilePathLength() int { - if ev.GetEventType().String() != "signal" { +// GetPtraceTraceeParentInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentInterpreterFilePathLength() int { + if ev.GetEventType().String() != "ptrace" { return 0 } - if ev.Signal.Target == nil { + if ev.PTrace.Tracee == nil { return 0 } - if ev.Signal.Target.Parent == nil { + if ev.PTrace.Tracee.Parent == nil { return 0 } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent)) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.PTrace.Tracee.Parent.LinuxBinprm.FileEvent)) } -// GetSignalTargetParentInterpreterFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileRights() int { - if ev.GetEventType().String() != "signal" { - return 0 - } - if ev.Signal.Target == nil { - return 0 +// GetPtraceTraceeParentPid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentPid() uint32 { + if ev.GetEventType().String() != "ptrace" { + return uint32(0) } - if ev.Signal.Target.Parent == nil { - return 0 + if ev.PTrace.Tracee == nil { + return uint32(0) } - if !ev.Signal.Target.HasParent() { - return 0 + if ev.PTrace.Tracee.Parent == nil { + return uint32(0) } - if !ev.Signal.Target.Parent.HasInterpreter() { - return 0 + if !ev.PTrace.Tracee.HasParent() { + return uint32(0) } - return ev.FieldHandlers.ResolveRights(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) + return ev.PTrace.Tracee.Parent.PIDContext.Pid } -// GetSignalTargetParentInterpreterFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileUid() uint32 { - if ev.GetEventType().String() != "signal" { +// GetPtraceTraceeParentPpid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentPpid() uint32 { + if ev.GetEventType().String() != "ptrace" { return uint32(0) } - if ev.Signal.Target == nil { + if ev.PTrace.Tracee == nil { return uint32(0) } - if ev.Signal.Target.Parent == nil { + if ev.PTrace.Tracee.Parent == nil { return uint32(0) } - if !ev.Signal.Target.HasParent() { + if !ev.PTrace.Tracee.HasParent() { return uint32(0) } - if !ev.Signal.Target.Parent.HasInterpreter() { + return ev.PTrace.Tracee.Parent.PPid +} + +// GetPtraceTraceeParentUid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentUid() uint32 { + if ev.GetEventType().String() != "ptrace" { + return uint32(0) + } + if ev.PTrace.Tracee == nil { + return uint32(0) + } + if ev.PTrace.Tracee.Parent == nil { + return uint32(0) + } + if !ev.PTrace.Tracee.HasParent() { return uint32(0) } - return ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields.UID + return ev.PTrace.Tracee.Parent.Credentials.UID } -// 
GetSignalTargetParentInterpreterFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentInterpreterFileUser() string { - if ev.GetEventType().String() != "signal" { +// GetPtraceTraceeParentUser returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeParentUser() string { + if ev.GetEventType().String() != "ptrace" { return "" } - if ev.Signal.Target == nil { + if ev.PTrace.Tracee == nil { return "" } - if ev.Signal.Target.Parent == nil { + if ev.PTrace.Tracee.Parent == nil { return "" } - if !ev.Signal.Target.HasParent() { + if !ev.PTrace.Tracee.HasParent() { return "" } - if !ev.Signal.Target.Parent.HasInterpreter() { - return "" + return ev.PTrace.Tracee.Parent.Credentials.User +} + +// GetPtraceTraceePid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceePid() uint32 { + if ev.GetEventType().String() != "ptrace" { + return uint32(0) } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent.FileFields) + if ev.PTrace.Tracee == nil { + return uint32(0) + } + return ev.PTrace.Tracee.Process.PIDContext.Pid } -// GetSignalTargetParentIsExec returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentIsExec() bool { - if ev.GetEventType().String() != "signal" { - return false +// GetPtraceTraceePpid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceePpid() uint32 { + if ev.GetEventType().String() != "ptrace" { + return uint32(0) } - if ev.Signal.Target == nil { - return false + if ev.PTrace.Tracee == nil { + return uint32(0) } - if ev.Signal.Target.Parent == nil { - return false + return ev.PTrace.Tracee.Process.PPid +} + +// GetPtraceTraceeUid returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeUid() uint32 { + if ev.GetEventType().String() != "ptrace" { + return uint32(0) } - if !ev.Signal.Target.HasParent() { - return false + if ev.PTrace.Tracee == nil { + return uint32(0) } - return ev.Signal.Target.Parent.IsExec + return ev.PTrace.Tracee.Process.Credentials.UID } -// GetSignalTargetParentIsKworker returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentIsKworker() bool { - if ev.GetEventType().String() != "signal" { - return false +// GetPtraceTraceeUser returns the value of the field, resolving if necessary +func (ev *Event) GetPtraceTraceeUser() string { + if ev.GetEventType().String() != "ptrace" { + return "" } - if ev.Signal.Target == nil { - return false + if ev.PTrace.Tracee == nil { + return "" } - if ev.Signal.Target.Parent == nil { - return false + return ev.PTrace.Tracee.Process.Credentials.User +} + +// GetRemovexattrFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetRemovexattrFilePath() string { + if ev.GetEventType().String() != "removexattr" { + return "" } - if !ev.Signal.Target.HasParent() { - return false + return ev.FieldHandlers.ResolveFilePath(ev, &ev.RemoveXAttr.File) +} + +// GetRemovexattrFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetRemovexattrFilePathLength() int { + if ev.GetEventType().String() != "removexattr" { + return 0 } - return ev.Signal.Target.Parent.PIDContext.IsKworker + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.RemoveXAttr.File)) } -// GetSignalTargetParentIsThread returns the value of the field, resolving if necessary -func (ev *Event) 
GetSignalTargetParentIsThread() bool { - if ev.GetEventType().String() != "signal" { - return false +// GetRenameFileDestinationPath returns the value of the field, resolving if necessary +func (ev *Event) GetRenameFileDestinationPath() string { + if ev.GetEventType().String() != "rename" { + return "" } - if ev.Signal.Target == nil { - return false + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.New) +} + +// GetRenameFileDestinationPathLength returns the value of the field, resolving if necessary +func (ev *Event) GetRenameFileDestinationPathLength() int { + if ev.GetEventType().String() != "rename" { + return 0 } - if ev.Signal.Target.Parent == nil { - return false + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.New)) +} + +// GetRenameFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetRenameFilePath() string { + if ev.GetEventType().String() != "rename" { + return "" } - if !ev.Signal.Target.HasParent() { - return false + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.Old) +} + +// GetRenameFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetRenameFilePathLength() int { + if ev.GetEventType().String() != "rename" { + return 0 } - return ev.FieldHandlers.ResolveProcessIsThread(ev, ev.Signal.Target.Parent) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Rename.Old)) } -// GetSignalTargetParentPid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentPid() uint32 { +// GetRmdirFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetRmdirFilePath() string { + if ev.GetEventType().String() != "rmdir" { + return "" + } + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Rmdir.File) +} + +// GetRmdirFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetRmdirFilePathLength() int { + if ev.GetEventType().String() != "rmdir" { + return 0 + } + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Rmdir.File)) +} + +// GetSetxattrFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetSetxattrFilePath() string { + if ev.GetEventType().String() != "setxattr" { + return "" + } + return ev.FieldHandlers.ResolveFilePath(ev, &ev.SetXAttr.File) +} + +// GetSetxattrFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetSetxattrFilePathLength() int { + if ev.GetEventType().String() != "setxattr" { + return 0 + } + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.SetXAttr.File)) +} + +// GetSignalTargetAncestorsCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsCmdargv() []string { if ev.GetEventType().String() != "signal" { - return uint32(0) + return []string{} } if ev.Signal.Target == nil { - return uint32(0) + return []string{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.Signal.Target.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveProcessCmdArgv(ev, &element.ProcessContext.Process) + values = append(values, result...) 
+ ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.PIDContext.Pid + return values } -// GetSignalTargetParentPpid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentPpid() uint32 { +// GetSignalTargetAncestorsEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsEnvp() []string { if ev.GetEventType().String() != "signal" { - return uint32(0) + return []string{} } if ev.Signal.Target == nil { - return uint32(0) + return []string{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.Signal.Target.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) + values = append(values, result...) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.PPid + return values } -// GetSignalTargetParentTid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentTid() uint32 { +// GetSignalTargetAncestorsFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsFilePath() []string { if ev.GetEventType().String() != "signal" { - return uint32(0) + return []string{} } if ev.Signal.Target == nil { - return uint32(0) + return []string{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.Signal.Target.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.PIDContext.Tid + return values } -// GetSignalTargetParentTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentTtyName() string { +// GetSignalTargetAncestorsFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsFilePathLength() []int { if ev.GetEventType().String() != "signal" { - return "" + return []int{} } if ev.Signal.Target == nil { - return "" + return []int{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.Signal.Target.Ancestor == nil { + return []int{} } - if !ev.Signal.Target.HasParent() { - return "" + var values []int + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.TTYName + return values } -// GetSignalTargetParentUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentUid() uint32 { +// GetSignalTargetAncestorsGid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsGid() []uint32 { if ev.GetEventType().String() != "signal" { - return uint32(0) + return []uint32{} } if ev.Signal.Target == nil { - return uint32(0) + return 
[]uint32{} } - if ev.Signal.Target.Parent == nil { - return uint32(0) + if ev.Signal.Target.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return uint32(0) + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.GID + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.UID + return values } -// GetSignalTargetParentUser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentUser() string { +// GetSignalTargetAncestorsGroup returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsGroup() []string { if ev.GetEventType().String() != "signal" { - return "" + return []string{} } if ev.Signal.Target == nil { - return "" + return []string{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.Signal.Target.Ancestor == nil { + return []string{} } - if !ev.Signal.Target.HasParent() { - return "" + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.Group + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Parent.Credentials.User + return values } -// GetSignalTargetParentUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentUserSessionK8sGroups() []string { +// GetSignalTargetAncestorsInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsInterpreterFilePath() []string { if ev.GetEventType().String() != "signal" { return []string{} } if ev.Signal.Target == nil { return []string{} } - if ev.Signal.Target.Parent == nil { + if ev.Signal.Target.Ancestor == nil { return []string{} } - if !ev.Signal.Target.HasParent() { - return []string{} + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent) + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Signal.Target.Parent.UserSession) + return values } -// GetSignalTargetParentUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentUserSessionK8sUid() string { +// GetSignalTargetAncestorsInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsInterpreterFilePathLength() []int { if ev.GetEventType().String() != "signal" { - return "" + return []int{} } if ev.Signal.Target == nil { - return "" + return []int{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.Signal.Target.Ancestor == nil { + return []int{} } - if !ev.Signal.Target.HasParent() { - return "" + var values []int + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.LinuxBinprm.FileEvent)) + values = append(values, 
result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Signal.Target.Parent.UserSession) + return values } -// GetSignalTargetParentUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetParentUserSessionK8sUsername() string { +// GetSignalTargetAncestorsPid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsPid() []uint32 { if ev.GetEventType().String() != "signal" { - return "" + return []uint32{} } if ev.Signal.Target == nil { - return "" + return []uint32{} } - if ev.Signal.Target.Parent == nil { - return "" + if ev.Signal.Target.Ancestor == nil { + return []uint32{} } - if !ev.Signal.Target.HasParent() { - return "" + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.PIDContext.Pid + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Signal.Target.Parent.UserSession) + return values } -// GetSignalTargetPid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetPid() uint32 { +// GetSignalTargetAncestorsPpid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsPpid() []uint32 { if ev.GetEventType().String() != "signal" { - return uint32(0) + return []uint32{} } if ev.Signal.Target == nil { - return uint32(0) + return []uint32{} } - return ev.Signal.Target.Process.PIDContext.Pid -} - -// GetSignalTargetPpid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetPpid() uint32 { - if ev.GetEventType().String() != "signal" { - return uint32(0) + if ev.Signal.Target.Ancestor == nil { + return []uint32{} } - if ev.Signal.Target == nil { - return uint32(0) + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.PPid + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Process.PPid + return values } -// GetSignalTargetTid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetTid() uint32 { +// GetSignalTargetAncestorsUid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsUid() []uint32 { if ev.GetEventType().String() != "signal" { - return uint32(0) + return []uint32{} } if ev.Signal.Target == nil { - return uint32(0) + return []uint32{} } - return ev.Signal.Target.Process.PIDContext.Tid -} - -// GetSignalTargetTtyName returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetTtyName() string { - if ev.GetEventType().String() != "signal" { - return "" + if ev.Signal.Target.Ancestor == nil { + return []uint32{} } - if ev.Signal.Target == nil { - return "" + var values []uint32 + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.UID + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Process.TTYName + return values } -// GetSignalTargetUid returns the value of the field, resolving if necessary -func (ev *Event) 
GetSignalTargetUid() uint32 { +// GetSignalTargetAncestorsUser returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetAncestorsUser() []string { if ev.GetEventType().String() != "signal" { - return uint32(0) + return []string{} } if ev.Signal.Target == nil { - return uint32(0) + return []string{} } - return ev.Signal.Target.Process.Credentials.UID -} - -// GetSignalTargetUser returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetUser() string { - if ev.GetEventType().String() != "signal" { - return "" + if ev.Signal.Target.Ancestor == nil { + return []string{} } - if ev.Signal.Target == nil { - return "" + var values []string + ctx := eval.NewContext(ev) + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.Credentials.User + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.Signal.Target.Process.Credentials.User + return values } -// GetSignalTargetUserSessionK8sGroups returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetUserSessionK8sGroups() []string { +// GetSignalTargetCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetCmdargv() []string { if ev.GetEventType().String() != "signal" { return []string{} } if ev.Signal.Target == nil { return []string{} } - return ev.FieldHandlers.ResolveK8SGroups(ev, &ev.Signal.Target.Process.UserSession) + return ev.FieldHandlers.ResolveProcessCmdArgv(ev, &ev.Signal.Target.Process) } -// GetSignalTargetUserSessionK8sUid returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetUserSessionK8sUid() string { +// GetSignalTargetEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetEnvp() []string { if ev.GetEventType().String() != "signal" { - return "" + return []string{} } if ev.Signal.Target == nil { - return "" + return []string{} } - return ev.FieldHandlers.ResolveK8SUID(ev, &ev.Signal.Target.Process.UserSession) + return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.Signal.Target.Process) } -// GetSignalTargetUserSessionK8sUsername returns the value of the field, resolving if necessary -func (ev *Event) GetSignalTargetUserSessionK8sUsername() string { +// GetSignalTargetExecTime returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetExecTime() time.Time { if ev.GetEventType().String() != "signal" { - return "" + return time.Time{} } if ev.Signal.Target == nil { - return "" + return time.Time{} } - return ev.FieldHandlers.ResolveK8SUsername(ev, &ev.Signal.Target.Process.UserSession) + return ev.Signal.Target.Process.ExecTime } -// GetSignalType returns the value of the field, resolving if necessary -func (ev *Event) GetSignalType() uint32 { +// GetSignalTargetExitTime returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetExitTime() time.Time { if ev.GetEventType().String() != "signal" { - return uint32(0) + return time.Time{} } - return ev.Signal.Type -} - -// GetSpliceFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileChangeTime() uint64 { - if ev.GetEventType().String() != "splice" { - return uint64(0) + if ev.Signal.Target == nil { + return time.Time{} } - return ev.Splice.File.FileFields.CTime + return ev.Signal.Target.Process.ExitTime } -// GetSpliceFileFilesystem returns the value of 
the field, resolving if necessary -func (ev *Event) GetSpliceFileFilesystem() string { - if ev.GetEventType().String() != "splice" { +// GetSignalTargetFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetFilePath() string { + if ev.GetEventType().String() != "signal" { return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Splice.File) -} - -// GetSpliceFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileGid() uint32 { - if ev.GetEventType().String() != "splice" { - return uint32(0) + if ev.Signal.Target == nil { + return "" } - return ev.Splice.File.FileFields.GID -} - -// GetSpliceFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileGroup() string { - if ev.GetEventType().String() != "splice" { + if !ev.Signal.Target.Process.IsNotKworker() { return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Splice.File.FileFields) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent) } -// GetSpliceFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileHashes() []string { - if ev.GetEventType().String() != "splice" { - return []string{} +// GetSignalTargetFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetFilePathLength() int { + if ev.GetEventType().String() != "signal" { + return 0 } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Splice.File) -} - -// GetSpliceFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileInUpperLayer() bool { - if ev.GetEventType().String() != "splice" { - return false + if ev.Signal.Target == nil { + return 0 } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Splice.File.FileFields) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.FileEvent)) } -// GetSpliceFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileInode() uint64 { - if ev.GetEventType().String() != "splice" { - return uint64(0) +// GetSignalTargetForkTime returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetForkTime() time.Time { + if ev.GetEventType().String() != "signal" { + return time.Time{} } - return ev.Splice.File.FileFields.PathKey.Inode -} - -// GetSpliceFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileMode() uint16 { - if ev.GetEventType().String() != "splice" { - return uint16(0) + if ev.Signal.Target == nil { + return time.Time{} } - return ev.Splice.File.FileFields.Mode + return ev.Signal.Target.Process.ForkTime } -// GetSpliceFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileModificationTime() uint64 { - if ev.GetEventType().String() != "splice" { - return uint64(0) +// GetSignalTargetGid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetGid() uint32 { + if ev.GetEventType().String() != "signal" { + return uint32(0) } - return ev.Splice.File.FileFields.MTime -} - -// GetSpliceFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileMountId() uint32 { - if ev.GetEventType().String() != "splice" { + if ev.Signal.Target == nil { return uint32(0) } - return ev.Splice.File.FileFields.PathKey.MountID + return ev.Signal.Target.Process.Credentials.GID } -// GetSpliceFileName 
returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileName() string { - if ev.GetEventType().String() != "splice" { +// GetSignalTargetGroup returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetGroup() string { + if ev.GetEventType().String() != "signal" { return "" } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Splice.File) -} - -// GetSpliceFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileNameLength() int { - if ev.GetEventType().String() != "splice" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Splice.File)) -} - -// GetSpliceFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFilePackageName() string { - if ev.GetEventType().String() != "splice" { + if ev.Signal.Target == nil { return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Splice.File) + return ev.Signal.Target.Process.Credentials.Group } -// GetSpliceFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFilePackageSourceVersion() string { - if ev.GetEventType().String() != "splice" { +// GetSignalTargetInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetInterpreterFilePath() string { + if ev.GetEventType().String() != "signal" { return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Splice.File) -} - -// GetSpliceFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFilePackageVersion() string { - if ev.GetEventType().String() != "splice" { + if ev.Signal.Target == nil { return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Splice.File) -} - -// GetSpliceFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFilePath() string { - if ev.GetEventType().String() != "splice" { + if !ev.Signal.Target.Process.HasInterpreter() { return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Splice.File) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent) } -// GetSpliceFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFilePathLength() int { - if ev.GetEventType().String() != "splice" { +// GetSignalTargetInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetInterpreterFilePathLength() int { + if ev.GetEventType().String() != "signal" { return 0 } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Splice.File)) -} - -// GetSpliceFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileRights() int { - if ev.GetEventType().String() != "splice" { + if ev.Signal.Target == nil { return 0 } - return ev.FieldHandlers.ResolveRights(ev, &ev.Splice.File.FileFields) -} - -// GetSpliceFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileUid() uint32 { - if ev.GetEventType().String() != "splice" { - return uint32(0) - } - return ev.Splice.File.FileFields.UID -} - -// GetSpliceFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceFileUser() string { - if ev.GetEventType().String() != "splice" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Splice.File.FileFields) -} - -// GetSplicePipeEntryFlag returns the value of the field, 
resolving if necessary -func (ev *Event) GetSplicePipeEntryFlag() uint32 { - if ev.GetEventType().String() != "splice" { - return uint32(0) - } - return ev.Splice.PipeEntryFlag -} - -// GetSplicePipeExitFlag returns the value of the field, resolving if necessary -func (ev *Event) GetSplicePipeExitFlag() uint32 { - if ev.GetEventType().String() != "splice" { - return uint32(0) - } - return ev.Splice.PipeExitFlag -} - -// GetSpliceRetval returns the value of the field, resolving if necessary -func (ev *Event) GetSpliceRetval() int64 { - if ev.GetEventType().String() != "splice" { - return int64(0) - } - return ev.Splice.SyscallEvent.Retval -} - -// GetTimestamp returns the value of the field, resolving if necessary -func (ev *Event) GetTimestamp() time.Time { - return ev.FieldHandlers.ResolveEventTime(ev, &ev.BaseEvent) -} - -// GetUnlinkFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileChangeTime() uint64 { - if ev.GetEventType().String() != "unlink" { - return uint64(0) - } - return ev.Unlink.File.FileFields.CTime -} - -// GetUnlinkFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileFilesystem() string { - if ev.GetEventType().String() != "unlink" { - return "" - } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Unlink.File) -} - -// GetUnlinkFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileGid() uint32 { - if ev.GetEventType().String() != "unlink" { - return uint32(0) - } - return ev.Unlink.File.FileFields.GID -} - -// GetUnlinkFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileGroup() string { - if ev.GetEventType().String() != "unlink" { - return "" - } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Unlink.File.FileFields) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Process.LinuxBinprm.FileEvent)) } -// GetUnlinkFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileHashes() []string { - if ev.GetEventType().String() != "unlink" { +// GetSignalTargetParentCmdargv returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentCmdargv() []string { + if ev.GetEventType().String() != "signal" { return []string{} } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Unlink.File) -} - -// GetUnlinkFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileInUpperLayer() bool { - if ev.GetEventType().String() != "unlink" { - return false - } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Unlink.File.FileFields) -} - -// GetUnlinkFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileInode() uint64 { - if ev.GetEventType().String() != "unlink" { - return uint64(0) - } - return ev.Unlink.File.FileFields.PathKey.Inode -} - -// GetUnlinkFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileMode() uint16 { - if ev.GetEventType().String() != "unlink" { - return uint16(0) - } - return ev.Unlink.File.FileFields.Mode -} - -// GetUnlinkFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileModificationTime() uint64 { - if ev.GetEventType().String() != "unlink" { - return uint64(0) + if ev.Signal.Target == nil { + return []string{} } - return ev.Unlink.File.FileFields.MTime -} - -// GetUnlinkFileMountId 
returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileMountId() uint32 { - if ev.GetEventType().String() != "unlink" { - return uint32(0) + if ev.Signal.Target.Parent == nil { + return []string{} } - return ev.Unlink.File.FileFields.PathKey.MountID -} - -// GetUnlinkFileName returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileName() string { - if ev.GetEventType().String() != "unlink" { - return "" + if !ev.Signal.Target.HasParent() { + return []string{} } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Unlink.File) + return ev.FieldHandlers.ResolveProcessCmdArgv(ev, ev.Signal.Target.Parent) } -// GetUnlinkFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileNameLength() int { - if ev.GetEventType().String() != "unlink" { - return 0 +// GetSignalTargetParentEnvp returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentEnvp() []string { + if ev.GetEventType().String() != "signal" { + return []string{} } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Unlink.File)) -} - -// GetUnlinkFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFilePackageName() string { - if ev.GetEventType().String() != "unlink" { - return "" + if ev.Signal.Target == nil { + return []string{} } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Unlink.File) -} - -// GetUnlinkFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFilePackageSourceVersion() string { - if ev.GetEventType().String() != "unlink" { - return "" + if ev.Signal.Target.Parent == nil { + return []string{} } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Unlink.File) -} - -// GetUnlinkFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFilePackageVersion() string { - if ev.GetEventType().String() != "unlink" { - return "" + if !ev.Signal.Target.HasParent() { + return []string{} } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Unlink.File) + return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Signal.Target.Parent) } -// GetUnlinkFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFilePath() string { - if ev.GetEventType().String() != "unlink" { +// GetSignalTargetParentFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentFilePath() string { + if ev.GetEventType().String() != "signal" { return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Unlink.File) -} - -// GetUnlinkFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFilePathLength() int { - if ev.GetEventType().String() != "unlink" { - return 0 - } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Unlink.File)) -} - -// GetUnlinkFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileRights() int { - if ev.GetEventType().String() != "unlink" { - return 0 - } - return ev.FieldHandlers.ResolveRights(ev, &ev.Unlink.File.FileFields) -} - -// GetUnlinkFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileUid() uint32 { - if ev.GetEventType().String() != "unlink" { - return uint32(0) - } - return ev.Unlink.File.FileFields.UID -} - -// GetUnlinkFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFileUser() 
string { - if ev.GetEventType().String() != "unlink" { + if ev.Signal.Target == nil { return "" } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Unlink.File.FileFields) -} - -// GetUnlinkFlags returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkFlags() uint32 { - if ev.GetEventType().String() != "unlink" { - return uint32(0) + if ev.Signal.Target.Parent == nil { + return "" } - return ev.Unlink.Flags -} - -// GetUnlinkRetval returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkRetval() int64 { - if ev.GetEventType().String() != "unlink" { - return int64(0) + if !ev.Signal.Target.HasParent() { + return "" } - return ev.Unlink.SyscallEvent.Retval -} - -// GetUnlinkSyscallDirfd returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkSyscallDirfd() int { - if ev.GetEventType().String() != "unlink" { - return 0 + if !ev.Signal.Target.Parent.IsNotKworker() { + return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Unlink.SyscallContext) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent) } -// GetUnlinkSyscallFlags returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkSyscallFlags() int { - if ev.GetEventType().String() != "unlink" { +// GetSignalTargetParentFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentFilePathLength() int { + if ev.GetEventType().String() != "signal" { return 0 } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Unlink.SyscallContext) -} - -// GetUnlinkSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkSyscallInt1() int { - if ev.GetEventType().String() != "unlink" { + if ev.Signal.Target == nil { return 0 } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Unlink.SyscallContext) -} - -// GetUnlinkSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkSyscallInt2() int { - if ev.GetEventType().String() != "unlink" { + if ev.Signal.Target.Parent == nil { return 0 } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Unlink.SyscallContext) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.FileEvent)) } -// GetUnlinkSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkSyscallInt3() int { - if ev.GetEventType().String() != "unlink" { - return 0 +// GetSignalTargetParentGid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentGid() uint32 { + if ev.GetEventType().String() != "signal" { + return uint32(0) } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Unlink.SyscallContext) -} - -// GetUnlinkSyscallPath returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkSyscallPath() string { - if ev.GetEventType().String() != "unlink" { - return "" + if ev.Signal.Target == nil { + return uint32(0) + } + if ev.Signal.Target.Parent == nil { + return uint32(0) + } + if !ev.Signal.Target.HasParent() { + return uint32(0) } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Unlink.SyscallContext) + return ev.Signal.Target.Parent.Credentials.GID } -// GetUnlinkSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkSyscallStr1() string { - if ev.GetEventType().String() != "unlink" { +// GetSignalTargetParentGroup returns the value of the field, resolving if necessary +func 
(ev *Event) GetSignalTargetParentGroup() string { + if ev.GetEventType().String() != "signal" { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Unlink.SyscallContext) -} - -// GetUnlinkSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkSyscallStr2() string { - if ev.GetEventType().String() != "unlink" { + if ev.Signal.Target == nil { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Unlink.SyscallContext) -} - -// GetUnlinkSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetUnlinkSyscallStr3() string { - if ev.GetEventType().String() != "unlink" { + if ev.Signal.Target.Parent == nil { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Unlink.SyscallContext) -} - -// GetUnloadModuleName returns the value of the field, resolving if necessary -func (ev *Event) GetUnloadModuleName() string { - if ev.GetEventType().String() != "unload_module" { + if !ev.Signal.Target.HasParent() { return "" } - return ev.UnloadModule.Name + return ev.Signal.Target.Parent.Credentials.Group } -// GetUnloadModuleRetval returns the value of the field, resolving if necessary -func (ev *Event) GetUnloadModuleRetval() int64 { - if ev.GetEventType().String() != "unload_module" { - return int64(0) +// GetSignalTargetParentInterpreterFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentInterpreterFilePath() string { + if ev.GetEventType().String() != "signal" { + return "" } - return ev.UnloadModule.SyscallEvent.Retval -} - -// GetUtimesFileChangeTime returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileChangeTime() uint64 { - if ev.GetEventType().String() != "utimes" { - return uint64(0) + if ev.Signal.Target == nil { + return "" } - return ev.Utimes.File.FileFields.CTime -} - -// GetUtimesFileFilesystem returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileFilesystem() string { - if ev.GetEventType().String() != "utimes" { + if ev.Signal.Target.Parent == nil { return "" } - return ev.FieldHandlers.ResolveFileFilesystem(ev, &ev.Utimes.File) -} - -// GetUtimesFileGid returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileGid() uint32 { - if ev.GetEventType().String() != "utimes" { - return uint32(0) + if !ev.Signal.Target.HasParent() { + return "" } - return ev.Utimes.File.FileFields.GID -} - -// GetUtimesFileGroup returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileGroup() string { - if ev.GetEventType().String() != "utimes" { + if !ev.Signal.Target.Parent.HasInterpreter() { return "" } - return ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.Utimes.File.FileFields) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent) } -// GetUtimesFileHashes returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileHashes() []string { - if ev.GetEventType().String() != "utimes" { - return []string{} +// GetSignalTargetParentInterpreterFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentInterpreterFilePathLength() int { + if ev.GetEventType().String() != "signal" { + return 0 } - return ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Utimes.File) -} - -// GetUtimesFileInUpperLayer returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileInUpperLayer() bool { - if 
ev.GetEventType().String() != "utimes" { - return false + if ev.Signal.Target == nil { + return 0 } - return ev.FieldHandlers.ResolveFileFieldsInUpperLayer(ev, &ev.Utimes.File.FileFields) -} - -// GetUtimesFileInode returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileInode() uint64 { - if ev.GetEventType().String() != "utimes" { - return uint64(0) + if ev.Signal.Target.Parent == nil { + return 0 } - return ev.Utimes.File.FileFields.PathKey.Inode + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Signal.Target.Parent.LinuxBinprm.FileEvent)) } -// GetUtimesFileMode returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileMode() uint16 { - if ev.GetEventType().String() != "utimes" { - return uint16(0) +// GetSignalTargetParentPid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentPid() uint32 { + if ev.GetEventType().String() != "signal" { + return uint32(0) } - return ev.Utimes.File.FileFields.Mode -} - -// GetUtimesFileModificationTime returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileModificationTime() uint64 { - if ev.GetEventType().String() != "utimes" { - return uint64(0) + if ev.Signal.Target == nil { + return uint32(0) } - return ev.Utimes.File.FileFields.MTime -} - -// GetUtimesFileMountId returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileMountId() uint32 { - if ev.GetEventType().String() != "utimes" { + if ev.Signal.Target.Parent == nil { + return uint32(0) + } + if !ev.Signal.Target.HasParent() { return uint32(0) } - return ev.Utimes.File.FileFields.PathKey.MountID + return ev.Signal.Target.Parent.PIDContext.Pid } -// GetUtimesFileName returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileName() string { - if ev.GetEventType().String() != "utimes" { - return "" +// GetSignalTargetParentPpid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentPpid() uint32 { + if ev.GetEventType().String() != "signal" { + return uint32(0) + } + if ev.Signal.Target == nil { + return uint32(0) + } + if ev.Signal.Target.Parent == nil { + return uint32(0) + } + if !ev.Signal.Target.HasParent() { + return uint32(0) } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Utimes.File) + return ev.Signal.Target.Parent.PPid } -// GetUtimesFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileNameLength() int { - if ev.GetEventType().String() != "utimes" { - return 0 +// GetSignalTargetParentUid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentUid() uint32 { + if ev.GetEventType().String() != "signal" { + return uint32(0) + } + if ev.Signal.Target == nil { + return uint32(0) + } + if ev.Signal.Target.Parent == nil { + return uint32(0) + } + if !ev.Signal.Target.HasParent() { + return uint32(0) } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Utimes.File)) + return ev.Signal.Target.Parent.Credentials.UID } -// GetUtimesFilePackageName returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFilePackageName() string { - if ev.GetEventType().String() != "utimes" { +// GetSignalTargetParentUser returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetParentUser() string { + if ev.GetEventType().String() != "signal" { return "" } - return ev.FieldHandlers.ResolvePackageName(ev, &ev.Utimes.File) -} - -// 
GetUtimesFilePackageSourceVersion returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFilePackageSourceVersion() string { - if ev.GetEventType().String() != "utimes" { + if ev.Signal.Target == nil { return "" } - return ev.FieldHandlers.ResolvePackageSourceVersion(ev, &ev.Utimes.File) -} - -// GetUtimesFilePackageVersion returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFilePackageVersion() string { - if ev.GetEventType().String() != "utimes" { + if ev.Signal.Target.Parent == nil { return "" } - return ev.FieldHandlers.ResolvePackageVersion(ev, &ev.Utimes.File) -} - -// GetUtimesFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFilePath() string { - if ev.GetEventType().String() != "utimes" { + if !ev.Signal.Target.HasParent() { return "" } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Utimes.File) + return ev.Signal.Target.Parent.Credentials.User } -// GetUtimesFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFilePathLength() int { - if ev.GetEventType().String() != "utimes" { - return 0 +// GetSignalTargetPid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetPid() uint32 { + if ev.GetEventType().String() != "signal" { + return uint32(0) } - return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Utimes.File)) + if ev.Signal.Target == nil { + return uint32(0) + } + return ev.Signal.Target.Process.PIDContext.Pid } -// GetUtimesFileRights returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileRights() int { - if ev.GetEventType().String() != "utimes" { - return 0 +// GetSignalTargetPpid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetPpid() uint32 { + if ev.GetEventType().String() != "signal" { + return uint32(0) } - return ev.FieldHandlers.ResolveRights(ev, &ev.Utimes.File.FileFields) + if ev.Signal.Target == nil { + return uint32(0) + } + return ev.Signal.Target.Process.PPid } -// GetUtimesFileUid returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileUid() uint32 { - if ev.GetEventType().String() != "utimes" { +// GetSignalTargetUid returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetUid() uint32 { + if ev.GetEventType().String() != "signal" { return uint32(0) } - return ev.Utimes.File.FileFields.UID + if ev.Signal.Target == nil { + return uint32(0) + } + return ev.Signal.Target.Process.Credentials.UID } -// GetUtimesFileUser returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesFileUser() string { - if ev.GetEventType().String() != "utimes" { +// GetSignalTargetUser returns the value of the field, resolving if necessary +func (ev *Event) GetSignalTargetUser() string { + if ev.GetEventType().String() != "signal" { return "" } - return ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.Utimes.File.FileFields) -} - -// GetUtimesRetval returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesRetval() int64 { - if ev.GetEventType().String() != "utimes" { - return int64(0) + if ev.Signal.Target == nil { + return "" } - return ev.Utimes.SyscallEvent.Retval + return ev.Signal.Target.Process.Credentials.User } -// GetUtimesSyscallInt1 returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesSyscallInt1() int { - if ev.GetEventType().String() != "utimes" { - return 0 +// GetSpliceFilePath returns the 
value of the field, resolving if necessary +func (ev *Event) GetSpliceFilePath() string { + if ev.GetEventType().String() != "splice" { + return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt1(ev, &ev.Utimes.SyscallContext) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Splice.File) } -// GetUtimesSyscallInt2 returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesSyscallInt2() int { - if ev.GetEventType().String() != "utimes" { +// GetSpliceFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetSpliceFilePathLength() int { + if ev.GetEventType().String() != "splice" { return 0 } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Utimes.SyscallContext) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Splice.File)) } -// GetUtimesSyscallInt3 returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesSyscallInt3() int { - if ev.GetEventType().String() != "utimes" { - return 0 - } - return ev.FieldHandlers.ResolveSyscallCtxArgsInt3(ev, &ev.Utimes.SyscallContext) +// GetTimestamp returns the value of the field, resolving if necessary +func (ev *Event) GetTimestamp() time.Time { + return ev.FieldHandlers.ResolveEventTime(ev, &ev.BaseEvent) } -// GetUtimesSyscallPath returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesSyscallPath() string { - if ev.GetEventType().String() != "utimes" { +// GetUnlinkFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetUnlinkFilePath() string { + if ev.GetEventType().String() != "unlink" { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Utimes.SyscallContext) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Unlink.File) } -// GetUtimesSyscallStr1 returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesSyscallStr1() string { - if ev.GetEventType().String() != "utimes" { - return "" +// GetUnlinkFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetUnlinkFilePathLength() int { + if ev.GetEventType().String() != "unlink" { + return 0 } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Utimes.SyscallContext) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Unlink.File)) } -// GetUtimesSyscallStr2 returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesSyscallStr2() string { +// GetUtimesFilePath returns the value of the field, resolving if necessary +func (ev *Event) GetUtimesFilePath() string { if ev.GetEventType().String() != "utimes" { return "" } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr2(ev, &ev.Utimes.SyscallContext) + return ev.FieldHandlers.ResolveFilePath(ev, &ev.Utimes.File) } -// GetUtimesSyscallStr3 returns the value of the field, resolving if necessary -func (ev *Event) GetUtimesSyscallStr3() string { +// GetUtimesFilePathLength returns the value of the field, resolving if necessary +func (ev *Event) GetUtimesFilePathLength() int { if ev.GetEventType().String() != "utimes" { - return "" + return 0 } - return ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Utimes.SyscallContext) + return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.Utimes.File)) } diff --git a/pkg/security/secl/model/field_accessors_windows.go b/pkg/security/secl/model/field_accessors_windows.go index 1ed3130e6951e..7985ac407f831 100644 --- a/pkg/security/secl/model/field_accessors_windows.go +++ b/pkg/security/secl/model/field_accessors_windows.go @@ -10,56 +10,13 @@ package model 
import ( "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "net" "time" ) -// GetChangePermissionNewSd returns the value of the field, resolving if necessary -func (ev *Event) GetChangePermissionNewSd() string { - if ev.GetEventType().String() != "change_permission" { - return "" - } - return ev.FieldHandlers.ResolveNewSecurityDescriptor(ev, &ev.ChangePermission) -} - -// GetChangePermissionOldSd returns the value of the field, resolving if necessary -func (ev *Event) GetChangePermissionOldSd() string { - if ev.GetEventType().String() != "change_permission" { - return "" - } - return ev.FieldHandlers.ResolveOldSecurityDescriptor(ev, &ev.ChangePermission) -} - -// GetChangePermissionPath returns the value of the field, resolving if necessary -func (ev *Event) GetChangePermissionPath() string { - if ev.GetEventType().String() != "change_permission" { - return "" - } - return ev.ChangePermission.ObjectName -} - -// GetChangePermissionType returns the value of the field, resolving if necessary -func (ev *Event) GetChangePermissionType() string { - if ev.GetEventType().String() != "change_permission" { - return "" - } - return ev.ChangePermission.ObjectType -} - -// GetChangePermissionUserDomain returns the value of the field, resolving if necessary -func (ev *Event) GetChangePermissionUserDomain() string { - if ev.GetEventType().String() != "change_permission" { - return "" - } - return ev.ChangePermission.UserDomain -} - -// GetChangePermissionUsername returns the value of the field, resolving if necessary -func (ev *Event) GetChangePermissionUsername() string { - if ev.GetEventType().String() != "change_permission" { - return "" - } - return ev.ChangePermission.UserName -} +var _ = time.Time{} +var _ = net.IP{} +var _ = eval.NewContext // GetContainerCreatedAt returns the value of the field, resolving if necessary func (ev *Event) GetContainerCreatedAt() int { @@ -77,315 +34,11 @@ func (ev *Event) GetContainerId() string { return ev.FieldHandlers.ResolveContainerID(ev, ev.BaseEvent.ContainerContext) } -// GetContainerRuntime returns the value of the field, resolving if necessary -func (ev *Event) GetContainerRuntime() string { - if ev.BaseEvent.ContainerContext == nil { - return "" - } - return ev.FieldHandlers.ResolveContainerRuntime(ev, ev.BaseEvent.ContainerContext) -} - -// GetContainerTags returns the value of the field, resolving if necessary -func (ev *Event) GetContainerTags() []string { - if ev.BaseEvent.ContainerContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveContainerTags(ev, ev.BaseEvent.ContainerContext) -} - -// GetCreateFileDevicePath returns the value of the field, resolving if necessary -func (ev *Event) GetCreateFileDevicePath() string { - if ev.GetEventType().String() != "create" { - return "" - } - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.CreateNewFile.File) -} - -// GetCreateFileDevicePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetCreateFileDevicePathLength() int { - if ev.GetEventType().String() != "create" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFilePath(ev, &ev.CreateNewFile.File)) -} - -// GetCreateFileName returns the value of the field, resolving if necessary -func (ev *Event) GetCreateFileName() string { - if ev.GetEventType().String() != "create" { - return "" - } - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.CreateNewFile.File) -} - -// GetCreateFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) 
GetCreateFileNameLength() int { - if ev.GetEventType().String() != "create" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.CreateNewFile.File)) -} - -// GetCreateFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetCreateFilePath() string { - if ev.GetEventType().String() != "create" { - return "" - } - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.CreateNewFile.File) -} - -// GetCreateFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetCreateFilePathLength() int { - if ev.GetEventType().String() != "create" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileUserPath(ev, &ev.CreateNewFile.File)) -} - -// GetCreateRegistryKeyName returns the value of the field, resolving if necessary -func (ev *Event) GetCreateRegistryKeyName() string { - if ev.GetEventType().String() != "create_key" { - return "" - } - return ev.CreateRegistryKey.Registry.KeyName -} - -// GetCreateRegistryKeyNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetCreateRegistryKeyNameLength() int { - if ev.GetEventType().String() != "create_key" { - return 0 - } - return len(ev.CreateRegistryKey.Registry.KeyName) -} - -// GetCreateRegistryKeyPath returns the value of the field, resolving if necessary -func (ev *Event) GetCreateRegistryKeyPath() string { - if ev.GetEventType().String() != "create_key" { - return "" - } - return ev.CreateRegistryKey.Registry.KeyPath -} - -// GetCreateRegistryKeyPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetCreateRegistryKeyPathLength() int { - if ev.GetEventType().String() != "create_key" { - return 0 - } - return len(ev.CreateRegistryKey.Registry.KeyPath) -} - -// GetCreateKeyRegistryKeyName returns the value of the field, resolving if necessary -func (ev *Event) GetCreateKeyRegistryKeyName() string { - if ev.GetEventType().String() != "create_key" { - return "" - } - return ev.CreateRegistryKey.Registry.KeyName -} - -// GetCreateKeyRegistryKeyNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetCreateKeyRegistryKeyNameLength() int { - if ev.GetEventType().String() != "create_key" { - return 0 - } - return len(ev.CreateRegistryKey.Registry.KeyName) -} - -// GetCreateKeyRegistryKeyPath returns the value of the field, resolving if necessary -func (ev *Event) GetCreateKeyRegistryKeyPath() string { - if ev.GetEventType().String() != "create_key" { - return "" - } - return ev.CreateRegistryKey.Registry.KeyPath -} - -// GetCreateKeyRegistryKeyPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetCreateKeyRegistryKeyPathLength() int { - if ev.GetEventType().String() != "create_key" { - return 0 - } - return len(ev.CreateRegistryKey.Registry.KeyPath) -} - -// GetDeleteFileDevicePath returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteFileDevicePath() string { - if ev.GetEventType().String() != "delete" { - return "" - } - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.DeleteFile.File) -} - -// GetDeleteFileDevicePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteFileDevicePathLength() int { - if ev.GetEventType().String() != "delete" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFilePath(ev, &ev.DeleteFile.File)) -} - -// GetDeleteFileName returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteFileName() string { - if 
ev.GetEventType().String() != "delete" { - return "" - } - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.DeleteFile.File) -} - -// GetDeleteFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteFileNameLength() int { - if ev.GetEventType().String() != "delete" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.DeleteFile.File)) -} - -// GetDeleteFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteFilePath() string { - if ev.GetEventType().String() != "delete" { - return "" - } - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.DeleteFile.File) -} - -// GetDeleteFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteFilePathLength() int { - if ev.GetEventType().String() != "delete" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileUserPath(ev, &ev.DeleteFile.File)) -} - -// GetDeleteRegistryKeyName returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteRegistryKeyName() string { - if ev.GetEventType().String() != "delete_key" { - return "" - } - return ev.DeleteRegistryKey.Registry.KeyName -} - -// GetDeleteRegistryKeyNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteRegistryKeyNameLength() int { - if ev.GetEventType().String() != "delete_key" { - return 0 - } - return len(ev.DeleteRegistryKey.Registry.KeyName) -} - -// GetDeleteRegistryKeyPath returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteRegistryKeyPath() string { - if ev.GetEventType().String() != "delete_key" { - return "" - } - return ev.DeleteRegistryKey.Registry.KeyPath -} - -// GetDeleteRegistryKeyPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteRegistryKeyPathLength() int { - if ev.GetEventType().String() != "delete_key" { - return 0 - } - return len(ev.DeleteRegistryKey.Registry.KeyPath) -} - -// GetDeleteKeyRegistryKeyName returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteKeyRegistryKeyName() string { - if ev.GetEventType().String() != "delete_key" { - return "" - } - return ev.DeleteRegistryKey.Registry.KeyName -} - -// GetDeleteKeyRegistryKeyNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteKeyRegistryKeyNameLength() int { - if ev.GetEventType().String() != "delete_key" { - return 0 - } - return len(ev.DeleteRegistryKey.Registry.KeyName) -} - -// GetDeleteKeyRegistryKeyPath returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteKeyRegistryKeyPath() string { - if ev.GetEventType().String() != "delete_key" { - return "" - } - return ev.DeleteRegistryKey.Registry.KeyPath -} - -// GetDeleteKeyRegistryKeyPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetDeleteKeyRegistryKeyPathLength() int { - if ev.GetEventType().String() != "delete_key" { - return 0 - } - return len(ev.DeleteRegistryKey.Registry.KeyPath) -} - -// GetEventHostname returns the value of the field, resolving if necessary -func (ev *Event) GetEventHostname() string { - return ev.FieldHandlers.ResolveHostname(ev, &ev.BaseEvent) -} - -// GetEventOrigin returns the value of the field, resolving if necessary -func (ev *Event) GetEventOrigin() string { - return ev.BaseEvent.Origin -} - -// GetEventOs returns the value of the field, resolving if necessary -func (ev *Event) GetEventOs() string { - return ev.BaseEvent.Os -} - 
// GetEventService returns the value of the field, resolving if necessary func (ev *Event) GetEventService() string { return ev.FieldHandlers.ResolveService(ev, &ev.BaseEvent) } -// GetEventTimestamp returns the value of the field, resolving if necessary -func (ev *Event) GetEventTimestamp() int { - return ev.FieldHandlers.ResolveEventTimestamp(ev, &ev.BaseEvent) -} - -// GetExecCmdline returns the value of the field, resolving if necessary -func (ev *Event) GetExecCmdline() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.Exec.Process) -} - -// GetExecCmdlineScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetExecCmdlineScrubbed() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessCmdLineScrubbed(ev, ev.Exec.Process) -} - -// GetExecContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetExecContainerId() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.ContainerID -} - -// GetExecCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetExecCreatedAt() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exec.Process) -} - // GetExecEnvp returns the value of the field, resolving if necessary func (ev *Event) GetExecEnvp() []string { if ev.GetEventType().String() != "exec" { @@ -397,17 +50,6 @@ func (ev *Event) GetExecEnvp() []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exec.Process) } -// GetExecEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetExecEnvs() []string { - if ev.GetEventType().String() != "exec" { - return []string{} - } - if ev.Exec.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exec.Process) -} - // GetExecExecTime returns the value of the field, resolving if necessary func (ev *Event) GetExecExecTime() time.Time { if ev.GetEventType().String() != "exec" { @@ -430,28 +72,6 @@ func (ev *Event) GetExecExitTime() time.Time { return ev.Exec.Process.ExitTime } -// GetExecFileName returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileName() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent) -} - -// GetExecFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetExecFileNameLength() int { - if ev.GetEventType().String() != "exec" { - return 0 - } - if ev.Exec.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent)) -} - // GetExecFilePath returns the value of the field, resolving if necessary func (ev *Event) GetExecFilePath() string { if ev.GetEventType().String() != "exec" { @@ -496,58 +116,6 @@ func (ev *Event) GetExecPpid() uint32 { return ev.Exec.Process.PPid } -// GetExecUser returns the value of the field, resolving if necessary -func (ev *Event) GetExecUser() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return 
ev.FieldHandlers.ResolveUser(ev, ev.Exec.Process) -} - -// GetExecUserSid returns the value of the field, resolving if necessary -func (ev *Event) GetExecUserSid() string { - if ev.GetEventType().String() != "exec" { - return "" - } - if ev.Exec.Process == nil { - return "" - } - return ev.Exec.Process.OwnerSidString -} - -// GetExitCause returns the value of the field, resolving if necessary -func (ev *Event) GetExitCause() uint32 { - if ev.GetEventType().String() != "exit" { - return uint32(0) - } - return ev.Exit.Cause -} - -// GetExitCmdline returns the value of the field, resolving if necessary -func (ev *Event) GetExitCmdline() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.Exit.Process) -} - -// GetExitCmdlineScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetExitCmdlineScrubbed() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessCmdLineScrubbed(ev, ev.Exit.Process) -} - // GetExitCode returns the value of the field, resolving if necessary func (ev *Event) GetExitCode() uint32 { if ev.GetEventType().String() != "exit" { @@ -556,48 +124,15 @@ func (ev *Event) GetExitCode() uint32 { return ev.Exit.Code } -// GetExitContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetExitContainerId() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.ContainerID -} - -// GetExitCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetExitCreatedAt() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exit.Process) -} - // GetExitEnvp returns the value of the field, resolving if necessary -func (ev *Event) GetExitEnvp() []string { - if ev.GetEventType().String() != "exit" { - return []string{} - } - if ev.Exit.Process == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exit.Process) -} - -// GetExitEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetExitEnvs() []string { +func (ev *Event) GetExitEnvp() []string { if ev.GetEventType().String() != "exit" { return []string{} } if ev.Exit.Process == nil { return []string{} } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exit.Process) + return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exit.Process) } // GetExitExecTime returns the value of the field, resolving if necessary @@ -622,28 +157,6 @@ func (ev *Event) GetExitExitTime() time.Time { return ev.Exit.Process.ExitTime } -// GetExitFileName returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileName() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent) -} - -// GetExitFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetExitFileNameLength() int { - if ev.GetEventType().String() != "exit" { - return 0 - } - if ev.Exit.Process == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent)) -} - // GetExitFilePath returns the value of the field, resolving if necessary func 
(ev *Event) GetExitFilePath() string { if ev.GetEventType().String() != "exit" { @@ -688,176 +201,6 @@ func (ev *Event) GetExitPpid() uint32 { return ev.Exit.Process.PPid } -// GetExitUser returns the value of the field, resolving if necessary -func (ev *Event) GetExitUser() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.FieldHandlers.ResolveUser(ev, ev.Exit.Process) -} - -// GetExitUserSid returns the value of the field, resolving if necessary -func (ev *Event) GetExitUserSid() string { - if ev.GetEventType().String() != "exit" { - return "" - } - if ev.Exit.Process == nil { - return "" - } - return ev.Exit.Process.OwnerSidString -} - -// GetOpenRegistryKeyName returns the value of the field, resolving if necessary -func (ev *Event) GetOpenRegistryKeyName() string { - if ev.GetEventType().String() != "open_key" { - return "" - } - return ev.OpenRegistryKey.Registry.KeyName -} - -// GetOpenRegistryKeyNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetOpenRegistryKeyNameLength() int { - if ev.GetEventType().String() != "open_key" { - return 0 - } - return len(ev.OpenRegistryKey.Registry.KeyName) -} - -// GetOpenRegistryKeyPath returns the value of the field, resolving if necessary -func (ev *Event) GetOpenRegistryKeyPath() string { - if ev.GetEventType().String() != "open_key" { - return "" - } - return ev.OpenRegistryKey.Registry.KeyPath -} - -// GetOpenRegistryKeyPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetOpenRegistryKeyPathLength() int { - if ev.GetEventType().String() != "open_key" { - return 0 - } - return len(ev.OpenRegistryKey.Registry.KeyPath) -} - -// GetOpenKeyRegistryKeyName returns the value of the field, resolving if necessary -func (ev *Event) GetOpenKeyRegistryKeyName() string { - if ev.GetEventType().String() != "open_key" { - return "" - } - return ev.OpenRegistryKey.Registry.KeyName -} - -// GetOpenKeyRegistryKeyNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetOpenKeyRegistryKeyNameLength() int { - if ev.GetEventType().String() != "open_key" { - return 0 - } - return len(ev.OpenRegistryKey.Registry.KeyName) -} - -// GetOpenKeyRegistryKeyPath returns the value of the field, resolving if necessary -func (ev *Event) GetOpenKeyRegistryKeyPath() string { - if ev.GetEventType().String() != "open_key" { - return "" - } - return ev.OpenRegistryKey.Registry.KeyPath -} - -// GetOpenKeyRegistryKeyPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetOpenKeyRegistryKeyPathLength() int { - if ev.GetEventType().String() != "open_key" { - return 0 - } - return len(ev.OpenRegistryKey.Registry.KeyPath) -} - -// GetProcessAncestorsCmdline returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCmdline() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessCmdLine(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCmdlineScrubbed returns the value of the field, resolving if necessary -func (ev *Event) 
GetProcessAncestorsCmdlineScrubbed() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessCmdLineScrubbed(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsContainerId() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.ContainerID - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsCreatedAt() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - // GetProcessAncestorsEnvp returns the value of the field, resolving if necessary func (ev *Event) GetProcessAncestorsEnvp() []string { if ev.BaseEvent.ProcessContext == nil { @@ -874,70 +217,7 @@ func (ev *Event) GetProcessAncestorsEnvp() []string { element := (*ProcessCacheEntry)(ptr) result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) values = append(values, result...) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsEnvs() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileName() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsFileNameLength() []int { - if ev.BaseEvent.ProcessContext == nil { - return []int{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []int{} - } - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - values = append(values, result) - ptr = iterator.Next() + ptr = iterator.Next(ctx) } return values } @@ -958,7 +238,7 @@ func (ev *Event) GetProcessAncestorsFilePath() []string { element := (*ProcessCacheEntry)(ptr) result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) values = append(values, result) - ptr = iterator.Next() + ptr = iterator.Next(ctx) } return values } @@ -979,24 +259,11 @@ func (ev *Event) GetProcessAncestorsFilePathLength() []int { element := (*ProcessCacheEntry)(ptr) result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) values = append(values, result) - ptr = iterator.Next() + ptr = iterator.Next(ctx) } return values } -// GetProcessAncestorsLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return 0 - } - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - return iterator.Len(ctx) -} - // GetProcessAncestorsPid returns the value of the field, resolving if necessary func (ev *Event) GetProcessAncestorsPid() []uint32 { if ev.BaseEvent.ProcessContext == nil { @@ -1013,7 +280,7 @@ func (ev *Event) GetProcessAncestorsPid() []uint32 { element := (*ProcessCacheEntry)(ptr) result := element.ProcessContext.Process.PIDContext.Pid values = append(values, result) - ptr = iterator.Next() + ptr = iterator.Next(ctx) } return values } @@ -1028,89 +295,15 @@ func (ev *Event) GetProcessAncestorsPpid() []uint32 { } var values []uint32 ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.PPid - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsUser() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - 
ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := ev.FieldHandlers.ResolveUser(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessAncestorsUserSid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessAncestorsUserSid() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Ancestor == nil { - return []string{} - } - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := (*ProcessCacheEntry)(ptr) - result := element.ProcessContext.Process.OwnerSidString - values = append(values, result) - ptr = iterator.Next() - } - return values -} - -// GetProcessCmdline returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCmdline() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessCmdLine(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessCmdlineScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCmdlineScrubbed() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveProcessCmdLineScrubbed(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessContainerId() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.ContainerID -} - -// GetProcessCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetProcessCreatedAt() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 + iterator := &ProcessAncestorsIterator{} + ptr := iterator.Front(ctx) + for ptr != nil { + element := (*ProcessCacheEntry)(ptr) + result := element.ProcessContext.Process.PPid + values = append(values, result) + ptr = iterator.Next(ctx) } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, &ev.BaseEvent.ProcessContext.Process) + return values } // GetProcessEnvp returns the value of the field, resolving if necessary @@ -1121,14 +314,6 @@ func (ev *Event) GetProcessEnvp() []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.BaseEvent.ProcessContext.Process) } -// GetProcessEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetProcessEnvs() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.BaseEvent.ProcessContext.Process) -} - // GetProcessExecTime returns the value of the field, resolving if necessary func (ev *Event) GetProcessExecTime() time.Time { if ev.BaseEvent.ProcessContext == nil { @@ -1145,22 +330,6 @@ func (ev *Event) GetProcessExitTime() time.Time { return ev.BaseEvent.ProcessContext.Process.ExitTime } -// GetProcessFileName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent) -} - -// GetProcessFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessFileNameLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - return len(ev.FieldHandlers.ResolveFileBasename(ev, 
&ev.BaseEvent.ProcessContext.Process.FileEvent)) -} - // GetProcessFilePath returns the value of the field, resolving if necessary func (ev *Event) GetProcessFilePath() string { if ev.BaseEvent.ProcessContext == nil { @@ -1177,62 +346,6 @@ func (ev *Event) GetProcessFilePathLength() int { return len(ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent)) } -// GetProcessParentCmdline returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCmdline() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentCmdlineScrubbed returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCmdlineScrubbed() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveProcessCmdLineScrubbed(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentContainerId returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentContainerId() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.ContainerID -} - -// GetProcessParentCreatedAt returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentCreatedAt() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0 - } - return ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent) -} - // GetProcessParentEnvp returns the value of the field, resolving if necessary func (ev *Event) GetProcessParentEnvp() []string { if ev.BaseEvent.ProcessContext == nil { @@ -1247,45 +360,6 @@ func (ev *Event) GetProcessParentEnvp() []string { return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent) } -// GetProcessParentEnvs returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentEnvs() []string { - if ev.BaseEvent.ProcessContext == nil { - return []string{} - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return []string{} - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentFileName returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileName() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) -} - -// GetProcessParentFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentFileNameLength() int { - if ev.BaseEvent.ProcessContext == nil { - return 0 - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return 0 - } - return 
len(ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent)) -} - // GetProcessParentFilePath returns the value of the field, resolving if necessary func (ev *Event) GetProcessParentFilePath() string { if ev.BaseEvent.ProcessContext == nil { @@ -1339,34 +413,6 @@ func (ev *Event) GetProcessParentPpid() uint32 { return ev.BaseEvent.ProcessContext.Parent.PPid } -// GetProcessParentUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentUser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.FieldHandlers.ResolveUser(ev, ev.BaseEvent.ProcessContext.Parent) -} - -// GetProcessParentUserSid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessParentUserSid() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - if ev.BaseEvent.ProcessContext.Parent == nil { - return "" - } - if !ev.BaseEvent.ProcessContext.HasParent() { - return "" - } - return ev.BaseEvent.ProcessContext.Parent.OwnerSidString -} - // GetProcessPid returns the value of the field, resolving if necessary func (ev *Event) GetProcessPid() uint32 { if ev.BaseEvent.ProcessContext == nil { @@ -1383,279 +429,7 @@ func (ev *Event) GetProcessPpid() uint32 { return ev.BaseEvent.ProcessContext.Process.PPid } -// GetProcessUser returns the value of the field, resolving if necessary -func (ev *Event) GetProcessUser() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.FieldHandlers.ResolveUser(ev, &ev.BaseEvent.ProcessContext.Process) -} - -// GetProcessUserSid returns the value of the field, resolving if necessary -func (ev *Event) GetProcessUserSid() string { - if ev.BaseEvent.ProcessContext == nil { - return "" - } - return ev.BaseEvent.ProcessContext.Process.OwnerSidString -} - -// GetRenameFileDestinationDevicePath returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationDevicePath() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.New) -} - -// GetRenameFileDestinationDevicePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationDevicePathLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.New)) -} - -// GetRenameFileDestinationName returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationName() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.New) -} - -// GetRenameFileDestinationNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationNameLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.New)) -} - -// GetRenameFileDestinationPath returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDestinationPath() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.New) -} - -// GetRenameFileDestinationPathLength returns the value of the field, resolving if necessary -func (ev *Event) 
GetRenameFileDestinationPathLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.New)) -} - -// GetRenameFileDevicePath returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDevicePath() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.Old) -} - -// GetRenameFileDevicePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileDevicePathLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.Old)) -} - -// GetRenameFileName returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileName() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.Old) -} - -// GetRenameFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFileNameLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.Old)) -} - -// GetRenameFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFilePath() string { - if ev.GetEventType().String() != "rename" { - return "" - } - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.Old) -} - -// GetRenameFilePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetRenameFilePathLength() int { - if ev.GetEventType().String() != "rename" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.Old)) -} - -// GetSetRegistryKeyName returns the value of the field, resolving if necessary -func (ev *Event) GetSetRegistryKeyName() string { - if ev.GetEventType().String() != "set_key_value" { - return "" - } - return ev.SetRegistryKeyValue.Registry.KeyName -} - -// GetSetRegistryKeyNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSetRegistryKeyNameLength() int { - if ev.GetEventType().String() != "set_key_value" { - return 0 - } - return len(ev.SetRegistryKeyValue.Registry.KeyName) -} - -// GetSetRegistryKeyPath returns the value of the field, resolving if necessary -func (ev *Event) GetSetRegistryKeyPath() string { - if ev.GetEventType().String() != "set_key_value" { - return "" - } - return ev.SetRegistryKeyValue.Registry.KeyPath -} - -// GetSetRegistryKeyPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSetRegistryKeyPathLength() int { - if ev.GetEventType().String() != "set_key_value" { - return 0 - } - return len(ev.SetRegistryKeyValue.Registry.KeyPath) -} - -// GetSetRegistryValueName returns the value of the field, resolving if necessary -func (ev *Event) GetSetRegistryValueName() string { - if ev.GetEventType().String() != "set_key_value" { - return "" - } - return ev.SetRegistryKeyValue.ValueName -} - -// GetSetRegistryValueNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSetRegistryValueNameLength() int { - if ev.GetEventType().String() != "set_key_value" { - return 0 - } - return len(ev.SetRegistryKeyValue.ValueName) -} - -// GetSetValueName returns the value of the field, resolving if necessary -func (ev *Event) GetSetValueName() string { - if ev.GetEventType().String() != "set_key_value" { - 
return "" - } - return ev.SetRegistryKeyValue.ValueName -} - -// GetSetKeyValueRegistryKeyName returns the value of the field, resolving if necessary -func (ev *Event) GetSetKeyValueRegistryKeyName() string { - if ev.GetEventType().String() != "set_key_value" { - return "" - } - return ev.SetRegistryKeyValue.Registry.KeyName -} - -// GetSetKeyValueRegistryKeyNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSetKeyValueRegistryKeyNameLength() int { - if ev.GetEventType().String() != "set_key_value" { - return 0 - } - return len(ev.SetRegistryKeyValue.Registry.KeyName) -} - -// GetSetKeyValueRegistryKeyPath returns the value of the field, resolving if necessary -func (ev *Event) GetSetKeyValueRegistryKeyPath() string { - if ev.GetEventType().String() != "set_key_value" { - return "" - } - return ev.SetRegistryKeyValue.Registry.KeyPath -} - -// GetSetKeyValueRegistryKeyPathLength returns the value of the field, resolving if necessary -func (ev *Event) GetSetKeyValueRegistryKeyPathLength() int { - if ev.GetEventType().String() != "set_key_value" { - return 0 - } - return len(ev.SetRegistryKeyValue.Registry.KeyPath) -} - -// GetSetKeyValueRegistryValueName returns the value of the field, resolving if necessary -func (ev *Event) GetSetKeyValueRegistryValueName() string { - if ev.GetEventType().String() != "set_key_value" { - return "" - } - return ev.SetRegistryKeyValue.ValueName -} - -// GetSetKeyValueRegistryValueNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetSetKeyValueRegistryValueNameLength() int { - if ev.GetEventType().String() != "set_key_value" { - return 0 - } - return len(ev.SetRegistryKeyValue.ValueName) -} - -// GetSetKeyValueValueName returns the value of the field, resolving if necessary -func (ev *Event) GetSetKeyValueValueName() string { - if ev.GetEventType().String() != "set_key_value" { - return "" - } - return ev.SetRegistryKeyValue.ValueName -} - // GetTimestamp returns the value of the field, resolving if necessary func (ev *Event) GetTimestamp() time.Time { return ev.FieldHandlers.ResolveEventTime(ev, &ev.BaseEvent) } - -// GetWriteFileDevicePath returns the value of the field, resolving if necessary -func (ev *Event) GetWriteFileDevicePath() string { - if ev.GetEventType().String() != "write" { - return "" - } - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.WriteFile.File) -} - -// GetWriteFileDevicePathLength returns the value of the field, resolving if necessary -func (ev *Event) GetWriteFileDevicePathLength() int { - if ev.GetEventType().String() != "write" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFilePath(ev, &ev.WriteFile.File)) -} - -// GetWriteFileName returns the value of the field, resolving if necessary -func (ev *Event) GetWriteFileName() string { - if ev.GetEventType().String() != "write" { - return "" - } - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.WriteFile.File) -} - -// GetWriteFileNameLength returns the value of the field, resolving if necessary -func (ev *Event) GetWriteFileNameLength() int { - if ev.GetEventType().String() != "write" { - return 0 - } - return len(ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.WriteFile.File)) -} - -// GetWriteFilePath returns the value of the field, resolving if necessary -func (ev *Event) GetWriteFilePath() string { - if ev.GetEventType().String() != "write" { - return "" - } - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.WriteFile.File) -} - -// GetWriteFilePathLength returns the value of the field, 
resolving if necessary -func (ev *Event) GetWriteFilePathLength() int { - if ev.GetEventType().String() != "write" { - return 0 - } - return len(ev.FieldHandlers.ResolveFileUserPath(ev, &ev.WriteFile.File)) -} diff --git a/pkg/security/secl/model/field_handlers_unix.go b/pkg/security/secl/model/field_handlers_unix.go index 6dd4df4c575c5..418e4bc8750f4 100644 --- a/pkg/security/secl/model/field_handlers_unix.go +++ b/pkg/security/secl/model/field_handlers_unix.go @@ -239,6 +239,8 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveK8SUsername(ev, &ev.BaseEvent.ProcessContext.Process.UserSession) // resolve event specific fields switch ev.GetEventType().String() { + case "accept": + _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Accept.Addr) case "bind": _ = ev.FieldHandlers.ResolveIsIPPublic(ev, &ev.Bind.Addr) case "bpf": @@ -534,6 +536,12 @@ func (ev *Event) resolveFields(forADs bool) { if !forADs { _ = ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Mkdir.File) } + if !forADs { + _ = ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Mkdir.SyscallContext) + } + if !forADs { + _ = ev.FieldHandlers.ResolveSyscallCtxArgsInt2(ev, &ev.Mkdir.SyscallContext) + } case "mmap": _ = ev.FieldHandlers.ResolveFileFieldsUser(ev, &ev.MMap.File.FileFields) _ = ev.FieldHandlers.ResolveFileFieldsGroup(ev, &ev.MMap.File.FileFields) @@ -561,6 +569,8 @@ func (ev *Event) resolveFields(forADs bool) { _ = ev.FieldHandlers.ResolveSyscallCtxArgsStr3(ev, &ev.Mount.SyscallContext) } case "mprotect": + case "network_flow_monitor": + _ = ev.FieldHandlers.ResolveNetworkDeviceIfName(ev, &ev.NetworkFlowMonitor.Device) case "ondemand": _ = ev.FieldHandlers.ResolveOnDemandName(ev, &ev.OnDemand) _ = ev.FieldHandlers.ResolveOnDemandArg1Str(ev, &ev.OnDemand) @@ -853,6 +863,9 @@ func (ev *Event) resolveFields(forADs bool) { if !forADs { _ = ev.FieldHandlers.ResolveHashesFromEvent(ev, &ev.Rmdir.File) } + if !forADs { + _ = ev.FieldHandlers.ResolveSyscallCtxArgsStr1(ev, &ev.Rmdir.SyscallContext) + } case "selinux": _ = ev.FieldHandlers.ResolveSELinuxBoolName(ev, &ev.SELinux) case "setgid": diff --git a/pkg/security/secl/model/marshallers_linux.go b/pkg/security/secl/model/marshallers_linux.go index 4ea841cd8fc3b..7bd40455e33a5 100644 --- a/pkg/security/secl/model/marshallers_linux.go +++ b/pkg/security/secl/model/marshallers_linux.go @@ -32,7 +32,7 @@ func MarshalBinary(data []byte, binaryMarshalers ...BinaryMarshaler) (int, error // MarshalBinary marshals a binary representation of itself func (e *FileFields) MarshalBinary(data []byte) (int, error) { - if len(data) < 72 { + if len(data) < FileFieldsSize { return 0, ErrNotEnoughSpace } binary.NativeEndian.PutUint64(data[0:8], e.Inode) @@ -66,7 +66,8 @@ func (e *FileFields) MarshalBinary(data []byte) (int, error) { } binary.NativeEndian.PutUint64(data[56:64], uint64(timeSec)) binary.NativeEndian.PutUint64(data[64:72], uint64(timeNsec)) - return 72, nil + + return FileFieldsSize, nil } // MarshalProcCache marshals a binary representation of itself @@ -171,8 +172,38 @@ func (adlc *ActivityDumpLoadConfig) MarshalBinary() ([]byte, error) { binary.NativeEndian.PutUint64(raw[16:24], adlc.WaitListTimestampRaw) binary.NativeEndian.PutUint64(raw[24:32], adlc.StartTimestampRaw) binary.NativeEndian.PutUint64(raw[32:40], adlc.EndTimestampRaw) - binary.NativeEndian.PutUint32(raw[40:44], adlc.Rate) + binary.NativeEndian.PutUint16(raw[40:42], adlc.Rate) + binary.NativeEndian.PutUint16(raw[42:44], 0) binary.NativeEndian.PutUint32(raw[44:48], adlc.Paused) return raw, nil } 
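// --- Illustrative sketch (reviewer note, not part of the patch) ---
// The marshalling helpers added just below pack fixed-size, kernel-facing
// layouts by hand using native endianness. This standalone example mirrors
// the 16-byte layout that PathKey.Write emits (8-byte inode, 4-byte mount ID,
// 4-byte path ID, matching the 16-byte PathKeySize) and round-trips it.
// The type and function names in this sketch are illustrative only and do
// not exist in the package.

package main

import (
	"encoding/binary"
	"fmt"
)

// pathKeyLayout mirrors the field order written by PathKey.Write.
type pathKeyLayout struct {
	Inode   uint64
	MountID uint32
	PathID  uint32
}

// marshal packs the key into a 16-byte buffer at the same offsets used by
// PathKey.Write: [0:8] inode, [8:12] mount ID, [12:16] path ID.
func (p pathKeyLayout) marshal() []byte {
	buff := make([]byte, 16)
	binary.NativeEndian.PutUint64(buff[0:8], p.Inode)
	binary.NativeEndian.PutUint32(buff[8:12], p.MountID)
	binary.NativeEndian.PutUint32(buff[12:16], p.PathID)
	return buff
}

// unmarshal reads the same offsets back into the struct.
func unmarshal(buff []byte) pathKeyLayout {
	return pathKeyLayout{
		Inode:   binary.NativeEndian.Uint64(buff[0:8]),
		MountID: binary.NativeEndian.Uint32(buff[8:12]),
		PathID:  binary.NativeEndian.Uint32(buff[12:16]),
	}
}

func main() {
	k := pathKeyLayout{Inode: 42, MountID: 7, PathID: 3}
	fmt.Println(unmarshal(k.marshal()) == k) // prints true: the layout round-trips
}
// --- End of illustrative sketch ---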
+ +// MarshalBinary returns the binary representation of a path key +func (pl *PathLeaf) MarshalBinary() ([]byte, error) { + buff := make([]byte, PathLeafSize) + + pl.Parent.Write(buff) + copy(buff[16:], pl.Name[:]) + binary.NativeEndian.PutUint16(buff[16+len(pl.Name):], pl.Len) + + return buff, nil +} + +func (p *PathKey) Write(buffer []byte) { + binary.NativeEndian.PutUint64(buffer[0:8], p.Inode) + binary.NativeEndian.PutUint32(buffer[8:12], p.MountID) + binary.NativeEndian.PutUint32(buffer[12:16], p.PathID) +} + +// MarshalBinary returns the binary representation of a path key +func (p *PathKey) MarshalBinary() ([]byte, error) { + if p.IsNull() { + return nil, &ErrInvalidKeyPath{Inode: p.Inode, MountID: p.MountID} + } + + buff := make([]byte, 16) + p.Write(buff) + + return buff, nil +} diff --git a/pkg/security/secl/model/model.go b/pkg/security/secl/model/model.go index 9a6ae500f2d6f..cb6666fb16259 100644 --- a/pkg/security/secl/model/model.go +++ b/pkg/security/secl/model/model.go @@ -10,15 +10,15 @@ package model import ( "net" + "net/netip" "reflect" "runtime" "time" - "modernc.org/mathutil" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model/usersession" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/utils" ) // Model describes the data model for the runtime security agent events @@ -72,9 +72,9 @@ func (r *Releasable) AppendReleaseCallback(callback func()) { // ContainerContext holds the container context of an event type ContainerContext struct { Releasable - ContainerID containerutils.ContainerID `field:"id,handler:ResolveContainerID"` // SECLDoc[id] Definition:`ID of the container` - CreatedAt uint64 `field:"created_at,handler:ResolveContainerCreatedAt"` // SECLDoc[created_at] Definition:`Timestamp of the creation of the container`` - Tags []string `field:"tags,handler:ResolveContainerTags,opts:skip_ad,weight:9999"` // SECLDoc[tags] Definition:`Tags of the container` + ContainerID containerutils.ContainerID `field:"id,handler:ResolveContainerID,opts:gen_getters"` // SECLDoc[id] Definition:`ID of the container` + CreatedAt uint64 `field:"created_at,handler:ResolveContainerCreatedAt,opts:gen_getters"` // SECLDoc[created_at] Definition:`Timestamp of the creation of the container`` + Tags []string `field:"tags,handler:ResolveContainerTags,opts:skip_ad,weight:9999"` // SECLDoc[tags] Definition:`Tags of the container` Resolved bool `field:"-"` Runtime string `field:"runtime,handler:ResolveContainerRuntime"` // SECLDoc[runtime] Definition:`Runtime managing the container` } @@ -96,15 +96,25 @@ type IPPortContext struct { IsPublicResolved bool `field:"-"` } +// GetComparable returns a comparable version of IPPortContext +func (ipc *IPPortContext) GetComparable() netip.AddrPort { + ipcAddr, ok := netip.AddrFromSlice(ipc.IPNet.IP) + if !ok { + return netip.AddrPort{} + } + return netip.AddrPortFrom(ipcAddr, ipc.Port) +} + // NetworkContext represents the network context of the event type NetworkContext struct { Device NetworkDeviceContext `field:"device"` // network device on which the network packet was captured - L3Protocol uint16 `field:"l3_protocol"` // SECLDoc[l3_protocol] Definition:`L3 protocol of the network packet` Constants:`L3 protocols` - L4Protocol uint16 `field:"l4_protocol"` // SECLDoc[l4_protocol] Definition:`L4 protocol of the network packet` Constants:`L4 protocols` - Source IPPortContext `field:"source"` // source of 
the network packet - Destination IPPortContext `field:"destination"` // destination of the network packet - Size uint32 `field:"size"` // SECLDoc[size] Definition:`Size in bytes of the network packet` + L3Protocol uint16 `field:"l3_protocol"` // SECLDoc[l3_protocol] Definition:`L3 protocol of the network packet` Constants:`L3 protocols` + L4Protocol uint16 `field:"l4_protocol"` // SECLDoc[l4_protocol] Definition:`L4 protocol of the network packet` Constants:`L4 protocols` + Source IPPortContext `field:"source"` // source of the network packet + Destination IPPortContext `field:"destination"` // destination of the network packet + NetworkDirection uint32 `field:"network_direction"` // SECLDoc[network_direction] Definition:`Network direction of the network packet` Constants:`Network directions` + Size uint32 `field:"size"` // SECLDoc[size] Definition:`Size in bytes of the network packet` } // IsZero returns if there is a network context @@ -114,8 +124,8 @@ func (nc *NetworkContext) IsZero() bool { // SpanContext describes a span context type SpanContext struct { - SpanID uint64 `field:"-"` - TraceID mathutil.Int128 `field:"-"` + SpanID uint64 `field:"-"` + TraceID utils.TraceID `field:"-"` } // BaseEvent represents an event sent from the kernel @@ -124,13 +134,13 @@ type BaseEvent struct { Type uint32 `field:"-"` Flags uint32 `field:"-"` TimestampRaw uint64 `field:"event.timestamp,handler:ResolveEventTimestamp"` // SECLDoc[event.timestamp] Definition:`Timestamp of the event` - Timestamp time.Time `field:"timestamp,opts:getters_only,handler:ResolveEventTime"` + Timestamp time.Time `field:"timestamp,opts:getters_only|gen_getters,handler:ResolveEventTime"` Rules []*MatchedRule `field:"-"` ActionReports []ActionReport `field:"-"` - Os string `field:"event.os"` // SECLDoc[event.os] Definition:`Operating system of the event` - Origin string `field:"event.origin"` // SECLDoc[event.origin] Definition:`Origin of the event` - Service string `field:"event.service,handler:ResolveService,opts:skip_ad"` // SECLDoc[event.service] Definition:`Service associated with the event` - Hostname string `field:"event.hostname,handler:ResolveHostname"` // SECLDoc[event.hostname] Definition:`Hostname associated with the event` + Os string `field:"event.os"` // SECLDoc[event.os] Definition:`Operating system of the event` + Origin string `field:"event.origin"` // SECLDoc[event.origin] Definition:`Origin of the event` + Service string `field:"event.service,handler:ResolveService,opts:skip_ad|gen_getters"` // SECLDoc[event.service] Definition:`Service associated with the event` + Hostname string `field:"event.hostname,handler:ResolveHostname"` // SECLDoc[event.hostname] Definition:`Hostname associated with the event` // context shared with all events ProcessContext *ProcessContext `field:"process"` @@ -508,7 +518,7 @@ func (it *ProcessAncestorsIterator) Front(ctx *eval.Context) *ProcessCacheEntry } // Next returns the next element -func (it *ProcessAncestorsIterator) Next() *ProcessCacheEntry { +func (it *ProcessAncestorsIterator) Next(_ *eval.Context) *ProcessCacheEntry { if next := it.prev.Ancestor; next != nil { it.prev = next return next @@ -570,8 +580,8 @@ type ProcessContext struct { // ExitEvent represents a process exit event type ExitEvent struct { *Process - Cause uint32 `field:"cause"` // SECLDoc[cause] Definition:`Cause of the process termination (one of EXITED, SIGNALED, COREDUMPED)` - Code uint32 `field:"code"` // SECLDoc[code] Definition:`Exit code of the process or number of the signal that caused the 
process to terminate` + Cause uint32 `field:"cause"` // SECLDoc[cause] Definition:`Cause of the process termination (one of EXITED, SIGNALED, COREDUMPED)` + Code uint32 `field:"code,opts:gen_getters"` // SECLDoc[code] Definition:`Exit code of the process or number of the signal that caused the process to terminate` } // DNSEvent represents a DNS event diff --git a/pkg/security/secl/model/model_helpers_unix.go b/pkg/security/secl/model/model_helpers_unix.go index 4529844f98e93..4c47e726fbe6f 100644 --- a/pkg/security/secl/model/model_helpers_unix.go +++ b/pkg/security/secl/model/model_helpers_unix.go @@ -8,7 +8,6 @@ package model import ( - "encoding/binary" "errors" "fmt" "path" @@ -18,7 +17,7 @@ import ( "time" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" - "modernc.org/mathutil" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/utils" ) const ( @@ -134,7 +133,7 @@ func (c *Credentials) Equals(o *Credentials) bool { } // SetSpan sets the span -func (p *Process) SetSpan(spanID uint64, traceID mathutil.Int128) { +func (p *Process) SetSpan(spanID uint64, traceID utils.TraceID) { p.SpanID = spanID p.TraceID = traceID } @@ -199,13 +198,13 @@ func (f *FileFields) HasHardLinks() bool { return f.NLink > 1 } -// GetInLowerLayer returns whether a file is in a lower layer -func (f *FileFields) GetInLowerLayer() bool { +// IsInLowerLayer returns whether a file is in a lower layer +func (f *FileFields) IsInLowerLayer() bool { return f.Flags&LowerLayer != 0 } -// GetInUpperLayer returns whether a file is in the upper layer -func (f *FileFields) GetInUpperLayer() bool { +// IsInUpperLayer returns whether a file is in the upper layer +func (f *FileFields) IsInUpperLayer() bool { return f.Flags&UpperLayer != 0 } @@ -330,12 +329,6 @@ func (d NetDevice) GetKey() string { return fmt.Sprintf("%v_%v", d.IfIndex, d.NetNS) } -func (p *PathKey) Write(buffer []byte) { - binary.NativeEndian.PutUint64(buffer[0:8], p.Inode) - binary.NativeEndian.PutUint32(buffer[8:12], p.MountID) - binary.NativeEndian.PutUint32(buffer[12:16], p.PathID) -} - // IsNull returns true if a key is invalid func (p *PathKey) IsNull() bool { return p.Inode == 0 && p.MountID == 0 @@ -345,18 +338,6 @@ func (p *PathKey) String() string { return fmt.Sprintf("%x/%x", p.MountID, p.Inode) } -// MarshalBinary returns the binary representation of a path key -func (p *PathKey) MarshalBinary() ([]byte, error) { - if p.IsNull() { - return nil, &ErrInvalidKeyPath{Inode: p.Inode, MountID: p.MountID} - } - - buff := make([]byte, 16) - p.Write(buff) - - return buff, nil -} - // PathKeySize defines the path key size const PathKeySize = 16 @@ -382,17 +363,6 @@ func (pl *PathLeaf) SetName(name string) { pl.Len = uint16(len(name) + 1) } -// MarshalBinary returns the binary representation of a path key -func (pl *PathLeaf) MarshalBinary() ([]byte, error) { - buff := make([]byte, PathLeafSize) - - pl.Parent.Write(buff) - copy(buff[16:], pl.Name[:]) - binary.NativeEndian.PutUint16(buff[16+len(pl.Name):], pl.Len) - - return buff, nil -} - // ResolveHashes resolves the hash of the provided file func (dfh *FakeFieldHandlers) ResolveHashes(_ EventType, _ *Process, _ *FileEvent) []string { return nil diff --git a/pkg/security/secl/model/model_test.go b/pkg/security/secl/model/model_test.go index 0d96280ef7178..90aaff09ca770 100644 --- a/pkg/security/secl/model/model_test.go +++ b/pkg/security/secl/model/model_test.go @@ -123,7 +123,7 @@ func TestSetFieldValue(t *testing.T) { event := NewFakeEvent() for _, field := range event.GetFields() 
{ - kind, err := event.GetFieldType(field) + _, kind, err := event.GetFieldMetadata(field) if err != nil { t.Fatal(err) } diff --git a/pkg/security/secl/model/model_unix.go b/pkg/security/secl/model/model_unix.go index c7ff2ac4240b3..b06a59b389414 100644 --- a/pkg/security/secl/model/model_unix.go +++ b/pkg/security/secl/model/model_unix.go @@ -5,20 +5,25 @@ //go:build unix -//go:generate go run github.com/DataDog/datadog-agent/pkg/security/secl/compiler/generators/accessors -tags unix -types-file model.go -output accessors_unix.go -field-handlers field_handlers_unix.go -doc ../../../../docs/cloud-workload-security/secl_linux.json -field-accessors-output field_accessors_unix.go +//go:generate accessors -tags unix -types-file model.go -output accessors_unix.go -field-handlers field_handlers_unix.go -doc ../../../../docs/cloud-workload-security/secl_linux.json -field-accessors-output field_accessors_unix.go // Package model holds model related files package model import ( + "net/netip" "time" - "modernc.org/mathutil" - "github.com/google/gopacket" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/utils" +) + +const ( + // FileFieldsSize is the size used by the file_t structure + FileFieldsSize = 72 ) // Event represents an event sent from the kernel @@ -63,6 +68,7 @@ type Event struct { // network syscalls Bind BindEvent `field:"bind" event:"bind"` // [7.37] [Network] A bind was executed Connect ConnectEvent `field:"connect" event:"connect"` // [7.60] [Network] A connect was executed + Accept AcceptEvent `field:"accept" event:"accept"` // [7.60] [Network] An accept was executed // kernel events SELinux SELinuxEvent `field:"selinux" event:"selinux"` // [7.30] [Kernel] An SELinux operation was run @@ -74,9 +80,10 @@ type Event struct { UnloadModule UnloadModuleEvent `field:"unload_module" event:"unload_module"` // [7.35] [Kernel] A kernel module was deleted // network events - DNS DNSEvent `field:"dns" event:"dns"` // [7.36] [Network] A DNS request was sent - IMDS IMDSEvent `field:"imds" event:"imds"` // [7.55] [Network] An IMDS event was captured - RawPacket RawPacketEvent `field:"packet" event:"packet"` // [7.60] [Network] A raw network packet captured + DNS DNSEvent `field:"dns" event:"dns"` // [7.36] [Network] A DNS request was sent + IMDS IMDSEvent `field:"imds" event:"imds"` // [7.55] [Network] An IMDS event was captured + RawPacket RawPacketEvent `field:"packet" event:"packet"` // [7.60] [Network] A raw network packet was captured + NetworkFlowMonitor NetworkFlowMonitorEvent `field:"network_flow_monitor" event:"network_flow_monitor"` // [7.63] [Network] A network monitor event was sent // on-demand events OnDemand OnDemandEvent `field:"ondemand" event:"ondemand"` @@ -97,9 +104,9 @@ type Event struct { type CGroupContext struct { CGroupID containerutils.CGroupID `field:"id,handler:ResolveCGroupID"` // SECLDoc[id] Definition:`ID of the cgroup` CGroupFlags containerutils.CGroupFlags `field:"-"` - CGroupManager string `field:"manager,handler:ResolveCGroupManager"` // SECLDoc[manager] Definition:`Lifecycle manager of the cgroup` + CGroupManager string `field:"manager,handler:ResolveCGroupManager"` // SECLDoc[manager] Definition:`[Experimental] Lifecycle manager of the cgroup` CGroupFile PathKey `field:"file"` - CGroupVersion int `field:"version,handler:ResolveCGroupVersion"` // SECLDoc[version] Definition:`Version of the cgroup API` + CGroupVersion 
int `field:"version,handler:ResolveCGroupVersion"` // SECLDoc[version] Definition:`[Experimental] Version of the cgroup API` } // Merge two cgroup context @@ -194,10 +201,10 @@ type CapsetEvent struct { // Credentials represents the kernel credentials of a process type Credentials struct { - UID uint32 `field:"uid"` // SECLDoc[uid] Definition:`UID of the process` - GID uint32 `field:"gid"` // SECLDoc[gid] Definition:`GID of the process` - User string `field:"user"` // SECLDoc[user] Definition:`User of the process` Example:`process.user == "root"` Description:`Constrain an event to be triggered by a process running as the root user.` - Group string `field:"group"` // SECLDoc[group] Definition:`Group of the process` + UID uint32 `field:"uid,opts:gen_getters"` // SECLDoc[uid] Definition:`UID of the process` + GID uint32 `field:"gid,opts:gen_getters"` // SECLDoc[gid] Definition:`GID of the process` + User string `field:"user,opts:gen_getters"` // SECLDoc[user] Definition:`User of the process` Example:`process.user == "root"` Description:`Constrain an event to be triggered by a process running as the root user.` + Group string `field:"group,opts:gen_getters"` // SECLDoc[group] Definition:`Group of the process` EUID uint32 `field:"euid"` // SECLDoc[euid] Definition:`Effective UID of the process` EGID uint32 `field:"egid"` // SECLDoc[egid] Definition:`Effective GID of the process` @@ -229,23 +236,23 @@ type Process struct { CGroup CGroupContext `field:"cgroup"` // SECLDoc[cgroup] Definition:`CGroup` ContainerID containerutils.ContainerID `field:"container.id,handler:ResolveProcessContainerID"` // SECLDoc[container.id] Definition:`Container ID` - SpanID uint64 `field:"-"` - TraceID mathutil.Int128 `field:"-"` + SpanID uint64 `field:"-"` + TraceID utils.TraceID `field:"-"` TTYName string `field:"tty_name"` // SECLDoc[tty_name] Definition:`Name of the TTY associated with the process` Comm string `field:"comm"` // SECLDoc[comm] Definition:`Comm attribute of the process` LinuxBinprm LinuxBinprm `field:"interpreter,check:HasInterpreter"` // Script interpreter as identified by the shebang // pid_cache_t - ForkTime time.Time `field:"fork_time,opts:getters_only"` - ExitTime time.Time `field:"exit_time,opts:getters_only"` - ExecTime time.Time `field:"exec_time,opts:getters_only"` + ForkTime time.Time `field:"fork_time,opts:getters_only|gen_getters"` + ExitTime time.Time `field:"exit_time,opts:getters_only|gen_getters"` + ExecTime time.Time `field:"exec_time,opts:getters_only|gen_getters"` // TODO: merge with ExecTime CreatedAt uint64 `field:"created_at,handler:ResolveProcessCreatedAt"` // SECLDoc[created_at] Definition:`Timestamp of the creation of the process` Cookie uint64 `field:"-"` - PPid uint32 `field:"ppid"` // SECLDoc[ppid] Definition:`Parent process ID` + PPid uint32 `field:"ppid,opts:gen_getters"` // SECLDoc[ppid] Definition:`Parent process ID` // credentials_t section of pid_cache_t Credentials @@ -261,13 +268,13 @@ type Process struct { EnvsEntry *EnvsEntry `field:"-"` // defined to generate accessors, ArgsTruncated and EnvsTruncated are used during by unmarshaller - Argv0 string `field:"argv0,handler:ResolveProcessArgv0,weight:100"` // SECLDoc[argv0] Definition:`First argument of the process` - Args string `field:"args,handler:ResolveProcessArgs,weight:500,opts:skip_ad"` // SECLDoc[args] Definition:`Arguments of the process (as a string, excluding argv0)` Example:`exec.args == "-sV -p 22,53,110,143,4564 198.116.0-255.1-127"` Description:`Matches any process with these exact arguments.` 
Example:`exec.args =~ "* -F * http*"` Description:`Matches any process that has the "-F" argument anywhere before an argument starting with "http".` - Argv []string `field:"argv,handler:ResolveProcessArgv,weight:500; cmdargv,handler:ResolveProcessCmdArgv,opts:getters_only; args_flags,handler:ResolveProcessArgsFlags,opts:helper; args_options,handler:ResolveProcessArgsOptions,opts:helper"` // SECLDoc[argv] Definition:`Arguments of the process (as an array, excluding argv0)` Example:`exec.argv in ["127.0.0.1"]` Description:`Matches any process that has this IP address as one of its arguments.` SECLDoc[args_flags] Definition:`Flags in the process arguments` Example:`exec.args_flags in ["s"] && exec.args_flags in ["V"]` Description:`Matches any process with both "-s" and "-V" flags in its arguments. Also matches "-sV".` SECLDoc[args_options] Definition:`Argument of the process as options` Example:`exec.args_options in ["p=0-1024"]` Description:`Matches any process that has either "-p 0-1024" or "--p=0-1024" in its arguments.` - ArgsTruncated bool `field:"args_truncated,handler:ResolveProcessArgsTruncated"` // SECLDoc[args_truncated] Definition:`Indicator of arguments truncation` - Envs []string `field:"envs,handler:ResolveProcessEnvs,weight:100"` // SECLDoc[envs] Definition:`Environment variable names of the process` - Envp []string `field:"envp,handler:ResolveProcessEnvp,weight:100"` // SECLDoc[envp] Definition:`Environment variables of the process` - EnvsTruncated bool `field:"envs_truncated,handler:ResolveProcessEnvsTruncated"` // SECLDoc[envs_truncated] Definition:`Indicator of environment variables truncation` + Argv0 string `field:"argv0,handler:ResolveProcessArgv0,weight:100"` // SECLDoc[argv0] Definition:`First argument of the process` + Args string `field:"args,handler:ResolveProcessArgs,weight:500,opts:skip_ad"` // SECLDoc[args] Definition:`Arguments of the process (as a string, excluding argv0)` Example:`exec.args == "-sV -p 22,53,110,143,4564 198.116.0-255.1-127"` Description:`Matches any process with these exact arguments.` Example:`exec.args =~ "* -F * http*"` Description:`Matches any process that has the "-F" argument anywhere before an argument starting with "http".` + Argv []string `field:"argv,handler:ResolveProcessArgv,weight:500; cmdargv,handler:ResolveProcessCmdArgv,opts:getters_only|gen_getters; args_flags,handler:ResolveProcessArgsFlags,opts:helper; args_options,handler:ResolveProcessArgsOptions,opts:helper"` // SECLDoc[argv] Definition:`Arguments of the process (as an array, excluding argv0)` Example:`exec.argv in ["127.0.0.1"]` Description:`Matches any process that has this IP address as one of its arguments.` SECLDoc[args_flags] Definition:`Flags in the process arguments` Example:`exec.args_flags in ["s"] && exec.args_flags in ["V"]` Description:`Matches any process with both "-s" and "-V" flags in its arguments. 
Also matches "-sV".` SECLDoc[args_options] Definition:`Argument of the process as options` Example:`exec.args_options in ["p=0-1024"]` Description:`Matches any process that has either "-p 0-1024" or "--p=0-1024" in its arguments.` + ArgsTruncated bool `field:"args_truncated,handler:ResolveProcessArgsTruncated"` // SECLDoc[args_truncated] Definition:`Indicator of arguments truncation` + Envs []string `field:"envs,handler:ResolveProcessEnvs,weight:100"` // SECLDoc[envs] Definition:`Environment variable names of the process` + Envp []string `field:"envp,handler:ResolveProcessEnvp,weight:100,opts:gen_getters"` // SECLDoc[envp] Definition:`Environment variables of the process` + EnvsTruncated bool `field:"envs_truncated,handler:ResolveProcessEnvsTruncated"` // SECLDoc[envs_truncated] Definition:`Indicator of environment variables truncation` ArgsScrubbed string `field:"args_scrubbed,handler:ResolveProcessArgsScrubbed,opts:getters_only"` ArgvScrubbed []string `field:"argv_scrubbed,handler:ResolveProcessArgvScrubbed,opts:getters_only"` @@ -325,9 +332,9 @@ type FileFields struct { type FileEvent struct { FileFields - PathnameStr string `field:"path,handler:ResolveFilePath,opts:length" op_override:"ProcessSymlinkPathname"` // SECLDoc[path] Definition:`File's path` Example:`exec.file.path == "/usr/bin/apt"` Description:`Matches the execution of the file located at /usr/bin/apt` Example:`open.file.path == "/etc/passwd"` Description:`Matches any process opening the /etc/passwd file.` - BasenameStr string `field:"name,handler:ResolveFileBasename,opts:length" op_override:"ProcessSymlinkBasename"` // SECLDoc[name] Definition:`File's basename` Example:`exec.file.name == "apt"` Description:`Matches the execution of any file named apt.` - Filesystem string `field:"filesystem,handler:ResolveFileFilesystem"` // SECLDoc[filesystem] Definition:`File's filesystem` + PathnameStr string `field:"path,handler:ResolveFilePath,opts:length|gen_getters" op_override:"ProcessSymlinkPathname"` // SECLDoc[path] Definition:`File's path` Example:`exec.file.path == "/usr/bin/apt"` Description:`Matches the execution of the file located at /usr/bin/apt` Example:`open.file.path == "/etc/passwd"` Description:`Matches any process opening the /etc/passwd file.` + BasenameStr string `field:"name,handler:ResolveFileBasename,opts:length" op_override:"ProcessSymlinkBasename"` // SECLDoc[name] Definition:`File's basename` Example:`exec.file.name == "apt"` Description:`Matches the execution of any file named apt.` + Filesystem string `field:"filesystem,handler:ResolveFileFilesystem"` // SECLDoc[filesystem] Definition:`File's filesystem` MountPath string `field:"-"` MountSource uint32 `field:"-"` @@ -373,8 +380,13 @@ type LinkEvent struct { // MkdirEvent represents a mkdir event type MkdirEvent struct { SyscallEvent + SyscallContext File FileEvent `field:"file"` Mode uint32 `field:"file.destination.mode; file.destination.rights"` // SECLDoc[file.destination.mode] Definition:`Mode of the new directory` Constants:`File mode constants` SECLDoc[file.destination.rights] Definition:`Rights of the new directory` Constants:`File mode constants` + + // Syscall context aliases + SyscallPath string `field:"syscall.path,ref:mkdir.syscall.str1"` // SECLDoc[syscall.path] Definition:`Path argument of the syscall` + SyscallMode uint32 `field:"syscall.mode,ref:mkdir.syscall.int2"` // SECLDoc[syscall.mode] Definition:`Mode of the new directory` } // ArgsEnvsEvent defines a args/envs event @@ -401,9 +413,9 @@ type MountEvent struct { SyscallEvent 
SyscallContext Mount - MountPointPath string `field:"mountpoint.path,handler:ResolveMountPointPath"` // SECLDoc[mountpoint.path] Definition:`Path of the mount point` - MountSourcePath string `field:"source.path,handler:ResolveMountSourcePath"` // SECLDoc[source.path] Definition:`Source path of a bind mount` - MountRootPath string `field:"root.path,handler:ResolveMountRootPath"` // SECLDoc[root.path] Definition:`Root path of the mount` + MountPointPath string `field:"mountpoint.path,handler:ResolveMountPointPath,opts:gen_getters"` // SECLDoc[mountpoint.path] Definition:`Path of the mount point` + MountSourcePath string `field:"source.path,handler:ResolveMountSourcePath"` // SECLDoc[source.path] Definition:`Source path of a bind mount` + MountRootPath string `field:"root.path,handler:ResolveMountRootPath,opts:gen_getters"` // SECLDoc[root.path] Definition:`Root path of the mount` MountPointPathResolutionError error `field:"-"` MountSourcePathResolutionError error `field:"-"` MountRootPathResolutionError error `field:"-"` @@ -455,8 +467,8 @@ type SELinuxEvent struct { // PIDContext holds the process context of a kernel event type PIDContext struct { - Pid uint32 `field:"pid"` // SECLDoc[pid] Definition:`Process ID of the process (also called thread group ID)` - Tid uint32 `field:"tid"` // SECLDoc[tid] Definition:`Thread ID of the thread` + Pid uint32 `field:"pid,opts:gen_getters"` // SECLDoc[pid] Definition:`Process ID of the process (also called thread group ID)` + Tid uint32 `field:"tid"` // SECLDoc[tid] Definition:`Thread ID of the thread` NetNS uint32 `field:"-"` IsKworker bool `field:"is_kworker"` // SECLDoc[is_kworker] Definition:`Indicates whether the process is a kworker` ExecInode uint64 `field:"-"` // used to track exec and event loss @@ -479,7 +491,11 @@ type RenameEvent struct { // RmdirEvent represents a rmdir event type RmdirEvent struct { SyscallEvent + SyscallContext File FileEvent `field:"file"` + + // Syscall context aliases + SyscallPath string `field:"syscall.path,ref:rmdir.syscall.str1"` // SECLDoc[syscall.path] Definition:`Path argument of the syscall` } // SetXAttrEvent represents an extended attributes event @@ -624,6 +640,7 @@ type CgroupTracingEvent struct { ContainerContext ContainerContext CGroupContext CGroupContext Config ActivityDumpLoadConfig + Pid uint32 ConfigCookie uint64 } @@ -641,7 +658,7 @@ type ActivityDumpLoadConfig struct { WaitListTimestampRaw uint64 StartTimestampRaw uint64 EndTimestampRaw uint64 - Rate uint32 // max number of events per sec + Rate uint16 // max number of events per sec Paused uint32 } @@ -670,6 +687,14 @@ type ConnectEvent struct { Protocol uint16 `field:"protocol"` // SECLDoc[protocol] Definition:`Socket Protocol` } +// AcceptEvent represents an accept event +type AcceptEvent struct { + SyscallEvent + + Addr IPPortContext `field:"addr"` // Connection address + AddrFamily uint16 `field:"addr.family"` // SECLDoc[addr.family] Definition:`Address family` +} + // NetDevice represents a network device type NetDevice struct { Name string @@ -735,3 +760,97 @@ type RawPacketEvent struct { CaptureInfo gopacket.CaptureInfo `field:"-"` Data []byte `field:"-"` } + +// NetworkStats is used to record network statistics +type NetworkStats struct { + DataSize uint64 `field:"data_size"` // SECLDoc[data_size] Definition:`Amount of data transmitted or received` + PacketCount uint64 `field:"packet_count"` // SECLDoc[packet_count] Definition:`Count of network packets transmitted or received` +} + +// Add the input stats to the current stats +func (ns 
*NetworkStats) Add(input NetworkStats) { + ns.DataSize += input.DataSize + ns.PacketCount += input.PacketCount +} + +// FiveTuple is used to uniquely identify a flow +type FiveTuple struct { + Source netip.AddrPort + Destination netip.AddrPort + L4Protocol uint16 +} + +// Flow is used to represent a network 5-tuple with statistics +type Flow struct { + Source IPPortContext `field:"source"` // source of the network packet + Destination IPPortContext `field:"destination"` // destination of the network packet + L3Protocol uint16 `field:"l3_protocol"` // SECLDoc[l3_protocol] Definition:`L3 protocol of the network packet` Constants:`L3 protocols` + L4Protocol uint16 `field:"l4_protocol"` // SECLDoc[l4_protocol] Definition:`L4 protocol of the network packet` Constants:`L4 protocols` + + Ingress NetworkStats `field:"ingress"` // SECLDoc[ingress] Definition:`Network statistics about ingress traffic` + Egress NetworkStats `field:"egress"` // SECLDoc[egress] Definition:`Network statistics about egress traffic` +} + +// GetFiveTuple returns the five tuple identifying the flow +func (f *Flow) GetFiveTuple() FiveTuple { + return FiveTuple{ + Source: f.Source.GetComparable(), + Destination: f.Destination.GetComparable(), + L4Protocol: f.L4Protocol, + } +} + +// NetworkFlowMonitorEvent represents a network flow monitor event +type NetworkFlowMonitorEvent struct { + Device NetworkDeviceContext `field:"device"` // network device on which the network flows were captured + FlowsCount uint64 `field:"-"` + Flows []Flow `field:"flows,iterator:FlowsIterator"` // list of captured flows +} + +// FlowsIterator defines an iterator of flows +type FlowsIterator struct { + prev int +} + +// Front returns the first element +func (it *FlowsIterator) Front(ctx *eval.Context) *Flow { + if len(ctx.Event.(*Event).NetworkFlowMonitor.Flows) == 0 { + return nil + } + + front := ctx.Event.(*Event).NetworkFlowMonitor.Flows[0] + it.prev = 0 + return &front +} + +// Next returns the next element +func (it *FlowsIterator) Next(ctx *eval.Context) *Flow { + if len(ctx.Event.(*Event).NetworkFlowMonitor.Flows) > it.prev+1 { + it.prev++ + return &(ctx.Event.(*Event).NetworkFlowMonitor.Flows[it.prev]) + } + return nil +} + +// At returns the element at the given position +func (it *FlowsIterator) At(ctx *eval.Context, regID eval.RegisterID, pos int) *Flow { + if entry := ctx.RegisterCache[regID]; entry != nil && entry.Pos == pos { + return entry.Value.(*Flow) + } + + if len(ctx.Event.(*Event).NetworkFlowMonitor.Flows) > pos { + flow := &(ctx.Event.(*Event).NetworkFlowMonitor.Flows[pos]) + ctx.RegisterCache[regID] = &eval.RegisterCacheEntry{ + Pos: pos, + Value: flow, + } + return flow + } + + return nil +} + +// Len returns the len +func (it *FlowsIterator) Len(ctx *eval.Context) int { + return len(ctx.Event.(*Event).NetworkFlowMonitor.Flows) +} diff --git a/pkg/security/secl/model/model_windows.go b/pkg/security/secl/model/model_windows.go index 5305dfee0b71c..8fe667c85f2a6 100644 --- a/pkg/security/secl/model/model_windows.go +++ b/pkg/security/secl/model/model_windows.go @@ -49,9 +49,9 @@ type Event struct { // FileEvent is the common file event type type FileEvent struct { - FileObject uint64 `field:"-"` // handle numeric value - PathnameStr string `field:"path,handler:ResolveFilePath,opts:length" op_override:"eval.WindowsPathCmp"` // SECLDoc[path] Definition:`File's path` Example:`exec.file.path == "c:\cmd.bat"` Description:`Matches the execution of the file located at c:\cmd.bat` - BasenameStr string 
`field:"name,handler:ResolveFileBasename,opts:length" op_override:"eval.CaseInsensitiveCmp"` // SECLDoc[name] Definition:`File's basename` Example:`exec.file.name == "cmd.bat"` Description:`Matches the execution of any file named cmd.bat.` + FileObject uint64 `field:"-"` // handle numeric value + PathnameStr string `field:"path,handler:ResolveFilePath,opts:length|gen_getters" op_override:"eval.WindowsPathCmp"` // SECLDoc[path] Definition:`File's path` Example:`exec.file.path == "c:\cmd.bat"` Description:`Matches the execution of the file located at c:\cmd.bat` + BasenameStr string `field:"name,handler:ResolveFileBasename,opts:length" op_override:"eval.CaseInsensitiveCmp"` // SECLDoc[name] Definition:`File's basename` Example:`exec.file.name == "cmd.bat"` Description:`Matches the execution of any file named cmd.bat.` } // FimFileEvent is the common file event type @@ -76,12 +76,12 @@ type Process struct { ContainerID string `field:"container.id"` // SECLDoc[container.id] Definition:`Container ID` - ExitTime time.Time `field:"exit_time,opts:getters_only"` - ExecTime time.Time `field:"exec_time,opts:getters_only"` + ExitTime time.Time `field:"exit_time,opts:getters_only|gen_getters"` + ExecTime time.Time `field:"exec_time,opts:getters_only|gen_getters"` CreatedAt uint64 `field:"created_at,handler:ResolveProcessCreatedAt"` // SECLDoc[created_at] Definition:`Timestamp of the creation of the process` - PPid uint32 `field:"ppid"` // SECLDoc[ppid] Definition:`Parent process ID` + PPid uint32 `field:"ppid,opts:gen_getters"` // SECLDoc[ppid] Definition:`Parent process ID` ArgsEntry *ArgsEntry `field:"-"` EnvsEntry *EnvsEntry `field:"-"` @@ -92,8 +92,8 @@ type Process struct { OwnerSidString string `field:"user_sid"` // SECLDoc[user_sid] Definition:`Sid of the user of the process` User string `field:"user,handler:ResolveUser"` // SECLDoc[user] Definition:`User name` - Envs []string `field:"envs,handler:ResolveProcessEnvs,weight:100"` // SECLDoc[envs] Definition:`Environment variable names of the process` - Envp []string `field:"envp,handler:ResolveProcessEnvp,weight:100"` // SECLDoc[envp] Definition:`Environment variables of the process` // SECLDoc[envp] Definition:`Environment variables of the process` + Envs []string `field:"envs,handler:ResolveProcessEnvs,weight:100"` // SECLDoc[envs] Definition:`Environment variable names of the process` + Envp []string `field:"envp,handler:ResolveProcessEnvp,weight:100,opts:gen_getters"` // SECLDoc[envp] Definition:`Environment variables of the process` // SECLDoc[envp] Definition:`Environment variables of the process` // cache version Variables eval.Variables `field:"-"` @@ -107,7 +107,7 @@ type ExecEvent struct { // PIDContext holds the process context of an kernel event type PIDContext struct { - Pid uint32 `field:"pid"` // SECLDoc[pid] Definition:`Process ID of the process (also called thread group ID)` + Pid uint32 `field:"pid,opts:gen_getters"` // SECLDoc[pid] Definition:`Process ID of the process (also called thread group ID)` } // NetworkDeviceContext defines a network device context diff --git a/pkg/security/secl/model/sharedconsts/argsenvs.go b/pkg/security/secl/model/sharedconsts/argsenvs.go new file mode 100644 index 0000000000000..13df701a4f609 --- /dev/null +++ b/pkg/security/secl/model/sharedconsts/argsenvs.go @@ -0,0 +1,14 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2016-present Datadog, Inc. + +// Package sharedconsts holds model related shared constants +package sharedconsts + +const ( + // MaxArgEnvSize maximum size of one argument or environment variable + MaxArgEnvSize = 256 + // MaxArgsEnvsSize maximum number of args and/or envs + MaxArgsEnvsSize = 256 +) diff --git a/pkg/security/secl/model/sharedconsts/auditid.go b/pkg/security/secl/model/sharedconsts/auditid.go new file mode 100644 index 0000000000000..50d5e36cc9b21 --- /dev/null +++ b/pkg/security/secl/model/sharedconsts/auditid.go @@ -0,0 +1,14 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package sharedconsts holds model related shared constants +package sharedconsts + +import "math" + +const ( + // AuditUIDUnset is used to specify that a login uid is not set + AuditUIDUnset = math.MaxUint32 +) diff --git a/pkg/security/secl/model/sharedconsts/exitcode.go b/pkg/security/secl/model/sharedconsts/exitcode.go new file mode 100644 index 0000000000000..ad45e610ee2e2 --- /dev/null +++ b/pkg/security/secl/model/sharedconsts/exitcode.go @@ -0,0 +1,32 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package sharedconsts holds model related shared constants +package sharedconsts + +// ExitCause represents the cause of a process termination +type ExitCause uint32 + +func (cause ExitCause) String() string { + switch cause { + case ExitExited: + return "EXITED" + case ExitCoreDumped: + return "COREDUMPED" + case ExitSignaled: + return "SIGNALED" + default: + return "UNKNOWN" + } +} + +const ( + // ExitExited Process exited normally + ExitExited ExitCause = iota + // ExitCoreDumped Process was terminated with a coredump signal + ExitCoreDumped + // ExitSignaled Process was terminated with a signal other than a coredump + ExitSignaled +) diff --git a/pkg/security/secl/model/string_array_iter.go b/pkg/security/secl/model/string_array_iter.go index c27537255c729..10034151c85d8 100644 --- a/pkg/security/secl/model/string_array_iter.go +++ b/pkg/security/secl/model/string_array_iter.go @@ -8,24 +8,38 @@ package model import "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" -func newAncestorsIterator[T any](iter *ProcessAncestorsIterator, ctx *eval.Context, ev *Event, perIter func(ev *Event, pce *ProcessCacheEntry) T) []T { - results := make([]T, 0, ctx.CachedAncestorsCount) - for pce := iter.Front(ctx); pce != nil; pce = iter.Next() { - results = append(results, perIter(ev, pce)) +// AncestorsIterator is a generic interface that iterators must implement +type AncestorsIterator[T any] interface { + Front(ctx *eval.Context) T + Next(ctx *eval.Context) T + At(ctx *eval.Context, regID eval.RegisterID, pos int) T + Len(ctx *eval.Context) int +} + +// Helper function to check if a value is nil +func isNil[V comparable](v V) bool { + var zero V + return v == zero +} + +func newAncestorsIterator[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { + results := make([]T, 0, ctx.AncestorsCounters[field]) + for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { + results = 
append(results, perIter(ev, entry)) } - ctx.CachedAncestorsCount = len(results) + ctx.AncestorsCounters[field] = len(results) return results } -func newAncestorsIteratorArray[T any](iter *ProcessAncestorsIterator, ctx *eval.Context, ev *Event, perIter func(ev *Event, pce *ProcessCacheEntry) []T) []T { - results := make([]T, 0, ctx.CachedAncestorsCount) +func newAncestorsIteratorArray[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { + results := make([]T, 0, ctx.AncestorsCounters[field]) ancestorsCount := 0 - for pce := iter.Front(ctx); pce != nil; pce = iter.Next() { - results = append(results, perIter(ev, pce)...) + for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { + results = append(results, perIter(ev, entry)...) ancestorsCount++ } - ctx.CachedAncestorsCount = ancestorsCount + ctx.AncestorsCounters[field] = ancestorsCount return results } diff --git a/pkg/security/secl/model/syscalls_linux_amd64.go b/pkg/security/secl/model/syscalls_linux_amd64.go index a1a6925e68377..c5bbb83cc3dbe 100644 --- a/pkg/security/secl/model/syscalls_linux_amd64.go +++ b/pkg/security/secl/model/syscalls_linux_amd64.go @@ -348,6 +348,7 @@ const ( SysStatx Syscall = 332 SysIoPgetevents Syscall = 333 SysRseq Syscall = 334 + SysUretprobe Syscall = 335 SysPidfdSendSignal Syscall = 424 SysIoUringSetup Syscall = 425 SysIoUringEnter Syscall = 426 @@ -386,4 +387,9 @@ const ( SysLsmGetSelfAttr Syscall = 459 SysLsmSetSelfAttr Syscall = 460 SysLsmListModules Syscall = 461 + SysMseal Syscall = 462 + SysSetxattrat Syscall = 463 + SysGetxattrat Syscall = 464 + SysListxattrat Syscall = 465 + SysRemovexattrat Syscall = 466 ) diff --git a/pkg/security/secl/model/syscalls_linux_arm64.go b/pkg/security/secl/model/syscalls_linux_arm64.go index bc062229dcc33..630beb45c4875 100644 --- a/pkg/security/secl/model/syscalls_linux_arm64.go +++ b/pkg/security/secl/model/syscalls_linux_arm64.go @@ -352,5 +352,10 @@ const ( SysLsmGetSelfAttr Syscall = 459 SysLsmSetSelfAttr Syscall = 460 SysLsmListModules Syscall = 461 - SysSyscalls Syscall = 462 + SysMseal Syscall = 462 + SysSetxattrat Syscall = 463 + SysGetxattrat Syscall = 464 + SysListxattrat Syscall = 465 + SysRemovexattrat Syscall = 466 + SysSyscalls Syscall = 467 ) diff --git a/pkg/security/secl/model/syscalls_string_linux_amd64.go b/pkg/security/secl/model/syscalls_string_linux_amd64.go index a273b4d457b7b..e19b59867937c 100644 --- a/pkg/security/secl/model/syscalls_string_linux_amd64.go +++ b/pkg/security/secl/model/syscalls_string_linux_amd64.go @@ -343,6 +343,7 @@ func _() { _ = x[SysStatx-332] _ = x[SysIoPgetevents-333] _ = x[SysRseq-334] + _ = x[SysUretprobe-335] _ = x[SysPidfdSendSignal-424] _ = x[SysIoUringSetup-425] _ = x[SysIoUringEnter-426] @@ -381,23 +382,28 @@ func _() { _ = x[SysLsmGetSelfAttr-459] _ = x[SysLsmSetSelfAttr-460] _ = x[SysLsmListModules-461] + _ = x[SysMseal-462] + _ = x[SysSetxattrat-463] + _ = x[SysGetxattrat-464] + _ = x[SysListxattrat-465] + _ = x[SysRemovexattrat-466] } const ( - _Syscall_name_0 = 
"SysReadSysWriteSysOpenSysCloseSysStatSysFstatSysLstatSysPollSysLseekSysMmapSysMprotectSysMunmapSysBrkSysRtSigactionSysRtSigprocmaskSysRtSigreturnSysIoctlSysPread64SysPwrite64SysReadvSysWritevSysAccessSysPipeSysSelectSysSchedYieldSysMremapSysMsyncSysMincoreSysMadviseSysShmgetSysShmatSysShmctlSysDupSysDup2SysPauseSysNanosleepSysGetitimerSysAlarmSysSetitimerSysGetpidSysSendfileSysSocketSysConnectSysAcceptSysSendtoSysRecvfromSysSendmsgSysRecvmsgSysShutdownSysBindSysListenSysGetsocknameSysGetpeernameSysSocketpairSysSetsockoptSysGetsockoptSysCloneSysForkSysVforkSysExecveSysExitSysWait4SysKillSysUnameSysSemgetSysSemopSysSemctlSysShmdtSysMsggetSysMsgsndSysMsgrcvSysMsgctlSysFcntlSysFlockSysFsyncSysFdatasyncSysTruncateSysFtruncateSysGetdentsSysGetcwdSysChdirSysFchdirSysRenameSysMkdirSysRmdirSysCreatSysLinkSysUnlinkSysSymlinkSysReadlinkSysChmodSysFchmodSysChownSysFchownSysLchownSysUmaskSysGettimeofdaySysGetrlimitSysGetrusageSysSysinfoSysTimesSysPtraceSysGetuidSysSyslogSysGetgidSysSetuidSysSetgidSysGeteuidSysGetegidSysSetpgidSysGetppidSysGetpgrpSysSetsidSysSetreuidSysSetregidSysGetgroupsSysSetgroupsSysSetresuidSysGetresuidSysSetresgidSysGetresgidSysGetpgidSysSetfsuidSysSetfsgidSysGetsidSysCapgetSysCapsetSysRtSigpendingSysRtSigtimedwaitSysRtSigqueueinfoSysRtSigsuspendSysSigaltstackSysUtimeSysMknodSysUselibSysPersonalitySysUstatSysStatfsSysFstatfsSysSysfsSysGetprioritySysSetprioritySysSchedSetparamSysSchedGetparamSysSchedSetschedulerSysSchedGetschedulerSysSchedGetPriorityMaxSysSchedGetPriorityMinSysSchedRrGetIntervalSysMlockSysMunlockSysMlockallSysMunlockallSysVhangupSysModifyLdtSysPivotRootSysSysctlSysPrctlSysArchPrctlSysAdjtimexSysSetrlimitSysChrootSysSyncSysAcctSysSettimeofdaySysMountSysUmount2SysSwaponSysSwapoffSysRebootSysSethostnameSysSetdomainnameSysIoplSysIopermSysCreateModuleSysInitModuleSysDeleteModuleSysGetKernelSymsSysQueryModuleSysQuotactlSysNfsservctlSysGetpmsgSysPutpmsgSysAfsSyscallSysTuxcallSysSecuritySysGettidSysReadaheadSysSetxattrSysLsetxattrSysFsetxattrSysGetxattrSysLgetxattrSysFgetxattrSysListxattrSysLlistxattrSysFlistxattrSysRemovexattrSysLremovexattrSysFremovexattrSysTkillSysTimeSysFutexSysSchedSetaffinitySysSchedGetaffinitySysSetThreadAreaSysIoSetupSysIoDestroySysIoGeteventsSysIoSubmitSysIoCancelSysGetThreadAreaSysLookupDcookieSysEpollCreateSysEpollCtlOldSysEpollWaitOldSysRemapFilePagesSysGetdents64SysSetTidAddressSysRestartSyscallSysSemtimedopSysFadvise64SysTimerCreateSysTimerSettimeSysTimerGettimeSysTimerGetoverrunSysTimerDeleteSysClockSettimeSysClockGettimeSysClockGetresSysClockNanosleepSysExitGroupSysEpollWaitSysEpollCtlSysTgkillSysUtimesSysVserverSysMbindSysSetMempolicySysGetMempolicySysMqOpenSysMqUnlinkSysMqTimedsendSysMqTimedreceiveSysMqNotifySysMqGetsetattrSysKexecLoadSysWaitidSysAddKeySysRequestKeySysKeyctlSysIoprioSetSysIoprioGetSysInotifyInitSysInotifyAddWatchSysInotifyRmWatchSysMigratePagesSysOpenatSysMkdiratSysMknodatSysFchownatSysFutimesatSysNewfstatatSysUnlinkatSysRenameatSysLinkatSysSymlinkatSysReadlinkatSysFchmodatSysFaccessatSysPselect6SysPpollSysUnshareSysSetRobustListSysGetRobustListSysSpliceSysTeeSysSyncFileRangeSysVmspliceSysMovePagesSysUtimensatSysEpollPwaitSysSignalfdSysTimerfdCreateSysEventfdSysFallocateSysTimerfdSettimeSysTimerfdGettimeSysAccept4SysSignalfd4SysEventfd2SysEpollCreate1SysDup3SysPipe2SysInotifyInit1SysPreadvSysPwritevSysRtTgsigqueueinfoSysPerfEventOpenSysRecvmmsgSysFanotifyInitSysFanotifyMarkSysPrlimit64SysNameToHandleAtSysOpenByHandleAtSysClockAdjtimeSysSyncfsSysSendmmsgSysSetnsSysGetcpuSysProcessVmReadvSysProcessVmWritevSysKcmpSysFinitMod
uleSysSchedSetattrSysSchedGetattrSysRenameat2SysSeccompSysGetrandomSysMemfdCreateSysKexecFileLoadSysBpfSysExecveatSysUserfaultfdSysMembarrierSysMlock2SysCopyFileRangeSysPreadv2SysPwritev2SysPkeyMprotectSysPkeyAllocSysPkeyFreeSysStatxSysIoPgeteventsSysRseq" - _Syscall_name_1 = "SysPidfdSendSignalSysIoUringSetupSysIoUringEnterSysIoUringRegisterSysOpenTreeSysMoveMountSysFsopenSysFsconfigSysFsmountSysFspickSysPidfdOpenSysClone3SysCloseRangeSysOpenat2SysPidfdGetfdSysFaccessat2SysProcessMadviseSysEpollPwait2SysMountSetattrSysQuotactlFdSysLandlockCreateRulesetSysLandlockAddRuleSysLandlockRestrictSelfSysMemfdSecretSysProcessMreleaseSysFutexWaitvSysSetMempolicyHomeNodeSysCachestatSysFchmodat2SysMapShadowStackSysFutexWakeSysFutexWaitSysFutexRequeueSysStatmountSysListmountSysLsmGetSelfAttrSysLsmSetSelfAttrSysLsmListModules" + _Syscall_name_0 = "SysReadSysWriteSysOpenSysCloseSysStatSysFstatSysLstatSysPollSysLseekSysMmapSysMprotectSysMunmapSysBrkSysRtSigactionSysRtSigprocmaskSysRtSigreturnSysIoctlSysPread64SysPwrite64SysReadvSysWritevSysAccessSysPipeSysSelectSysSchedYieldSysMremapSysMsyncSysMincoreSysMadviseSysShmgetSysShmatSysShmctlSysDupSysDup2SysPauseSysNanosleepSysGetitimerSysAlarmSysSetitimerSysGetpidSysSendfileSysSocketSysConnectSysAcceptSysSendtoSysRecvfromSysSendmsgSysRecvmsgSysShutdownSysBindSysListenSysGetsocknameSysGetpeernameSysSocketpairSysSetsockoptSysGetsockoptSysCloneSysForkSysVforkSysExecveSysExitSysWait4SysKillSysUnameSysSemgetSysSemopSysSemctlSysShmdtSysMsggetSysMsgsndSysMsgrcvSysMsgctlSysFcntlSysFlockSysFsyncSysFdatasyncSysTruncateSysFtruncateSysGetdentsSysGetcwdSysChdirSysFchdirSysRenameSysMkdirSysRmdirSysCreatSysLinkSysUnlinkSysSymlinkSysReadlinkSysChmodSysFchmodSysChownSysFchownSysLchownSysUmaskSysGettimeofdaySysGetrlimitSysGetrusageSysSysinfoSysTimesSysPtraceSysGetuidSysSyslogSysGetgidSysSetuidSysSetgidSysGeteuidSysGetegidSysSetpgidSysGetppidSysGetpgrpSysSetsidSysSetreuidSysSetregidSysGetgroupsSysSetgroupsSysSetresuidSysGetresuidSysSetresgidSysGetresgidSysGetpgidSysSetfsuidSysSetfsgidSysGetsidSysCapgetSysCapsetSysRtSigpendingSysRtSigtimedwaitSysRtSigqueueinfoSysRtSigsuspendSysSigaltstackSysUtimeSysMknodSysUselibSysPersonalitySysUstatSysStatfsSysFstatfsSysSysfsSysGetprioritySysSetprioritySysSchedSetparamSysSchedGetparamSysSchedSetschedulerSysSchedGetschedulerSysSchedGetPriorityMaxSysSchedGetPriorityMinSysSchedRrGetIntervalSysMlockSysMunlockSysMlockallSysMunlockallSysVhangupSysModifyLdtSysPivotRootSysSysctlSysPrctlSysArchPrctlSysAdjtimexSysSetrlimitSysChrootSysSyncSysAcctSysSettimeofdaySysMountSysUmount2SysSwaponSysSwapoffSysRebootSysSethostnameSysSetdomainnameSysIoplSysIopermSysCreateModuleSysInitModuleSysDeleteModuleSysGetKernelSymsSysQueryModuleSysQuotactlSysNfsservctlSysGetpmsgSysPutpmsgSysAfsSyscallSysTuxcallSysSecuritySysGettidSysReadaheadSysSetxattrSysLsetxattrSysFsetxattrSysGetxattrSysLgetxattrSysFgetxattrSysListxattrSysLlistxattrSysFlistxattrSysRemovexattrSysLremovexattrSysFremovexattrSysTkillSysTimeSysFutexSysSchedSetaffinitySysSchedGetaffinitySysSetThreadAreaSysIoSetupSysIoDestroySysIoGeteventsSysIoSubmitSysIoCancelSysGetThreadAreaSysLookupDcookieSysEpollCreateSysEpollCtlOldSysEpollWaitOldSysRemapFilePagesSysGetdents64SysSetTidAddressSysRestartSyscallSysSemtimedopSysFadvise64SysTimerCreateSysTimerSettimeSysTimerGettimeSysTimerGetoverrunSysTimerDeleteSysClockSettimeSysClockGettimeSysClockGetresSysClockNanosleepSysExitGroupSysEpollWaitSysEpollCtlSysTgkillSysUtimesSysVserverSysMbindSysSetMempolicySysGetMempolicySysMqOpenSysMqUnlinkSysMqTimedsendSysMqTimedreceiveSysMqNotifyS
ysMqGetsetattrSysKexecLoadSysWaitidSysAddKeySysRequestKeySysKeyctlSysIoprioSetSysIoprioGetSysInotifyInitSysInotifyAddWatchSysInotifyRmWatchSysMigratePagesSysOpenatSysMkdiratSysMknodatSysFchownatSysFutimesatSysNewfstatatSysUnlinkatSysRenameatSysLinkatSysSymlinkatSysReadlinkatSysFchmodatSysFaccessatSysPselect6SysPpollSysUnshareSysSetRobustListSysGetRobustListSysSpliceSysTeeSysSyncFileRangeSysVmspliceSysMovePagesSysUtimensatSysEpollPwaitSysSignalfdSysTimerfdCreateSysEventfdSysFallocateSysTimerfdSettimeSysTimerfdGettimeSysAccept4SysSignalfd4SysEventfd2SysEpollCreate1SysDup3SysPipe2SysInotifyInit1SysPreadvSysPwritevSysRtTgsigqueueinfoSysPerfEventOpenSysRecvmmsgSysFanotifyInitSysFanotifyMarkSysPrlimit64SysNameToHandleAtSysOpenByHandleAtSysClockAdjtimeSysSyncfsSysSendmmsgSysSetnsSysGetcpuSysProcessVmReadvSysProcessVmWritevSysKcmpSysFinitModuleSysSchedSetattrSysSchedGetattrSysRenameat2SysSeccompSysGetrandomSysMemfdCreateSysKexecFileLoadSysBpfSysExecveatSysUserfaultfdSysMembarrierSysMlock2SysCopyFileRangeSysPreadv2SysPwritev2SysPkeyMprotectSysPkeyAllocSysPkeyFreeSysStatxSysIoPgeteventsSysRseqSysUretprobe" + _Syscall_name_1 = "SysPidfdSendSignalSysIoUringSetupSysIoUringEnterSysIoUringRegisterSysOpenTreeSysMoveMountSysFsopenSysFsconfigSysFsmountSysFspickSysPidfdOpenSysClone3SysCloseRangeSysOpenat2SysPidfdGetfdSysFaccessat2SysProcessMadviseSysEpollPwait2SysMountSetattrSysQuotactlFdSysLandlockCreateRulesetSysLandlockAddRuleSysLandlockRestrictSelfSysMemfdSecretSysProcessMreleaseSysFutexWaitvSysSetMempolicyHomeNodeSysCachestatSysFchmodat2SysMapShadowStackSysFutexWakeSysFutexWaitSysFutexRequeueSysStatmountSysListmountSysLsmGetSelfAttrSysLsmSetSelfAttrSysLsmListModulesSysMsealSysSetxattratSysGetxattratSysListxattratSysRemovexattrat" ) var ( - _Syscall_index_0 = [...]uint16{0, 7, 15, 22, 30, 37, 45, 53, 60, 68, 75, 86, 95, 101, 115, 131, 145, 153, 163, 174, 182, 191, 200, 207, 216, 229, 238, 246, 256, 266, 275, 283, 292, 298, 305, 313, 325, 337, 345, 357, 366, 377, 386, 396, 405, 414, 425, 435, 445, 456, 463, 472, 486, 500, 513, 526, 539, 547, 554, 562, 571, 578, 586, 593, 601, 610, 618, 627, 635, 644, 653, 662, 671, 679, 687, 695, 707, 718, 730, 741, 750, 758, 767, 776, 784, 792, 800, 807, 816, 826, 837, 845, 854, 862, 871, 880, 888, 903, 915, 927, 937, 945, 954, 963, 972, 981, 990, 999, 1009, 1019, 1029, 1039, 1049, 1058, 1069, 1080, 1092, 1104, 1116, 1128, 1140, 1152, 1162, 1173, 1184, 1193, 1202, 1211, 1226, 1243, 1260, 1275, 1289, 1297, 1305, 1314, 1328, 1336, 1345, 1355, 1363, 1377, 1391, 1407, 1423, 1443, 1463, 1485, 1507, 1528, 1536, 1546, 1557, 1570, 1580, 1592, 1604, 1613, 1621, 1633, 1644, 1656, 1665, 1672, 1679, 1694, 1702, 1712, 1721, 1731, 1740, 1754, 1770, 1777, 1786, 1801, 1814, 1829, 1845, 1859, 1870, 1883, 1893, 1903, 1916, 1926, 1937, 1946, 1958, 1969, 1981, 1993, 2004, 2016, 2028, 2040, 2053, 2066, 2080, 2095, 2110, 2118, 2125, 2133, 2152, 2171, 2187, 2197, 2209, 2223, 2234, 2245, 2261, 2277, 2291, 2305, 2320, 2337, 2350, 2366, 2383, 2396, 2408, 2422, 2437, 2452, 2470, 2484, 2499, 2514, 2528, 2545, 2557, 2569, 2580, 2589, 2598, 2608, 2616, 2631, 2646, 2655, 2666, 2680, 2697, 2708, 2723, 2735, 2744, 2753, 2766, 2775, 2787, 2799, 2813, 2831, 2848, 2863, 2872, 2882, 2892, 2903, 2915, 2928, 2939, 2950, 2959, 2971, 2984, 2995, 3007, 3018, 3026, 3036, 3052, 3068, 3077, 3083, 3099, 3110, 3122, 3134, 3147, 3158, 3174, 3184, 3196, 3213, 3230, 3240, 3252, 3263, 3278, 3285, 3293, 3308, 3317, 3327, 3346, 3362, 3373, 3388, 3403, 3415, 3432, 3449, 3464, 3473, 3484, 3492, 3501, 3518, 3536, 3543, 3557, 
3572, 3587, 3599, 3609, 3621, 3635, 3651, 3657, 3668, 3682, 3695, 3704, 3720, 3730, 3741, 3756, 3768, 3779, 3787, 3802, 3809} - _Syscall_index_1 = [...]uint16{0, 18, 33, 48, 66, 77, 89, 98, 109, 119, 128, 140, 149, 162, 172, 185, 198, 215, 229, 244, 257, 281, 299, 322, 336, 354, 367, 390, 402, 414, 431, 443, 455, 470, 482, 494, 511, 528, 545} + _Syscall_index_0 = [...]uint16{0, 7, 15, 22, 30, 37, 45, 53, 60, 68, 75, 86, 95, 101, 115, 131, 145, 153, 163, 174, 182, 191, 200, 207, 216, 229, 238, 246, 256, 266, 275, 283, 292, 298, 305, 313, 325, 337, 345, 357, 366, 377, 386, 396, 405, 414, 425, 435, 445, 456, 463, 472, 486, 500, 513, 526, 539, 547, 554, 562, 571, 578, 586, 593, 601, 610, 618, 627, 635, 644, 653, 662, 671, 679, 687, 695, 707, 718, 730, 741, 750, 758, 767, 776, 784, 792, 800, 807, 816, 826, 837, 845, 854, 862, 871, 880, 888, 903, 915, 927, 937, 945, 954, 963, 972, 981, 990, 999, 1009, 1019, 1029, 1039, 1049, 1058, 1069, 1080, 1092, 1104, 1116, 1128, 1140, 1152, 1162, 1173, 1184, 1193, 1202, 1211, 1226, 1243, 1260, 1275, 1289, 1297, 1305, 1314, 1328, 1336, 1345, 1355, 1363, 1377, 1391, 1407, 1423, 1443, 1463, 1485, 1507, 1528, 1536, 1546, 1557, 1570, 1580, 1592, 1604, 1613, 1621, 1633, 1644, 1656, 1665, 1672, 1679, 1694, 1702, 1712, 1721, 1731, 1740, 1754, 1770, 1777, 1786, 1801, 1814, 1829, 1845, 1859, 1870, 1883, 1893, 1903, 1916, 1926, 1937, 1946, 1958, 1969, 1981, 1993, 2004, 2016, 2028, 2040, 2053, 2066, 2080, 2095, 2110, 2118, 2125, 2133, 2152, 2171, 2187, 2197, 2209, 2223, 2234, 2245, 2261, 2277, 2291, 2305, 2320, 2337, 2350, 2366, 2383, 2396, 2408, 2422, 2437, 2452, 2470, 2484, 2499, 2514, 2528, 2545, 2557, 2569, 2580, 2589, 2598, 2608, 2616, 2631, 2646, 2655, 2666, 2680, 2697, 2708, 2723, 2735, 2744, 2753, 2766, 2775, 2787, 2799, 2813, 2831, 2848, 2863, 2872, 2882, 2892, 2903, 2915, 2928, 2939, 2950, 2959, 2971, 2984, 2995, 3007, 3018, 3026, 3036, 3052, 3068, 3077, 3083, 3099, 3110, 3122, 3134, 3147, 3158, 3174, 3184, 3196, 3213, 3230, 3240, 3252, 3263, 3278, 3285, 3293, 3308, 3317, 3327, 3346, 3362, 3373, 3388, 3403, 3415, 3432, 3449, 3464, 3473, 3484, 3492, 3501, 3518, 3536, 3543, 3557, 3572, 3587, 3599, 3609, 3621, 3635, 3651, 3657, 3668, 3682, 3695, 3704, 3720, 3730, 3741, 3756, 3768, 3779, 3787, 3802, 3809, 3821} + _Syscall_index_1 = [...]uint16{0, 18, 33, 48, 66, 77, 89, 98, 109, 119, 128, 140, 149, 162, 172, 185, 198, 215, 229, 244, 257, 281, 299, 322, 336, 354, 367, 390, 402, 414, 431, 443, 455, 470, 482, 494, 511, 528, 545, 553, 566, 579, 593, 609} ) func (i Syscall) String() string { switch { - case 0 <= i && i <= 334: + case 0 <= i && i <= 335: return _Syscall_name_0[_Syscall_index_0[i]:_Syscall_index_0[i+1]] - case 424 <= i && i <= 461: + case 424 <= i && i <= 466: i -= 424 return _Syscall_name_1[_Syscall_index_1[i]:_Syscall_index_1[i+1]] default: diff --git a/pkg/security/secl/model/syscalls_string_linux_arm64.go b/pkg/security/secl/model/syscalls_string_linux_arm64.go index ebcc9b7f3bc50..d8cfeb4bffb73 100644 --- a/pkg/security/secl/model/syscalls_string_linux_arm64.go +++ b/pkg/security/secl/model/syscalls_string_linux_arm64.go @@ -347,21 +347,26 @@ func _() { _ = x[SysLsmGetSelfAttr-459] _ = x[SysLsmSetSelfAttr-460] _ = x[SysLsmListModules-461] - _ = x[SysSyscalls-462] + _ = x[SysMseal-462] + _ = x[SysSetxattrat-463] + _ = x[SysGetxattrat-464] + _ = x[SysListxattrat-465] + _ = x[SysRemovexattrat-466] + _ = x[SysSyscalls-467] } const ( _Syscall_name_0 = 
"SysIoSetupSysIoDestroySysIoSubmitSysIoCancelSysIoGeteventsSysSetxattrSysLsetxattrSysFsetxattrSysGetxattrSysLgetxattrSysFgetxattrSysListxattrSysLlistxattrSysFlistxattrSysRemovexattrSysLremovexattrSysFremovexattrSysGetcwdSysLookupDcookieSysEventfd2SysEpollCreate1SysEpollCtlSysEpollPwaitSysDupSysDup3SysFcntlSysInotifyInit1SysInotifyAddWatchSysInotifyRmWatchSysIoctlSysIoprioSetSysIoprioGetSysFlockSysMknodatSysMkdiratSysUnlinkatSysSymlinkatSysLinkatSysRenameatSysUmount2SysMountSysPivotRootSysNfsservctlSysStatfsSysFstatfsSysTruncateSysFtruncateSysFallocateSysFaccessatSysChdirSysFchdirSysChrootSysFchmodSysFchmodatSysFchownatSysFchownSysOpenatSysCloseSysVhangupSysPipe2SysQuotactlSysGetdents64SysLseekSysReadSysWriteSysReadvSysWritevSysPread64SysPwrite64SysPreadvSysPwritevSysSendfileSysPselect6SysPpollSysSignalfd4SysVmspliceSysSpliceSysTeeSysReadlinkatSysFstatatSysFstatSysSyncSysFsyncSysFdatasyncSysSyncFileRange2SysTimerfdCreateSysTimerfdSettimeSysTimerfdGettimeSysUtimensatSysAcctSysCapgetSysCapsetSysPersonalitySysExitSysExitGroupSysWaitidSysSetTidAddressSysUnshareSysFutexSysSetRobustListSysGetRobustListSysNanosleepSysGetitimerSysSetitimerSysKexecLoadSysInitModuleSysDeleteModuleSysTimerCreateSysTimerGettimeSysTimerGetoverrunSysTimerSettimeSysTimerDeleteSysClockSettimeSysClockGettimeSysClockGetresSysClockNanosleepSysSyslogSysPtraceSysSchedSetparamSysSchedSetschedulerSysSchedGetschedulerSysSchedGetparamSysSchedSetaffinitySysSchedGetaffinitySysSchedYieldSysSchedGetPriorityMaxSysSchedGetPriorityMinSysSchedRrGetIntervalSysRestartSyscallSysKillSysTkillSysTgkillSysSigaltstackSysRtSigsuspendSysRtSigactionSysRtSigprocmaskSysRtSigpendingSysRtSigtimedwaitSysRtSigqueueinfoSysRtSigreturnSysSetprioritySysGetprioritySysRebootSysSetregidSysSetgidSysSetreuidSysSetuidSysSetresuidSysGetresuidSysSetresgidSysGetresgidSysSetfsuidSysSetfsgidSysTimesSysSetpgidSysGetpgidSysGetsidSysSetsidSysGetgroupsSysSetgroupsSysUnameSysSethostnameSysSetdomainnameSysGetrlimitSysSetrlimitSysGetrusageSysUmaskSysPrctlSysGetcpuSysGettimeofdaySysSettimeofdaySysAdjtimexSysGetpidSysGetppidSysGetuidSysGeteuidSysGetgidSysGetegidSysGettidSysSysinfoSysMqOpenSysMqUnlinkSysMqTimedsendSysMqTimedreceiveSysMqNotifySysMqGetsetattrSysMsggetSysMsgctlSysMsgrcvSysMsgsndSysSemgetSysSemctlSysSemtimedopSysSemopSysShmgetSysShmctlSysShmatSysShmdtSysSocketSysSocketpairSysBindSysListenSysAcceptSysConnectSysGetsocknameSysGetpeernameSysSendtoSysRecvfromSysSetsockoptSysGetsockoptSysShutdownSysSendmsgSysRecvmsgSysReadaheadSysBrkSysMunmapSysMremapSysAddKeySysRequestKeySysKeyctlSysCloneSysExecveSysMmapSysFadvise64SysSwaponSysSwapoffSysMprotectSysMsyncSysMlockSysMunlockSysMlockallSysMunlockallSysMincoreSysMadviseSysRemapFilePagesSysMbindSysGetMempolicySysSetMempolicySysMigratePagesSysMovePagesSysRtTgsigqueueinfoSysPerfEventOpenSysAccept4SysRecvmmsgSysArchSpecificSyscall" _Syscall_name_1 = "SysWait4SysPrlimit64SysFanotifyInitSysFanotifyMarkSysNameToHandleAtSysOpenByHandleAtSysClockAdjtimeSysSyncfsSysSetnsSysSendmmsgSysProcessVmReadvSysProcessVmWritevSysKcmpSysFinitModuleSysSchedSetattrSysSchedGetattrSysRenameat2SysSeccompSysGetrandomSysMemfdCreateSysBpfSysExecveatSysUserfaultfdSysMembarrierSysMlock2SysCopyFileRangeSysPreadv2SysPwritev2SysPkeyMprotectSysPkeyAllocSysPkeyFreeSysStatxSysIoPgeteventsSysRseqSysKexecFileLoad" _Syscall_name_2 = "SysClockGettime64SysClockSettime64SysClockAdjtime64SysClockGetresTime64SysClockNanosleepTime64SysTimerGettime64SysTimerSettime64SysTimerfdGettime64SysTimerfdSettime64SysUtimensatTime64SysPselect6Time64SysPpollTime64" - _Syscall_name_3 = 
"SysIoPgeteventsTime64SysRecvmmsgTime64SysMqTimedsendTime64SysMqTimedreceiveTime64SysSemtimedopTime64SysRtSigtimedwaitTime64SysFutexTime64SysSchedRrGetIntervalTime64SysPidfdSendSignalSysIoUringSetupSysIoUringEnterSysIoUringRegisterSysOpenTreeSysMoveMountSysFsopenSysFsconfigSysFsmountSysFspickSysPidfdOpenSysClone3SysCloseRangeSysOpenat2SysPidfdGetfdSysFaccessat2SysProcessMadviseSysEpollPwait2SysMountSetattrSysQuotactlFdSysLandlockCreateRulesetSysLandlockAddRuleSysLandlockRestrictSelfSysMemfdSecretSysProcessMreleaseSysFutexWaitvSysSetMempolicyHomeNodeSysCachestatSysFchmodat2SysMapShadowStackSysFutexWakeSysFutexWaitSysFutexRequeueSysStatmountSysListmountSysLsmGetSelfAttrSysLsmSetSelfAttrSysLsmListModulesSysSyscalls" + _Syscall_name_3 = "SysIoPgeteventsTime64SysRecvmmsgTime64SysMqTimedsendTime64SysMqTimedreceiveTime64SysSemtimedopTime64SysRtSigtimedwaitTime64SysFutexTime64SysSchedRrGetIntervalTime64SysPidfdSendSignalSysIoUringSetupSysIoUringEnterSysIoUringRegisterSysOpenTreeSysMoveMountSysFsopenSysFsconfigSysFsmountSysFspickSysPidfdOpenSysClone3SysCloseRangeSysOpenat2SysPidfdGetfdSysFaccessat2SysProcessMadviseSysEpollPwait2SysMountSetattrSysQuotactlFdSysLandlockCreateRulesetSysLandlockAddRuleSysLandlockRestrictSelfSysMemfdSecretSysProcessMreleaseSysFutexWaitvSysSetMempolicyHomeNodeSysCachestatSysFchmodat2SysMapShadowStackSysFutexWakeSysFutexWaitSysFutexRequeueSysStatmountSysListmountSysLsmGetSelfAttrSysLsmSetSelfAttrSysLsmListModulesSysMsealSysSetxattratSysGetxattratSysListxattratSysRemovexattratSysSyscalls" ) var ( _Syscall_index_0 = [...]uint16{0, 10, 22, 33, 44, 58, 69, 81, 93, 104, 116, 128, 140, 153, 166, 180, 195, 210, 219, 235, 246, 261, 272, 285, 291, 298, 306, 321, 339, 356, 364, 376, 388, 396, 406, 416, 427, 439, 448, 459, 469, 477, 489, 502, 511, 521, 532, 544, 556, 568, 576, 585, 594, 603, 614, 625, 634, 643, 651, 661, 669, 680, 693, 701, 708, 716, 724, 733, 743, 754, 763, 773, 784, 795, 803, 815, 826, 835, 841, 854, 864, 872, 879, 887, 899, 916, 932, 949, 966, 978, 985, 994, 1003, 1017, 1024, 1036, 1045, 1061, 1071, 1079, 1095, 1111, 1123, 1135, 1147, 1159, 1172, 1187, 1201, 1216, 1234, 1249, 1263, 1278, 1293, 1307, 1324, 1333, 1342, 1358, 1378, 1398, 1414, 1433, 1452, 1465, 1487, 1509, 1530, 1547, 1554, 1562, 1571, 1585, 1600, 1614, 1630, 1645, 1662, 1679, 1693, 1707, 1721, 1730, 1741, 1750, 1761, 1770, 1782, 1794, 1806, 1818, 1829, 1840, 1848, 1858, 1868, 1877, 1886, 1898, 1910, 1918, 1932, 1948, 1960, 1972, 1984, 1992, 2000, 2009, 2024, 2039, 2050, 2059, 2069, 2078, 2088, 2097, 2107, 2116, 2126, 2135, 2146, 2160, 2177, 2188, 2203, 2212, 2221, 2230, 2239, 2248, 2257, 2270, 2278, 2287, 2296, 2304, 2312, 2321, 2334, 2341, 2350, 2359, 2369, 2383, 2397, 2406, 2417, 2430, 2443, 2454, 2464, 2474, 2486, 2492, 2501, 2510, 2519, 2532, 2541, 2549, 2558, 2565, 2577, 2586, 2596, 2607, 2615, 2623, 2633, 2644, 2657, 2667, 2677, 2694, 2702, 2717, 2732, 2747, 2759, 2778, 2794, 2804, 2815, 2837} _Syscall_index_1 = [...]uint16{0, 8, 20, 35, 50, 67, 84, 99, 108, 116, 127, 144, 162, 169, 183, 198, 213, 225, 235, 247, 261, 267, 278, 292, 305, 314, 330, 340, 351, 366, 378, 389, 397, 412, 419, 435} _Syscall_index_2 = [...]uint8{0, 17, 34, 51, 71, 94, 111, 128, 147, 166, 184, 201, 215} - _Syscall_index_3 = [...]uint16{0, 21, 38, 58, 81, 100, 123, 137, 164, 182, 197, 212, 230, 241, 253, 262, 273, 283, 292, 304, 313, 326, 336, 349, 362, 379, 393, 408, 421, 445, 463, 486, 500, 518, 531, 554, 566, 578, 595, 607, 619, 634, 646, 658, 675, 692, 709, 720} + _Syscall_index_3 = [...]uint16{0, 21, 38, 58, 81, 
100, 123, 137, 164, 182, 197, 212, 230, 241, 253, 262, 273, 283, 292, 304, 313, 326, 336, 349, 362, 379, 393, 408, 421, 445, 463, 486, 500, 518, 531, 554, 566, 578, 595, 607, 619, 634, 646, 658, 675, 692, 709, 717, 730, 743, 757, 773, 784} ) func (i Syscall) String() string { @@ -374,7 +379,7 @@ func (i Syscall) String() string { case 403 <= i && i <= 414: i -= 403 return _Syscall_name_2[_Syscall_index_2[i]:_Syscall_index_2[i+1]] - case 416 <= i && i <= 462: + case 416 <= i && i <= 467: i -= 416 return _Syscall_name_3[_Syscall_index_3[i]:_Syscall_index_3[i+1]] default: diff --git a/pkg/security/secl/model/unmarshallers_linux.go b/pkg/security/secl/model/unmarshallers_linux.go index 7c38d3127e1ba..9c07f7742be8c 100644 --- a/pkg/security/secl/model/unmarshallers_linux.go +++ b/pkg/security/secl/model/unmarshallers_linux.go @@ -25,6 +25,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" ) func validateReadSize(size, read int) (int, error) { @@ -160,7 +161,7 @@ func (e *Credentials) UnmarshalBinary(data []byte) (int, error) { e.FSGID = binary.NativeEndian.Uint32(data[20:24]) e.AUID = binary.NativeEndian.Uint32(data[24:28]) if binary.NativeEndian.Uint32(data[28:32]) != 1 { - e.AUID = AuditUIDUnset + e.AUID = sharedconsts.AuditUIDUnset } e.CapEffective = binary.NativeEndian.Uint64(data[32:40]) e.CapPermitted = binary.NativeEndian.Uint64(data[40:48]) @@ -316,14 +317,14 @@ func (e *ExitEvent) UnmarshalBinary(data []byte) (int, error) { exitStatus := binary.NativeEndian.Uint32(data[0:4]) if exitStatus&0x7F == 0x00 { // process terminated normally - e.Cause = uint32(ExitExited) + e.Cause = uint32(sharedconsts.ExitExited) e.Code = (exitStatus >> 8) & 0xFF } else if exitStatus&0x7F != 0x7F { // process terminated because of a signal if exitStatus&0x80 == 0x80 { // coredump signal - e.Cause = uint32(ExitCoreDumped) + e.Cause = uint32(sharedconsts.ExitCoreDumped) e.Code = exitStatus & 0x7F } else { // other signals - e.Cause = uint32(ExitSignaled) + e.Cause = uint32(sharedconsts.ExitSignaled) e.Code = exitStatus & 0x7F } } @@ -352,8 +353,8 @@ func (e *ArgsEnvsEvent) UnmarshalBinary(data []byte) (int, error) { e.ID = binary.NativeEndian.Uint64(data[0:8]) e.Size = binary.NativeEndian.Uint32(data[8:12]) - if e.Size > MaxArgEnvSize { - e.Size = MaxArgEnvSize + if e.Size > sharedconsts.MaxArgEnvSize { + e.Size = sharedconsts.MaxArgEnvSize } argsEnvSize := int(e.Size) @@ -385,7 +386,7 @@ func (p *PathKey) UnmarshalBinary(data []byte) (int, error) { // UnmarshalBinary unmarshalls a binary representation of itself func (e *FileFields) UnmarshalBinary(data []byte) (int, error) { - if len(data) < 72 { + if len(data) < FileFieldsSize { return 0, ErrNotEnoughData } @@ -412,7 +413,7 @@ func (e *FileFields) UnmarshalBinary(data []byte) (int, error) { timeNsec = binary.NativeEndian.Uint64(data[48:56]) e.MTime = uint64(time.Unix(int64(timeSec), int64(timeNsec)).UnixNano()) - return 72, nil + return FileFieldsSize, nil } // UnmarshalBinary unmarshalls a binary representation of itself @@ -427,7 +428,7 @@ func (e *LinkEvent) UnmarshalBinary(data []byte) (int, error) { // UnmarshalBinary unmarshalls a binary representation of itself func (e *MkdirEvent) UnmarshalBinary(data []byte) (int, error) { - n, err := UnmarshalBinary(data, &e.SyscallEvent, &e.File) + n, err := UnmarshalBinary(data, &e.SyscallEvent, &e.SyscallContext, &e.File) if err != nil { 
return n, err } @@ -517,8 +518,8 @@ func (s *SpanContext) UnmarshalBinary(data []byte) (int, error) { } s.SpanID = binary.NativeEndian.Uint64(data[0:8]) - s.TraceID.Lo = int64(binary.NativeEndian.Uint64(data[8:16])) - s.TraceID.Hi = int64(binary.NativeEndian.Uint64(data[16:24])) + s.TraceID.Lo = binary.NativeEndian.Uint64(data[8:16]) + s.TraceID.Hi = binary.NativeEndian.Uint64(data[16:24]) return 24, nil } @@ -586,7 +587,7 @@ func (e *RenameEvent) UnmarshalBinary(data []byte) (int, error) { // UnmarshalBinary unmarshalls a binary representation of itself func (e *RmdirEvent) UnmarshalBinary(data []byte) (int, error) { - return UnmarshalBinary(data, &e.SyscallEvent, &e.File) + return UnmarshalBinary(data, &e.SyscallEvent, &e.SyscallContext, &e.File) } // UnmarshalBinary unmarshalls a binary representation of itself @@ -984,12 +985,13 @@ func (e *CgroupTracingEvent) UnmarshalBinary(data []byte) (int, error) { } cursor += read - if len(data)-cursor < 8 { + if len(data)-cursor < 12 { return 0, ErrNotEnoughData } e.ConfigCookie = binary.NativeEndian.Uint64(data[cursor : cursor+8]) - return cursor + 8, nil + e.Pid = binary.NativeEndian.Uint32(data[cursor+8 : cursor+12]) + return cursor + 12, nil } // UnmarshalBinary unmarshals a binary representation of itself @@ -1028,7 +1030,8 @@ func (adlc *ActivityDumpLoadConfig) EventUnmarshalBinary(data []byte) (int, erro adlc.WaitListTimestampRaw = binary.NativeEndian.Uint64(data[16:24]) adlc.StartTimestampRaw = binary.NativeEndian.Uint64(data[24:32]) adlc.EndTimestampRaw = binary.NativeEndian.Uint64(data[32:40]) - adlc.Rate = binary.NativeEndian.Uint32(data[40:44]) + adlc.Rate = binary.NativeEndian.Uint16(data[40:42]) + // 2 bytes of padding adlc.Paused = binary.NativeEndian.Uint32(data[44:48]) return 48, nil } @@ -1056,7 +1059,7 @@ func (e *NetworkContext) UnmarshalBinary(data []byte) (int, error) { return 0, err } - if len(data)-read < 44 { + if len(data)-read < 48 { return 0, ErrNotEnoughData } @@ -1065,11 +1068,11 @@ func (e *NetworkContext) UnmarshalBinary(data []byte) (int, error) { SliceToArray(data[read+16:read+32], dstIP[:]) e.Source.Port = binary.BigEndian.Uint16(data[read+32 : read+34]) e.Destination.Port = binary.BigEndian.Uint16(data[read+34 : read+36]) - // padding 4 bytes + e.L4Protocol = binary.NativeEndian.Uint16(data[read+36 : read+38]) + e.L3Protocol = binary.NativeEndian.Uint16(data[read+38 : read+40]) e.Size = binary.NativeEndian.Uint32(data[read+40 : read+44]) - e.L3Protocol = binary.NativeEndian.Uint16(data[read+44 : read+46]) - e.L4Protocol = binary.NativeEndian.Uint16(data[read+46 : read+48]) + e.NetworkDirection = binary.NativeEndian.Uint32(data[read+44 : read+48]) // readjust IP sizes depending on the protocol switch e.L3Protocol { @@ -1253,6 +1256,33 @@ func (e *VethPairEvent) UnmarshalBinary(data []byte) (int, error) { return cursor, nil } +// UnmarshalBinary unmarshalls a binary representation of itself +func (e *AcceptEvent) UnmarshalBinary(data []byte) (int, error) { + read, err := UnmarshalBinary(data, &e.SyscallEvent) + if err != nil { + return 0, err + } + + if len(data)-read < 20 { + return 0, ErrNotEnoughData + } + + var ipRaw [16]byte + SliceToArray(data[read:read+16], ipRaw[:]) + e.AddrFamily = binary.NativeEndian.Uint16(data[read+16 : read+18]) + e.Addr.Port = binary.BigEndian.Uint16(data[read+18 : read+20]) + + // readjust IP size depending on the protocol + switch e.AddrFamily { + case unix.AF_INET: + e.Addr.IPNet = *eval.IPNetFromIP(ipRaw[0:4]) + case unix.AF_INET6: + e.Addr.IPNet = *eval.IPNetFromIP(ipRaw[:]) + 
} + + return read + 20, nil +} + // UnmarshalBinary unmarshalls a binary representation of itself func (e *BindEvent) UnmarshalBinary(data []byte) (int, error) { read, err := UnmarshalBinary(data, &e.SyscallEvent) @@ -1272,9 +1302,9 @@ func (e *BindEvent) UnmarshalBinary(data []byte) (int, error) { // readjust IP size depending on the protocol switch e.AddrFamily { - case 0x2: // unix.AF_INET + case unix.AF_INET: e.Addr.IPNet = *eval.IPNetFromIP(ipRaw[0:4]) - case 0xa: // unix.AF_INET6 + case unix.AF_INET6: e.Addr.IPNet = *eval.IPNetFromIP(ipRaw[:]) } @@ -1300,9 +1330,9 @@ func (e *ConnectEvent) UnmarshalBinary(data []byte) (int, error) { // readjust IP size depending on the protocol switch e.AddrFamily { - case 0x2: // unix.AF_INET + case unix.AF_INET: e.Addr.IPNet = *eval.IPNetFromIP(ipRaw[0:4]) - case 0xa: // unix.AF_INET6 + case unix.AF_INET6: e.Addr.IPNet = *eval.IPNetFromIP(ipRaw[:]) } @@ -1393,3 +1423,83 @@ func (e *RawPacketEvent) UnmarshalBinary(data []byte) (int, error) { return len(data), nil } + +// UnmarshalBinary unmarshals a binary representation of itself +func (e *NetworkStats) UnmarshalBinary(data []byte) (int, error) { + if len(data) < 16 { + return 0, ErrNotEnoughData + } + + e.DataSize = binary.NativeEndian.Uint64(data[0:8]) + e.PacketCount = binary.NativeEndian.Uint64(data[8:16]) + return 16, nil +} + +// UnmarshalBinary unmarshals a binary representation of itself +func (e *Flow) UnmarshalBinary(data []byte) (int, error) { + if len(data) < 40 { + return 0, ErrNotEnoughData + } + + var srcIP, dstIP [16]byte + SliceToArray(data[0:16], srcIP[:]) + SliceToArray(data[16:32], dstIP[:]) + e.Source.Port = binary.BigEndian.Uint16(data[32:34]) + e.Destination.Port = binary.BigEndian.Uint16(data[34:36]) + e.L4Protocol = binary.NativeEndian.Uint16(data[36:38]) + e.L3Protocol = binary.NativeEndian.Uint16(data[38:40]) + + // readjust IP sizes depending on the protocol + switch e.L3Protocol { + case 0x800: // unix.ETH_P_IP + e.Source.IPNet = *eval.IPNetFromIP(srcIP[0:4]) + e.Destination.IPNet = *eval.IPNetFromIP(dstIP[0:4]) + default: + e.Source.IPNet = *eval.IPNetFromIP(srcIP[:]) + e.Destination.IPNet = *eval.IPNetFromIP(dstIP[:]) + } + + // parse stats + readIngress, err := e.Ingress.UnmarshalBinary(data[40:]) + if err != nil { + return 0, ErrNotEnoughData + } + readEgress, err := e.Egress.UnmarshalBinary(data[40+readIngress:]) + if err != nil { + return 0, ErrNotEnoughData + } + + return 40 + readIngress + readEgress, nil +} + +// UnmarshalBinary unmarshals a binary representation of itself +func (e *NetworkFlowMonitorEvent) UnmarshalBinary(data []byte) (int, error) { + read, err := e.Device.UnmarshalBinary(data) + if err != nil { + return 0, ErrNotEnoughData + } + total := read + data = data[read:] + + if len(data) < 8 { + return 0, ErrNotEnoughData + } + e.FlowsCount = binary.NativeEndian.Uint64(data[0:8]) + total += 8 + data = data[8:] + + for i := uint64(0); i < e.FlowsCount; i++ { + // parse flow + var flow Flow + read, err = flow.UnmarshalBinary(data) + if err != nil { + return 0, err + } + total += read + data = data[read:] + + e.Flows = append(e.Flows, flow) + } + + return total, nil +} diff --git a/pkg/security/secl/model/utils/uint128.go b/pkg/security/secl/model/utils/uint128.go new file mode 100644 index 0000000000000..a6f69839ec54f --- /dev/null +++ b/pkg/security/secl/model/utils/uint128.go @@ -0,0 +1,32 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package utils holds utility related to the model +package utils + +import ( + "math/big" +) + +// TraceID is a 128-bit identifier for a trace. +type TraceID struct { + Lo uint64 + Hi uint64 +} + +func (t TraceID) bigInt() *big.Int { + hi := big.NewInt(0) + hi.SetUint64(t.Hi) + hi.Lsh(hi, 64) + + lo := big.NewInt(0) + lo.SetUint64(t.Lo) + + return hi.Add(hi, lo) +} + +func (t TraceID) String() string { + return t.bigInt().String() +} diff --git a/pkg/security/secl/rules/actions.go b/pkg/security/secl/rules/actions.go index 9ca3382442111..62655a970802a 100644 --- a/pkg/security/secl/rules/actions.go +++ b/pkg/security/secl/rules/actions.go @@ -67,13 +67,12 @@ func (a *Action) CompileFilter(parsingContext *ast.ParsingContext, model eval.Mo expression := *a.Def.Filter - eval := eval.NewRule("action_rule", expression, evalOpts) - - if err := eval.Parse(parsingContext); err != nil { + eval, err := eval.NewRule("action_rule", expression, parsingContext, evalOpts) + if err != nil { return &ErrActionFilter{Expression: expression, Err: err} } - if err := eval.GenEvaluator(model, parsingContext); err != nil { + if err := eval.GenEvaluator(model); err != nil { return &ErrActionFilter{Expression: expression, Err: err} } diff --git a/pkg/security/secl/rules/bucket.go b/pkg/security/secl/rules/bucket.go index 59cb7b2d98ee9..7a10ec8feb6ae 100644 --- a/pkg/security/secl/rules/bucket.go +++ b/pkg/security/secl/rules/bucket.go @@ -20,12 +20,6 @@ type RuleBucket struct { // AddRule adds a rule to the bucket func (rb *RuleBucket) AddRule(rule *Rule) error { - for _, r := range rb.rules { - if r.Def.ID == rule.Def.ID { - return &ErrRuleLoad{Rule: rule.PolicyRule, Err: ErrDefinitionIDConflict} - } - } - for _, field := range rule.GetEvaluator().GetFields() { index := sort.SearchStrings(rb.fields, field) if index < len(rb.fields) && rb.fields[index] == field { diff --git a/pkg/security/secl/rules/collected_events_functests.go b/pkg/security/secl/rules/collected_events_functests.go index 25754a4c574c6..9b88f1c33a6f1 100644 --- a/pkg/security/secl/rules/collected_events_functests.go +++ b/pkg/security/secl/rules/collected_events_functests.go @@ -41,7 +41,7 @@ func (ec *EventCollector) CollectEvent(rs *RuleSet, ctx *eval.Context, event eva continue } - fieldEventType, err := event.GetFieldEventType(field) + fieldEventType, _, err := event.GetFieldMetadata(field) if err != nil { rs.logger.Errorf("failed to get event type for field %s: %v", field, err) } diff --git a/pkg/security/secl/rules/errors.go b/pkg/security/secl/rules/errors.go index 8cbbc71a06ddc..09ca57691e023 100644 --- a/pkg/security/secl/rules/errors.go +++ b/pkg/security/secl/rules/errors.go @@ -44,6 +44,9 @@ var ( // ErrRuleAgentFilter is returned when an agent rule was filtered ErrRuleAgentFilter = errors.New("agent rule filtered") + + // ErrMultipleEventCategories is returned when multiple event categories are in the same expansion + ErrMultipleEventCategories = errors.New("multiple event categories in the same rule expansion") ) // ErrFieldTypeUnknown is returned when a field has an unknown type diff --git a/pkg/security/secl/rules/filter/seclrulefilter.go b/pkg/security/secl/rules/filter/seclrulefilter.go new file mode 100644 index 0000000000000..d0d753652296e --- /dev/null +++ b/pkg/security/secl/rules/filter/seclrulefilter.go @@ -0,0 +1,82 @@ +// Unless explicitly stated otherwise all files in this repository are 
licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package filter holds filter related files +package filter + +import ( + "runtime" + + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/ast" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" +) + +// SECLRuleFilter defines a SECL rule filter +type SECLRuleFilter struct { + model eval.Model + parsingContext *ast.ParsingContext +} + +// NewSECLRuleFilter returns a new agent version based rule filter +func NewSECLRuleFilter(model eval.Model) *SECLRuleFilter { + return &SECLRuleFilter{ + model: model, + parsingContext: ast.NewParsingContext(true), + } +} + +func mergeFilterExpressions(filters []string) (expression string) { + for i, filter := range filters { + if i != 0 { + expression += " || " + } + expression += "(" + filter + ")" + } + return +} + +func (r *SECLRuleFilter) newEvalContext() eval.Context { + return eval.Context{ + Event: r.model.NewEvent(), + } +} + +// IsAccepted checks whether the rule is accepted +func (r *SECLRuleFilter) IsAccepted(filters []string) (bool, error) { + if len(filters) == 0 { + return true, nil + } + + // early check for obvious and most used cases + if len(filters) == 1 { + switch filters[0] { + case `os == "linux"`: + return runtime.GOOS == "linux", nil + case `os == "windows"`: + return runtime.GOOS == "windows", nil + } + } + + expression := mergeFilterExpressions(filters) + astRule, err := r.parsingContext.ParseRule(expression) + if err != nil { + return false, err + } + + evalOpts := &eval.Opts{} + evalOpts. + WithConstants(map[string]interface{}{ + "true": &eval.BoolEvaluator{Value: true}, + "false": &eval.BoolEvaluator{Value: false}, + }) + + evaluator, err := eval.NewRuleEvaluator(astRule, r.model, evalOpts) + if err != nil { + return false, err + } + + ctx := r.newEvalContext() + return evaluator.Eval(&ctx), nil +} diff --git a/pkg/security/secl/rules/fim_others.go b/pkg/security/secl/rules/fim_others.go new file mode 100644 index 0000000000000..139815ebb124d --- /dev/null +++ b/pkg/security/secl/rules/fim_others.go @@ -0,0 +1,23 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !unix + +// Package rules holds rules related files +package rules + +type expandedRule struct { + id string + expr string +} + +func expandFim(baseID, _, baseExpr string) []expandedRule { + return []expandedRule{ + { + id: baseID, + expr: baseExpr, + }, + } +} diff --git a/pkg/security/secl/rules/fim_test.go b/pkg/security/secl/rules/fim_test.go new file mode 100644 index 0000000000000..0a41a6ebec733 --- /dev/null +++ b/pkg/security/secl/rules/fim_test.go @@ -0,0 +1,107 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build unix + +// Package rules holds rules related files +package rules + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestExpandFIM(t *testing.T) { + entries := []struct { + id string + expr string + expected []expandedRule + }{ + { + id: "test", + expr: "fim.write.file.path == \"/tmp/test\"", + expected: []expandedRule{ + { + id: "__fim_expanded_open__test", + expr: "(open.file.path == \"/tmp/test\") && open.flags & (O_CREAT|O_TRUNC|O_APPEND|O_RDWR|O_WRONLY) > 0", + }, + { + id: "__fim_expanded_chmod__test", + expr: "chmod.file.path == \"/tmp/test\"", + }, + { + id: "__fim_expanded_chown__test", + expr: "chown.file.path == \"/tmp/test\"", + }, + { + id: "__fim_expanded_link__test", + expr: "link.file.path == \"/tmp/test\"", + }, + { + id: "__fim_expanded_rename__test", + expr: "rename.file.path == \"/tmp/test\"", + }, + { + id: "__fim_expanded_rename_destination__test", + expr: "rename.file.destination.path == \"/tmp/test\"", + }, + { + id: "__fim_expanded_unlink__test", + expr: "unlink.file.path == \"/tmp/test\"", + }, + { + id: "__fim_expanded_utimes__test", + expr: "utimes.file.path == \"/tmp/test\"", + }, + }, + }, + { + id: "complex", + expr: "(fim.write.file.path == \"/tmp/test\" || fim.write.file.name == \"abc\") && process.file.name == \"def\" && container.id != \"\"", + expected: []expandedRule{ + { + id: "__fim_expanded_open__complex", + expr: "((open.file.path == \"/tmp/test\" || open.file.name == \"abc\") && process.file.name == \"def\" && container.id != \"\") && open.flags & (O_CREAT|O_TRUNC|O_APPEND|O_RDWR|O_WRONLY) > 0", + }, + { + id: "__fim_expanded_chmod__complex", + expr: "(chmod.file.path == \"/tmp/test\" || chmod.file.name == \"abc\") && process.file.name == \"def\" && container.id != \"\"", + }, + { + id: "__fim_expanded_chown__complex", + expr: "(chown.file.path == \"/tmp/test\" || chown.file.name == \"abc\") && process.file.name == \"def\" && container.id != \"\"", + }, + { + id: "__fim_expanded_link__complex", + expr: "(link.file.path == \"/tmp/test\" || link.file.name == \"abc\") && process.file.name == \"def\" && container.id != \"\"", + }, + { + id: "__fim_expanded_rename__complex", + expr: "(rename.file.path == \"/tmp/test\" || rename.file.name == \"abc\") && process.file.name == \"def\" && container.id != \"\"", + }, + { + id: "__fim_expanded_rename_destination__complex", + expr: "(rename.file.destination.path == \"/tmp/test\" || rename.file.destination.name == \"abc\") && process.file.name == \"def\" && container.id != \"\"", + }, + { + id: "__fim_expanded_unlink__complex", + expr: "(unlink.file.path == \"/tmp/test\" || unlink.file.name == \"abc\") && process.file.name == \"def\" && container.id != \"\"", + }, + { + id: "__fim_expanded_utimes__complex", + expr: "(utimes.file.path == \"/tmp/test\" || utimes.file.name == \"abc\") && process.file.name == \"def\" && container.id != \"\"", + }, + }, + }, + } + + for _, entry := range entries { + t.Run(entry.id, func(t *testing.T) { + actual := expandFim(entry.id, "", entry.expr) + assert.Equal(t, entry.expected, actual) + }) + } +} diff --git a/pkg/security/secl/rules/fim_unix.go b/pkg/security/secl/rules/fim_unix.go new file mode 100644 index 0000000000000..3e2c313999fd3 --- /dev/null +++ b/pkg/security/secl/rules/fim_unix.go @@ -0,0 +1,55 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). 
+// Copyright 2016-present Datadog, Inc. + +//go:build unix + +// Package rules holds rules related files +package rules + +import ( + "fmt" + "strings" +) + +type expandedRule struct { + id string + expr string +} + +func expandFim(baseID, groupID, baseExpr string) []expandedRule { + if !strings.Contains(baseExpr, "fim.write.file.") { + return []expandedRule{ + { + id: baseID, + expr: baseExpr, + }, + } + } + + var expandedRules []expandedRule + for _, eventType := range []string{"open", "chmod", "chown", "link", "rename", "unlink", "utimes"} { + expr := strings.Replace(baseExpr, "fim.write.file.", fmt.Sprintf("%s.file.", eventType), -1) + if eventType == "open" { + expr = fmt.Sprintf("(%s) && open.flags & (O_CREAT|O_TRUNC|O_APPEND|O_RDWR|O_WRONLY) > 0", expr) + } + + id := fmt.Sprintf("__fim_expanded_%s_%s_%s", eventType, groupID, baseID) + expandedRules = append(expandedRules, expandedRule{ + id: id, + expr: expr, + }) + + if eventType == "rename" { + expr := strings.Replace(baseExpr, "fim.write.file.", "rename.file.destination.", -1) + id := fmt.Sprintf("__fim_expanded_%s_%s_%s", "rename_destination", groupID, baseID) + expandedRules = append(expandedRules, expandedRule{ + id: id, + expr: expr, + }) + } + } + + return expandedRules +} diff --git a/pkg/security/secl/rules/rule_filters.go b/pkg/security/secl/rules/rule_filters.go index e6fbb0024bea7..ff28d3257879a 100644 --- a/pkg/security/secl/rules/rule_filters.go +++ b/pkg/security/secl/rules/rule_filters.go @@ -11,9 +11,8 @@ import ( "github.com/Masterminds/semver/v3" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/ast" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules/filter" "github.com/DataDog/datadog-agent/pkg/security/secl/validators" ) @@ -81,76 +80,22 @@ func (r *AgentVersionFilter) IsMacroAccepted(macro *MacroDefinition) (bool, erro // SECLRuleFilter defines a SECL rule filter type SECLRuleFilter struct { - model eval.Model - parsingContext *ast.ParsingContext + inner *filter.SECLRuleFilter } // NewSECLRuleFilter returns a new agent version based rule filter func NewSECLRuleFilter(model eval.Model) *SECLRuleFilter { return &SECLRuleFilter{ - model: model, - parsingContext: ast.NewParsingContext(false), - } -} - -func mergeFilterExpressions(filters []string) (expression string) { - for i, filter := range filters { - if i != 0 { - expression += " || " - } - expression += "(" + filter + ")" - } - return -} - -func (r *SECLRuleFilter) newEvalContext() eval.Context { - return eval.Context{ - Event: r.model.NewEvent(), + inner: filter.NewSECLRuleFilter(model), } } // IsRuleAccepted checks whether the rule is accepted func (r *SECLRuleFilter) IsRuleAccepted(rule *RuleDefinition) (bool, error) { - if len(rule.Filters) == 0 { - return true, nil - } - - expression := mergeFilterExpressions(rule.Filters) - astRule, err := r.parsingContext.ParseRule(expression) - if err != nil { - return false, err - } - - evalOpts := &eval.Opts{} - evalOpts. 
- WithConstants(model.BooleanConstants) - - evaluator, err := eval.NewRuleEvaluator(astRule, r.model, evalOpts) - if err != nil { - return false, err - } - - ctx := r.newEvalContext() - return evaluator.Eval(&ctx), nil + return r.inner.IsAccepted(rule.Filters) } // IsMacroAccepted checks whether the macro is accepted func (r *SECLRuleFilter) IsMacroAccepted(macro *MacroDefinition) (bool, error) { - if len(macro.Filters) == 0 { - return true, nil - } - - expression := mergeFilterExpressions(macro.Filters) - astRule, err := r.parsingContext.ParseRule(expression) - if err != nil { - return false, err - } - - evaluator, err := eval.NewRuleEvaluator(astRule, r.model, &eval.Opts{}) - if err != nil { - return false, err - } - - ctx := r.newEvalContext() - return evaluator.Eval(&ctx), nil + return r.inner.IsAccepted(macro.Filters) } diff --git a/pkg/security/secl/rules/ruleset.go b/pkg/security/secl/rules/ruleset.go index ad0a2124eca20..062a837b7f8e8 100644 --- a/pkg/security/secl/rules/ruleset.go +++ b/pkg/security/secl/rules/ruleset.go @@ -11,6 +11,7 @@ import ( "fmt" "reflect" "slices" + "strings" "sync" "github.com/spf13/cast" @@ -121,6 +122,10 @@ func (rs *RuleSet) AddMacro(parsingContext *ast.ParsingContext, pMacro *PolicyMa case pMacro.Def.Expression != "" && len(pMacro.Def.Values) > 0: return nil, &ErrMacroLoad{Macro: pMacro, Err: errors.New("only one of 'expression' and 'values' can be defined")} case pMacro.Def.Expression != "": + if strings.Contains(pMacro.Def.Expression, "fim.write.file.") { + return nil, &ErrMacroLoad{Macro: pMacro, Err: errors.New("macro expression cannot contain 'fim.write.file.' event types")} + } + if macro, err = eval.NewMacro(pMacro.Def.ID, pMacro.Def.Expression, rs.model, parsingContext, rs.evalOpts); err != nil { return nil, &ErrMacroLoad{Macro: pMacro, Err: err} } @@ -203,7 +208,7 @@ func (rs *RuleSet) PopulateFieldsWithRuleActionsData(policyRules []*PolicyRule, variableValue = actionDef.Set.Value } else if actionDef.Set.Field != "" { - kind, err := rs.eventCtor().GetFieldType(actionDef.Set.Field) + _, kind, err := rs.eventCtor().GetFieldMetadata(actionDef.Set.Field) if err != nil { errs = multierror.Append(errs, fmt.Errorf("failed to get field '%s': %w", actionDef.Set.Field, err)) continue @@ -292,19 +297,19 @@ func (rs *RuleSet) isActionAvailable(eventType eval.EventType, action *Action) b } // AddRule creates the rule evaluator and adds it to the bucket of its events -func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule) (*eval.Rule, error) { +func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule) (model.EventCategory, error) { if pRule.Def.Disabled { - return nil, nil + return "", nil } for _, id := range rs.opts.ReservedRuleIDs { if id == pRule.Def.ID { - return nil, &ErrRuleLoad{Rule: pRule, Err: ErrInternalIDConflict} + return "", &ErrRuleLoad{Rule: pRule, Err: ErrInternalIDConflict} } } if _, exists := rs.rules[pRule.Def.ID]; exists { - return nil, &ErrRuleLoad{Rule: pRule, Err: ErrDefinitionIDConflict} + return "", &ErrRuleLoad{Rule: pRule, Err: ErrDefinitionIDConflict} } var tags []string @@ -312,48 +317,67 @@ func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule tags = append(tags, k+":"+v) } - rule := &Rule{ - PolicyRule: pRule, - Rule: eval.NewRule(pRule.Def.ID, pRule.Def.Expression, rs.evalOpts, tags...), + expandedRules := expandFim(pRule.Def.ID, pRule.Def.GroupID, pRule.Def.Expression) + + categories := make([]model.EventCategory, 0) + for _, er := range 
expandedRules { + category, err := rs.innerAddExpandedRule(parsingContext, pRule, er, tags) + if err != nil { + return "", err + } + categories = append(categories, category) } + categories = slices.Compact(categories) + if len(categories) != 1 { + return "", &ErrRuleLoad{Rule: pRule, Err: ErrMultipleEventCategories} + } + return categories[0], nil +} - if err := rule.Parse(parsingContext); err != nil { - return nil, &ErrRuleLoad{Rule: pRule, Err: &ErrRuleSyntax{Err: err}} +func (rs *RuleSet) innerAddExpandedRule(parsingContext *ast.ParsingContext, pRule *PolicyRule, exRule expandedRule, tags []string) (model.EventCategory, error) { + evalRule, err := eval.NewRule(exRule.id, exRule.expr, parsingContext, rs.evalOpts, tags...) + if err != nil { + return "", &ErrRuleLoad{Rule: pRule, Err: &ErrRuleSyntax{Err: err}} } - if err := rule.GenEvaluator(rs.model, parsingContext); err != nil { - return nil, &ErrRuleLoad{Rule: pRule, Err: err} + rule := &Rule{ + PolicyRule: pRule, + Rule: evalRule, + } + + if err := rule.GenEvaluator(rs.model); err != nil { + return "", &ErrRuleLoad{Rule: pRule, Err: err} } eventType, err := GetRuleEventType(rule.Rule) if err != nil { - return nil, &ErrRuleLoad{Rule: pRule, Err: err} + return "", &ErrRuleLoad{Rule: pRule, Err: err} } // validate event context against event type for _, field := range rule.GetFields() { restrictions := rs.model.GetFieldRestrictions(field) if len(restrictions) > 0 && !slices.Contains(restrictions, eventType) { - return nil, &ErrRuleLoad{Rule: pRule, Err: &ErrFieldNotAvailable{Field: field, EventType: eventType, RestrictedTo: restrictions}} + return "", &ErrRuleLoad{Rule: pRule, Err: &ErrFieldNotAvailable{Field: field, EventType: eventType, RestrictedTo: restrictions}} } } // ignore event types not supported if _, exists := rs.opts.EventTypeEnabled["*"]; !exists { if enabled, exists := rs.opts.EventTypeEnabled[eventType]; !exists || !enabled { - return nil, &ErrRuleLoad{Rule: pRule, Err: ErrEventTypeNotEnabled} + return "", &ErrRuleLoad{Rule: pRule, Err: ErrEventTypeNotEnabled} } } for _, action := range rule.PolicyRule.Actions { if !rs.isActionAvailable(eventType, action) { - return nil, &ErrRuleLoad{Rule: pRule, Err: &ErrActionNotAvailable{ActionName: action.Def.Name(), EventType: eventType}} + return "", &ErrRuleLoad{Rule: pRule, Err: &ErrActionNotAvailable{ActionName: action.Def.Name(), EventType: eventType}} } // compile action filter if action.Def.Filter != nil { if err := action.CompileFilter(parsingContext, rs.model, rs.evalOpts); err != nil { - return nil, &ErrRuleLoad{Rule: pRule, Err: err} + return "", &ErrRuleLoad{Rule: pRule, Err: err} } } @@ -361,7 +385,7 @@ func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule if _, found := rs.fieldEvaluators[action.Def.Set.Field]; !found { evaluator, err := rs.model.GetEvaluator(action.Def.Set.Field, "") if err != nil { - return nil, err + return "", err } rs.fieldEvaluators[action.Def.Set.Field] = evaluator } @@ -375,7 +399,7 @@ func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule } if err := bucket.AddRule(rule); err != nil { - return nil, err + return "", err } // Merge the fields of the new rule with the existing list of fields of the ruleset @@ -383,7 +407,7 @@ func (rs *RuleSet) AddRule(parsingContext *ast.ParsingContext, pRule *PolicyRule rs.rules[pRule.Def.ID] = rule - return rule.Rule, nil + return model.GetEventTypeCategory(eventType), nil } // NotifyRuleMatch notifies all the ruleset listeners that an event matched a rule @@ 
-498,7 +522,7 @@ func IsDiscarder(ctx *eval.Context, field eval.Field, rules []*Rule) (bool, erro // IsDiscarder partially evaluates an Event against a field func (rs *RuleSet) IsDiscarder(event eval.Event, field eval.Field) (bool, error) { - eventType, err := event.GetFieldEventType(field) + eventType, _, err := event.GetFieldMetadata(field) if err != nil { return false, err } diff --git a/pkg/security/secl/rules/ruleset_test.go b/pkg/security/secl/rules/ruleset_test.go index f7b56bdc5cd93..2839a0306af9c 100644 --- a/pkg/security/secl/rules/ruleset_test.go +++ b/pkg/security/secl/rules/ruleset_test.go @@ -18,6 +18,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/ast" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" ) type testFieldValues map[eval.Field][]interface{} @@ -892,15 +893,15 @@ func TestRuleSetAUDApprovers(t *testing.T) { Field: "process.auid", TypeBitmask: eval.ScalarValueType | eval.RangeValueType, FilterMode: ApproverOnlyMode, - RangeFilterValue: &RangeFilterValue{Min: 0, Max: model.AuditUIDUnset - 1}, + RangeFilterValue: &RangeFilterValue{Min: 0, Max: sharedconsts.AuditUIDUnset - 1}, FilterWeight: 10, HandleNotApproverValue: func(fieldValueType eval.FieldValueType, value interface{}) (eval.FieldValueType, interface{}, bool) { if fieldValueType != eval.ScalarValueType { return fieldValueType, value, false } - if i, ok := value.(int); ok && uint32(i) == model.AuditUIDUnset { - return eval.RangeValueType, RangeFilterValue{Min: 0, Max: model.AuditUIDUnset - 1}, true + if i, ok := value.(int); ok && uint32(i) == sharedconsts.AuditUIDUnset { + return eval.RangeValueType, RangeFilterValue{Min: 0, Max: sharedconsts.AuditUIDUnset - 1}, true } return fieldValueType, value, false @@ -955,7 +956,7 @@ func TestRuleSetAUDApprovers(t *testing.T) { } rge := approvers["process.auid"][0].Value.(RangeFilterValue) - if rge.Min != 0 || rge.Max != model.AuditUIDUnset-1 { + if rge.Min != 0 || rge.Max != sharedconsts.AuditUIDUnset-1 { t.Fatalf("unexpected range") } }) @@ -1065,11 +1066,13 @@ func TestRuleSetAUDApprovers(t *testing.T) { func TestGetRuleEventType(t *testing.T) { t.Run("ok", func(t *testing.T) { - rule := eval.NewRule("aaa", `open.file.name == "test"`, &eval.Opts{}) - pc := ast.NewParsingContext(false) + rule, err := eval.NewRule("aaa", `open.file.name == "test"`, pc, &eval.Opts{}) + if err != nil { + t.Fatal(err) + } - if err := rule.GenEvaluator(&model.Model{}, pc); err != nil { + if err := rule.GenEvaluator(&model.Model{}); err != nil { t.Fatal(err) } @@ -1079,7 +1082,7 @@ func TestGetRuleEventType(t *testing.T) { } event := model.NewFakeEvent() - fieldEventType, err := event.GetFieldEventType("open.file.name") + fieldEventType, _, err := event.GetFieldMetadata("open.file.name") if err != nil { t.Fatal("should get a field event type") } @@ -1090,11 +1093,13 @@ func TestGetRuleEventType(t *testing.T) { }) t.Run("ko", func(t *testing.T) { - rule := eval.NewRule("aaa", `open.file.name == "test" && unlink.file.name == "123"`, &eval.Opts{}) - pc := ast.NewParsingContext(false) + rule, err := eval.NewRule("aaa", `open.file.name == "test" && unlink.file.name == "123"`, pc, &eval.Opts{}) + if err != nil { + t.Fatal(err) + } - if err := rule.GenEvaluator(&model.Model{}, pc); err == nil { + if err := rule.GenEvaluator(&model.Model{}); err == nil { t.Fatalf("shouldn't get an evaluator, multiple event types: %s", err) } 
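The hunks above (actions.go, ruleset.go, validators/rule_structure.go and the tests) all follow the same eval API migration: parsing moves into eval.NewRule, which now takes the parsing context and returns an error, while GenEvaluator drops its parsing-context argument. Below is a minimal sketch of an updated call site, assuming NewRule still returns a *eval.Rule as its use in ruleset.go suggests; the helper name, rule ID and expression are illustrative only, not part of the change.

package example

import (
	"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/ast"
	"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
	"github.com/DataDog/datadog-agent/pkg/security/secl/model"
)

// buildRule is a hypothetical helper showing the post-migration flow.
func buildRule(id, expr string) (*eval.Rule, error) {
	pc := ast.NewParsingContext(false)
	opts := &eval.Opts{}

	// Before: r := eval.NewRule(id, expr, opts); r.Parse(pc); r.GenEvaluator(model, pc)
	// After: parsing happens inside NewRule, which can fail with a syntax error.
	r, err := eval.NewRule(id, expr, pc, opts)
	if err != nil {
		return nil, err
	}

	// GenEvaluator now only needs the model.
	if err := r.GenEvaluator(&model.Model{}); err != nil {
		return nil, err
	}
	return r, nil
}

This is also why AddRule in ruleset.go can now surface a syntax error directly from rule construction instead of from a separate Parse step.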
diff --git a/pkg/security/secl/schemas/accept.schema.json b/pkg/security/secl/schemas/accept.schema.json new file mode 100644 index 0000000000000..85949804cbd26 --- /dev/null +++ b/pkg/security/secl/schemas/accept.schema.json @@ -0,0 +1,56 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "accept.schema.json", + "type": "object", + "required" : ["accept"], + "allOf": [ + { + "$ref": "event.schema.json" + }, + { + "$ref": "usr.schema.json" + }, + { + "$ref": "process_context.schema.json" + }, + { + "date": { + "$ref": "datetime.schema.json" + } + }, + { + "properties": { + "accept": { + "type": "object", + "required": [ + "addr" + ], + "properties": { + "addr": { + "type": "object", + "required": [ + "family", + "ip", + "port" + ], + "properties": { + "family": { + "type": "string" + }, + "ip": { + "type": "string" + }, + "port": { + "type": "integer" + } + } + }, + "protocol": { + "type": "string" + } + } + } + } + } + ] +} diff --git a/pkg/security/secl/schemas/connect.schema.json b/pkg/security/secl/schemas/connect.schema.json index 61b530105af9a..d4dbcb49910cb 100644 --- a/pkg/security/secl/schemas/connect.schema.json +++ b/pkg/security/secl/schemas/connect.schema.json @@ -2,6 +2,7 @@ "$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "connect.schema.json", "type": "object", + "required" : ["connect"], "allOf": [ { "$ref": "event.schema.json" @@ -53,4 +54,4 @@ } } ] -} \ No newline at end of file +} diff --git a/pkg/security/secl/validators/rule_structure.go b/pkg/security/secl/validators/rule_structure.go index e2665dd41658d..5c795eb29697d 100644 --- a/pkg/security/secl/validators/rule_structure.go +++ b/pkg/security/secl/validators/rule_structure.go @@ -7,16 +7,14 @@ package validators import ( - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/ast" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" ) // HasBareWildcardInField checks whether a rule has a bare wildcard func HasBareWildcardInField(rule *eval.Rule) (bool, error) { - parsingContext := ast.NewParsingContext(false) localModel := &model.Model{} - if err := rule.GenEvaluator(localModel, parsingContext); err != nil { + if err := rule.GenEvaluator(localModel); err != nil { return false, err } diff --git a/pkg/security/secl/validators/rule_structure_test.go b/pkg/security/secl/validators/rule_structure_test.go index aec86bbc5d73d..d10365b4a5fd9 100644 --- a/pkg/security/secl/validators/rule_structure_test.go +++ b/pkg/security/secl/validators/rule_structure_test.go @@ -11,6 +11,7 @@ package validators import ( "testing" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/ast" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" ) @@ -113,7 +114,11 @@ func TestHasBareWildcardInField(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - ruleToEval := eval.NewRule(tt.name, tt.args.ruleExpression, &eval.Opts{}) + pc := ast.NewParsingContext(false) + ruleToEval, err := eval.NewRule(tt.name, tt.args.ruleExpression, pc, &eval.Opts{}) + if err != nil { + t.Fatalf("Error creating rule: %s", err) + } got, err := HasBareWildcardInField(ruleToEval) diff --git a/pkg/security/seclwin/go.mod b/pkg/security/seclwin/go.mod index 496f580b96071..adadadfc3cd5d 100644 --- a/pkg/security/seclwin/go.mod +++ b/pkg/security/seclwin/go.mod @@ -4,14 +4,10 @@ go 1.23.0 replace github.com/DataDog/datadog-agent/pkg/security/secl => ../secl -require ( - 
github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0-rc.3 - modernc.org/mathutil v1.6.0 -) +require github.com/DataDog/datadog-agent/pkg/security/secl v0.56.0-rc.3 require ( github.com/alecthomas/participle v0.7.1 // indirect github.com/jellydator/ttlcache/v3 v3.3.0 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect golang.org/x/sync v0.10.0 // indirect ) diff --git a/pkg/security/seclwin/go.sum b/pkg/security/seclwin/go.sum index 8207782fa1c53..6a8a41d4b471b 100644 --- a/pkg/security/seclwin/go.sum +++ b/pkg/security/seclwin/go.sum @@ -10,8 +10,6 @@ github.com/jellydator/ttlcache/v3 v3.3.0/go.mod h1:bj2/e0l4jRnQdrnSTaGTsh4GSXvMj github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= @@ -24,5 +22,3 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= -modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= diff --git a/pkg/security/seclwin/model/accessors_win.go b/pkg/security/seclwin/model/accessors_win.go index d7f989e58bd93..4a5e19a7e0a9c 100644 --- a/pkg/security/seclwin/model/accessors_win.go +++ b/pkg/security/seclwin/model/accessors_win.go @@ -10,13 +10,15 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "math" + "net" "reflect" ) // to always require the math package var _ = math.MaxUint16 +var _ = net.IP{} -func (m *Model) GetEventTypes() []eval.EventType { +func (_ *Model) GetEventTypes() []eval.EventType { return []eval.EventType{ eval.EventType("change_permission"), eval.EventType("create"), @@ -31,12 +33,12 @@ func (m *Model) GetEventTypes() []eval.EventType { eval.EventType("write"), } } -func (m *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { +func (_ *Model) GetFieldRestrictions(field eval.Field) []eval.EventType { switch field { } return nil } -func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Evaluator, error) { +func (_ *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Evaluator, error) { switch field { case "change_permission.new_sd": return &eval.StringEvaluator{ @@ -868,23 +870,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results 
[]string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessCmdLine(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveProcessCmdLine(ev, &pce.ProcessContext.Process) + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveProcessCmdLine(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -895,23 +894,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.ContainerID - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.ContainerID + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.ContainerID }) ctx.StringCache[field] = results return results @@ -923,23 +919,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &pce.ProcessContext.Process)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ¤t.ProcessContext.Process)) }) ctx.IntCache[field] = results return results @@ -951,23 +944,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return 
result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvp(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvp(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -979,23 +969,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - results = append(results, result...) - return results + return result + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIteratorArray(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) []string { - return ev.FieldHandlers.ResolveProcessEnvs(ev, &pce.ProcessContext.Process) + results := newAncestorsIteratorArray(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) []string { + return ev.FieldHandlers.ResolveProcessEnvs(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -1008,23 +995,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -1037,23 +1021,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx 
*eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFileBasename(ev, &pce.ProcessContext.Process.FileEvent)) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFileBasename(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = results return results @@ -1066,23 +1047,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent) }) ctx.StringCache[field] = results return results @@ -1095,23 +1073,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := len(ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent)) - results = append(results, result) - return results + return []int{result} } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) int { - return len(ev.FieldHandlers.ResolveFilePath(ev, &pce.ProcessContext.Process.FileEvent)) + if result, ok := ctx.IntCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) int { + return len(ev.FieldHandlers.ResolveFilePath(ev, ¤t.ProcessContext.Process.FileEvent)) }) ctx.IntCache[field] = 
results return results @@ -1132,23 +1107,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PIDContext.Pid) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PIDContext.Pid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PIDContext.Pid) }) ctx.IntCache[field] = results return results @@ -1159,23 +1131,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval return &eval.IntArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []int { ctx.AppendResolvedField(field) - if result, ok := ctx.IntCache[field]; ok { - return result - } - var results []int iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := int(element.ProcessContext.Process.PPid) - results = append(results, result) - return results + return []int{result} + } + if result, ok := ctx.IntCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) int { - return int(pce.ProcessContext.Process.PPid) + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) int { + return int(current.ProcessContext.Process.PPid) }) ctx.IntCache[field] = results return results @@ -1187,23 +1156,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := ev.FieldHandlers.ResolveUser(ev, &element.ProcessContext.Process) - results = append(results, result) - return results + return []string{result} + } + if result, ok := ctx.StringCache[field]; ok { + return result } - results = newAncestorsIterator(iterator, ctx, ev, func(ev *Event, pce *ProcessCacheEntry) string { - return ev.FieldHandlers.ResolveUser(ev, &pce.ProcessContext.Process) + results := newAncestorsIterator(iterator, field, ctx, ev, func(ev *Event, current *ProcessCacheEntry) string { + return ev.FieldHandlers.ResolveUser(ev, ¤t.ProcessContext.Process) }) ctx.StringCache[field] = results return results @@ -1214,23 +1180,20 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) 
(eval.Eval return &eval.StringArrayEvaluator{ EvalFnc: func(ctx *eval.Context) []string { ctx.AppendResolvedField(field) - if result, ok := ctx.StringCache[field]; ok { - return result - } - var results []string iterator := &ProcessAncestorsIterator{} if regID != "" { - value := iterator.At(ctx, regID, ctx.Registers[regID]) - if value == nil { - return results + element := iterator.At(ctx, regID, ctx.Registers[regID]) + if element == nil { + return nil } - element := value result := element.ProcessContext.Process.OwnerSidString - results = append(results, result) - return results + return []string{result} } - results = newAncestorsIterator(iterator, ctx, nil, func(ev *Event, pce *ProcessCacheEntry) string { - return pce.ProcessContext.Process.OwnerSidString + if result, ok := ctx.StringCache[field]; ok { + return result + } + results := newAncestorsIterator(iterator, field, ctx, nil, func(ev *Event, current *ProcessCacheEntry) string { + return current.ProcessContext.Process.OwnerSidString }) ctx.StringCache[field] = results return results @@ -1339,6 +1302,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1352,6 +1316,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.ContainerID @@ -1365,6 +1330,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent)) @@ -1378,6 +1344,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1391,6 +1358,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return []string{} } return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1405,6 +1373,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -1430,6 +1399,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent) @@ -1454,6 +1424,7 @@ func (m 
*Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid) @@ -1467,6 +1438,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return 0 } return int(ev.BaseEvent.ProcessContext.Parent.PPid) @@ -1480,6 +1452,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.FieldHandlers.ResolveUser(ev, ev.BaseEvent.ProcessContext.Parent) @@ -1493,6 +1466,7 @@ func (m *Model) GetEvaluator(field eval.Field, regID eval.RegisterID) (eval.Eval ctx.AppendResolvedField(field) ev := ctx.Event.(*Event) if !ev.BaseEvent.ProcessContext.HasParent() { + ctx.Error = &eval.ErrNotSupported{Field: field} return "" } return ev.BaseEvent.ProcessContext.Parent.OwnerSidString @@ -2041,1112 +2015,366 @@ func (ev *Event) GetFields() []eval.Field { } } func (ev *Event) GetFieldValue(field eval.Field) (interface{}, error) { - switch field { - case "change_permission.new_sd": - return ev.FieldHandlers.ResolveNewSecurityDescriptor(ev, &ev.ChangePermission), nil - case "change_permission.old_sd": - return ev.FieldHandlers.ResolveOldSecurityDescriptor(ev, &ev.ChangePermission), nil - case "change_permission.path": - return ev.ChangePermission.ObjectName, nil - case "change_permission.type": - return ev.ChangePermission.ObjectType, nil - case "change_permission.user_domain": - return ev.ChangePermission.UserDomain, nil - case "change_permission.username": - return ev.ChangePermission.UserName, nil - case "container.created_at": - return int(ev.FieldHandlers.ResolveContainerCreatedAt(ev, ev.BaseEvent.ContainerContext)), nil - case "container.id": - return ev.FieldHandlers.ResolveContainerID(ev, ev.BaseEvent.ContainerContext), nil - case "container.runtime": - return ev.FieldHandlers.ResolveContainerRuntime(ev, ev.BaseEvent.ContainerContext), nil - case "container.tags": - return ev.FieldHandlers.ResolveContainerTags(ev, ev.BaseEvent.ContainerContext), nil - case "create.file.device_path": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.CreateNewFile.File), nil - case "create.file.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.CreateNewFile.File), nil - case "create.file.name": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.CreateNewFile.File), nil - case "create.file.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.CreateNewFile.File), nil - case "create.file.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.CreateNewFile.File), nil - case "create.file.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.CreateNewFile.File), nil - case "create.registry.key_name": - return ev.CreateRegistryKey.Registry.KeyName, nil - case "create.registry.key_name.length": - return len(ev.CreateRegistryKey.Registry.KeyName), nil - case "create.registry.key_path": - return ev.CreateRegistryKey.Registry.KeyPath, nil - case "create.registry.key_path.length": - return len(ev.CreateRegistryKey.Registry.KeyPath), nil - case 
"create_key.registry.key_name": - return ev.CreateRegistryKey.Registry.KeyName, nil - case "create_key.registry.key_name.length": - return len(ev.CreateRegistryKey.Registry.KeyName), nil - case "create_key.registry.key_path": - return ev.CreateRegistryKey.Registry.KeyPath, nil - case "create_key.registry.key_path.length": - return len(ev.CreateRegistryKey.Registry.KeyPath), nil - case "delete.file.device_path": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.DeleteFile.File), nil - case "delete.file.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.DeleteFile.File), nil - case "delete.file.name": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.DeleteFile.File), nil - case "delete.file.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.DeleteFile.File), nil - case "delete.file.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.DeleteFile.File), nil - case "delete.file.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.DeleteFile.File), nil - case "delete.registry.key_name": - return ev.DeleteRegistryKey.Registry.KeyName, nil - case "delete.registry.key_name.length": - return len(ev.DeleteRegistryKey.Registry.KeyName), nil - case "delete.registry.key_path": - return ev.DeleteRegistryKey.Registry.KeyPath, nil - case "delete.registry.key_path.length": - return len(ev.DeleteRegistryKey.Registry.KeyPath), nil - case "delete_key.registry.key_name": - return ev.DeleteRegistryKey.Registry.KeyName, nil - case "delete_key.registry.key_name.length": - return len(ev.DeleteRegistryKey.Registry.KeyName), nil - case "delete_key.registry.key_path": - return ev.DeleteRegistryKey.Registry.KeyPath, nil - case "delete_key.registry.key_path.length": - return len(ev.DeleteRegistryKey.Registry.KeyPath), nil - case "event.hostname": - return ev.FieldHandlers.ResolveHostname(ev, &ev.BaseEvent), nil - case "event.origin": - return ev.BaseEvent.Origin, nil - case "event.os": - return ev.BaseEvent.Os, nil - case "event.service": - return ev.FieldHandlers.ResolveService(ev, &ev.BaseEvent), nil - case "event.timestamp": - return int(ev.FieldHandlers.ResolveEventTimestamp(ev, &ev.BaseEvent)), nil - case "exec.cmdline": - return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.Exec.Process), nil - case "exec.container.id": - return ev.Exec.Process.ContainerID, nil - case "exec.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exec.Process)), nil - case "exec.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exec.Process), nil - case "exec.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exec.Process), nil - case "exec.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent), nil - case "exec.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exec.Process.FileEvent), nil - case "exec.pid": - return int(ev.Exec.Process.PIDContext.Pid), nil - case "exec.ppid": - return int(ev.Exec.Process.PPid), nil - case "exec.user": - return ev.FieldHandlers.ResolveUser(ev, ev.Exec.Process), nil - case "exec.user_sid": - return ev.Exec.Process.OwnerSidString, nil - case "exit.cause": - return int(ev.Exit.Cause), nil - case "exit.cmdline": - return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.Exit.Process), nil - case "exit.code": - return 
int(ev.Exit.Code), nil - case "exit.container.id": - return ev.Exit.Process.ContainerID, nil - case "exit.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.Exit.Process)), nil - case "exit.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.Exit.Process), nil - case "exit.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.Exit.Process), nil - case "exit.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent), nil - case "exit.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.Exit.Process.FileEvent), nil - case "exit.pid": - return int(ev.Exit.Process.PIDContext.Pid), nil - case "exit.ppid": - return int(ev.Exit.Process.PPid), nil - case "exit.user": - return ev.FieldHandlers.ResolveUser(ev, ev.Exit.Process), nil - case "exit.user_sid": - return ev.Exit.Process.OwnerSidString, nil - case "open.registry.key_name": - return ev.OpenRegistryKey.Registry.KeyName, nil - case "open.registry.key_name.length": - return len(ev.OpenRegistryKey.Registry.KeyName), nil - case "open.registry.key_path": - return ev.OpenRegistryKey.Registry.KeyPath, nil - case "open.registry.key_path.length": - return len(ev.OpenRegistryKey.Registry.KeyPath), nil - case "open_key.registry.key_name": - return ev.OpenRegistryKey.Registry.KeyName, nil - case "open_key.registry.key_name.length": - return len(ev.OpenRegistryKey.Registry.KeyName), nil - case "open_key.registry.key_path": - return ev.OpenRegistryKey.Registry.KeyPath, nil - case "open_key.registry.key_path.length": - return len(ev.OpenRegistryKey.Registry.KeyPath), nil - case "process.ancestors.cmdline": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessCmdLine(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.container.id": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.ContainerID - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.created_at": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &element.ProcessContext.Process)) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.envp": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvp(ev, &element.ProcessContext.Process) - values = append(values, result...) 
- ptr = iterator.Next() - } - return values, nil - case "process.ancestors.envs": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveProcessEnvs(ev, &element.ProcessContext.Process) - values = append(values, result...) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.name": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFileBasename(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil - case "process.ancestors.file.path": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveFilePath(ev, &element.ProcessContext.Process.FileEvent) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent), nil - case "process.ancestors.length": - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - return iterator.Len(ctx), nil - case "process.ancestors.pid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PIDContext.Pid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.ppid": - var values []int - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := int(element.ProcessContext.Process.PPid) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.user": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := ev.FieldHandlers.ResolveUser(ev, &element.ProcessContext.Process) - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.ancestors.user_sid": - var values []string - ctx := eval.NewContext(ev) - iterator := &ProcessAncestorsIterator{} - ptr := iterator.Front(ctx) - for ptr != nil { - element := ptr - result := element.ProcessContext.Process.OwnerSidString - values = append(values, result) - ptr = iterator.Next() - } - return values, nil - case "process.cmdline": - return ev.FieldHandlers.ResolveProcessCmdLine(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.container.id": - return ev.BaseEvent.ProcessContext.Process.ContainerID, nil - case "process.created_at": - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, &ev.BaseEvent.ProcessContext.Process)), nil - case "process.envp": - return ev.FieldHandlers.ResolveProcessEnvp(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.envs": - return ev.FieldHandlers.ResolveProcessEnvs(ev, &ev.BaseEvent.ProcessContext.Process), nil 
- case "process.file.name": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.path": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Process.FileEvent), nil - case "process.parent.cmdline": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessCmdLine(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.container.id": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.ContainerID, nil - case "process.parent.created_at": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.FieldHandlers.ResolveProcessCreatedAt(ev, ev.BaseEvent.ProcessContext.Parent)), nil - case "process.parent.envp": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvp(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.envs": - if !ev.BaseEvent.ProcessContext.HasParent() { - return []string{}, &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveProcessEnvs(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.file.name": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.name.length": - return ev.FieldHandlers.ResolveFileBasename(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.path": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.file.path.length": - return ev.FieldHandlers.ResolveFilePath(ev, &ev.BaseEvent.ProcessContext.Parent.FileEvent), nil - case "process.parent.pid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid), nil - case "process.parent.ppid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return 0, &eval.ErrNotSupported{Field: field} - } - return int(ev.BaseEvent.ProcessContext.Parent.PPid), nil - case "process.parent.user": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.FieldHandlers.ResolveUser(ev, ev.BaseEvent.ProcessContext.Parent), nil - case "process.parent.user_sid": - if !ev.BaseEvent.ProcessContext.HasParent() { - return "", &eval.ErrNotSupported{Field: field} - } - return ev.BaseEvent.ProcessContext.Parent.OwnerSidString, nil - case "process.pid": - return int(ev.BaseEvent.ProcessContext.Process.PIDContext.Pid), nil - case "process.ppid": - return int(ev.BaseEvent.ProcessContext.Process.PPid), nil - case "process.user": - return ev.FieldHandlers.ResolveUser(ev, &ev.BaseEvent.ProcessContext.Process), nil - case "process.user_sid": - return 
ev.BaseEvent.ProcessContext.Process.OwnerSidString, nil - case "rename.file.destination.device_path": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.New), nil - case "rename.file.destination.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.New), nil - case "rename.file.destination.name": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.New), nil - case "rename.file.destination.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.New), nil - case "rename.file.destination.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.New), nil - case "rename.file.destination.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.New), nil - case "rename.file.device_path": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.Old), nil - case "rename.file.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.RenameFile.Old), nil - case "rename.file.name": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.Old), nil - case "rename.file.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.RenameFile.Old), nil - case "rename.file.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.Old), nil - case "rename.file.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.RenameFile.Old), nil - case "set.registry.key_name": - return ev.SetRegistryKeyValue.Registry.KeyName, nil - case "set.registry.key_name.length": - return len(ev.SetRegistryKeyValue.Registry.KeyName), nil - case "set.registry.key_path": - return ev.SetRegistryKeyValue.Registry.KeyPath, nil - case "set.registry.key_path.length": - return len(ev.SetRegistryKeyValue.Registry.KeyPath), nil - case "set.registry.value_name": - return ev.SetRegistryKeyValue.ValueName, nil - case "set.registry.value_name.length": - return len(ev.SetRegistryKeyValue.ValueName), nil - case "set.value_name": - return ev.SetRegistryKeyValue.ValueName, nil - case "set_key_value.registry.key_name": - return ev.SetRegistryKeyValue.Registry.KeyName, nil - case "set_key_value.registry.key_name.length": - return len(ev.SetRegistryKeyValue.Registry.KeyName), nil - case "set_key_value.registry.key_path": - return ev.SetRegistryKeyValue.Registry.KeyPath, nil - case "set_key_value.registry.key_path.length": - return len(ev.SetRegistryKeyValue.Registry.KeyPath), nil - case "set_key_value.registry.value_name": - return ev.SetRegistryKeyValue.ValueName, nil - case "set_key_value.registry.value_name.length": - return len(ev.SetRegistryKeyValue.ValueName), nil - case "set_key_value.value_name": - return ev.SetRegistryKeyValue.ValueName, nil - case "write.file.device_path": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.WriteFile.File), nil - case "write.file.device_path.length": - return ev.FieldHandlers.ResolveFimFilePath(ev, &ev.WriteFile.File), nil - case "write.file.name": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.WriteFile.File), nil - case "write.file.name.length": - return ev.FieldHandlers.ResolveFimFileBasename(ev, &ev.WriteFile.File), nil - case "write.file.path": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.WriteFile.File), nil - case "write.file.path.length": - return ev.FieldHandlers.ResolveFileUserPath(ev, &ev.WriteFile.File), nil + m := &Model{} + evaluator, err := m.GetEvaluator(field, "") + if err != nil { + return nil, err } - return nil, &eval.ErrFieldNotFound{Field: field} -} -func (ev *Event) 
GetFieldEventType(field eval.Field) (eval.EventType, error) { - switch field { - case "change_permission.new_sd": - return "change_permission", nil - case "change_permission.old_sd": - return "change_permission", nil - case "change_permission.path": - return "change_permission", nil - case "change_permission.type": - return "change_permission", nil - case "change_permission.user_domain": - return "change_permission", nil - case "change_permission.username": - return "change_permission", nil - case "container.created_at": - return "", nil - case "container.id": - return "", nil - case "container.runtime": - return "", nil - case "container.tags": - return "", nil - case "create.file.device_path": - return "create", nil - case "create.file.device_path.length": - return "create", nil - case "create.file.name": - return "create", nil - case "create.file.name.length": - return "create", nil - case "create.file.path": - return "create", nil - case "create.file.path.length": - return "create", nil - case "create.registry.key_name": - return "create_key", nil - case "create.registry.key_name.length": - return "create_key", nil - case "create.registry.key_path": - return "create_key", nil - case "create.registry.key_path.length": - return "create_key", nil - case "create_key.registry.key_name": - return "create_key", nil - case "create_key.registry.key_name.length": - return "create_key", nil - case "create_key.registry.key_path": - return "create_key", nil - case "create_key.registry.key_path.length": - return "create_key", nil - case "delete.file.device_path": - return "delete", nil - case "delete.file.device_path.length": - return "delete", nil - case "delete.file.name": - return "delete", nil - case "delete.file.name.length": - return "delete", nil - case "delete.file.path": - return "delete", nil - case "delete.file.path.length": - return "delete", nil - case "delete.registry.key_name": - return "delete_key", nil - case "delete.registry.key_name.length": - return "delete_key", nil - case "delete.registry.key_path": - return "delete_key", nil - case "delete.registry.key_path.length": - return "delete_key", nil - case "delete_key.registry.key_name": - return "delete_key", nil - case "delete_key.registry.key_name.length": - return "delete_key", nil - case "delete_key.registry.key_path": - return "delete_key", nil - case "delete_key.registry.key_path.length": - return "delete_key", nil - case "event.hostname": - return "", nil - case "event.origin": - return "", nil - case "event.os": - return "", nil - case "event.service": - return "", nil - case "event.timestamp": - return "", nil - case "exec.cmdline": - return "exec", nil - case "exec.container.id": - return "exec", nil - case "exec.created_at": - return "exec", nil - case "exec.envp": - return "exec", nil - case "exec.envs": - return "exec", nil - case "exec.file.name": - return "exec", nil - case "exec.file.name.length": - return "exec", nil - case "exec.file.path": - return "exec", nil - case "exec.file.path.length": - return "exec", nil - case "exec.pid": - return "exec", nil - case "exec.ppid": - return "exec", nil - case "exec.user": - return "exec", nil - case "exec.user_sid": - return "exec", nil - case "exit.cause": - return "exit", nil - case "exit.cmdline": - return "exit", nil - case "exit.code": - return "exit", nil - case "exit.container.id": - return "exit", nil - case "exit.created_at": - return "exit", nil - case "exit.envp": - return "exit", nil - case "exit.envs": - return "exit", nil - case "exit.file.name": - return 
"exit", nil - case "exit.file.name.length": - return "exit", nil - case "exit.file.path": - return "exit", nil - case "exit.file.path.length": - return "exit", nil - case "exit.pid": - return "exit", nil - case "exit.ppid": - return "exit", nil - case "exit.user": - return "exit", nil - case "exit.user_sid": - return "exit", nil - case "open.registry.key_name": - return "open_key", nil - case "open.registry.key_name.length": - return "open_key", nil - case "open.registry.key_path": - return "open_key", nil - case "open.registry.key_path.length": - return "open_key", nil - case "open_key.registry.key_name": - return "open_key", nil - case "open_key.registry.key_name.length": - return "open_key", nil - case "open_key.registry.key_path": - return "open_key", nil - case "open_key.registry.key_path.length": - return "open_key", nil - case "process.ancestors.cmdline": - return "", nil - case "process.ancestors.container.id": - return "", nil - case "process.ancestors.created_at": - return "", nil - case "process.ancestors.envp": - return "", nil - case "process.ancestors.envs": - return "", nil - case "process.ancestors.file.name": - return "", nil - case "process.ancestors.file.name.length": - return "", nil - case "process.ancestors.file.path": - return "", nil - case "process.ancestors.file.path.length": - return "", nil - case "process.ancestors.length": - return "", nil - case "process.ancestors.pid": - return "", nil - case "process.ancestors.ppid": - return "", nil - case "process.ancestors.user": - return "", nil - case "process.ancestors.user_sid": - return "", nil - case "process.cmdline": - return "", nil - case "process.container.id": - return "", nil - case "process.created_at": - return "", nil - case "process.envp": - return "", nil - case "process.envs": - return "", nil - case "process.file.name": - return "", nil - case "process.file.name.length": - return "", nil - case "process.file.path": - return "", nil - case "process.file.path.length": - return "", nil - case "process.parent.cmdline": - return "", nil - case "process.parent.container.id": - return "", nil - case "process.parent.created_at": - return "", nil - case "process.parent.envp": - return "", nil - case "process.parent.envs": - return "", nil - case "process.parent.file.name": - return "", nil - case "process.parent.file.name.length": - return "", nil - case "process.parent.file.path": - return "", nil - case "process.parent.file.path.length": - return "", nil - case "process.parent.pid": - return "", nil - case "process.parent.ppid": - return "", nil - case "process.parent.user": - return "", nil - case "process.parent.user_sid": - return "", nil - case "process.pid": - return "", nil - case "process.ppid": - return "", nil - case "process.user": - return "", nil - case "process.user_sid": - return "", nil - case "rename.file.destination.device_path": - return "rename", nil - case "rename.file.destination.device_path.length": - return "rename", nil - case "rename.file.destination.name": - return "rename", nil - case "rename.file.destination.name.length": - return "rename", nil - case "rename.file.destination.path": - return "rename", nil - case "rename.file.destination.path.length": - return "rename", nil - case "rename.file.device_path": - return "rename", nil - case "rename.file.device_path.length": - return "rename", nil - case "rename.file.name": - return "rename", nil - case "rename.file.name.length": - return "rename", nil - case "rename.file.path": - return "rename", nil - case "rename.file.path.length": - 
return "rename", nil - case "set.registry.key_name": - return "set_key_value", nil - case "set.registry.key_name.length": - return "set_key_value", nil - case "set.registry.key_path": - return "set_key_value", nil - case "set.registry.key_path.length": - return "set_key_value", nil - case "set.registry.value_name": - return "set_key_value", nil - case "set.registry.value_name.length": - return "set_key_value", nil - case "set.value_name": - return "set_key_value", nil - case "set_key_value.registry.key_name": - return "set_key_value", nil - case "set_key_value.registry.key_name.length": - return "set_key_value", nil - case "set_key_value.registry.key_path": - return "set_key_value", nil - case "set_key_value.registry.key_path.length": - return "set_key_value", nil - case "set_key_value.registry.value_name": - return "set_key_value", nil - case "set_key_value.registry.value_name.length": - return "set_key_value", nil - case "set_key_value.value_name": - return "set_key_value", nil - case "write.file.device_path": - return "write", nil - case "write.file.device_path.length": - return "write", nil - case "write.file.name": - return "write", nil - case "write.file.name.length": - return "write", nil - case "write.file.path": - return "write", nil - case "write.file.path.length": - return "write", nil + ctx := eval.NewContext(ev) + value := evaluator.Eval(ctx) + if ctx.Error != nil { + return nil, ctx.Error } - return "", &eval.ErrFieldNotFound{Field: field} + return value, nil } -func (ev *Event) GetFieldType(field eval.Field) (reflect.Kind, error) { +func (ev *Event) GetFieldMetadata(field eval.Field) (eval.EventType, reflect.Kind, error) { switch field { case "change_permission.new_sd": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.old_sd": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.path": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.type": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.user_domain": - return reflect.String, nil + return "change_permission", reflect.String, nil case "change_permission.username": - return reflect.String, nil + return "change_permission", reflect.String, nil case "container.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "container.id": - return reflect.String, nil + return "", reflect.String, nil case "container.runtime": - return reflect.String, nil + return "", reflect.String, nil case "container.tags": - return reflect.String, nil + return "", reflect.String, nil case "create.file.device_path": - return reflect.String, nil + return "create", reflect.String, nil case "create.file.device_path.length": - return reflect.Int, nil + return "create", reflect.Int, nil case "create.file.name": - return reflect.String, nil + return "create", reflect.String, nil case "create.file.name.length": - return reflect.Int, nil + return "create", reflect.Int, nil case "create.file.path": - return reflect.String, nil + return "create", reflect.String, nil case "create.file.path.length": - return reflect.Int, nil + return "create", reflect.Int, nil case "create.registry.key_name": - return reflect.String, nil + return "create_key", reflect.String, nil case "create.registry.key_name.length": - return reflect.Int, nil + return "create_key", reflect.Int, nil case "create.registry.key_path": - return reflect.String, nil + return 
"create_key", reflect.String, nil case "create.registry.key_path.length": - return reflect.Int, nil + return "create_key", reflect.Int, nil case "create_key.registry.key_name": - return reflect.String, nil + return "create_key", reflect.String, nil case "create_key.registry.key_name.length": - return reflect.Int, nil + return "create_key", reflect.Int, nil case "create_key.registry.key_path": - return reflect.String, nil + return "create_key", reflect.String, nil case "create_key.registry.key_path.length": - return reflect.Int, nil + return "create_key", reflect.Int, nil case "delete.file.device_path": - return reflect.String, nil + return "delete", reflect.String, nil case "delete.file.device_path.length": - return reflect.Int, nil + return "delete", reflect.Int, nil case "delete.file.name": - return reflect.String, nil + return "delete", reflect.String, nil case "delete.file.name.length": - return reflect.Int, nil + return "delete", reflect.Int, nil case "delete.file.path": - return reflect.String, nil + return "delete", reflect.String, nil case "delete.file.path.length": - return reflect.Int, nil + return "delete", reflect.Int, nil case "delete.registry.key_name": - return reflect.String, nil + return "delete_key", reflect.String, nil case "delete.registry.key_name.length": - return reflect.Int, nil + return "delete_key", reflect.Int, nil case "delete.registry.key_path": - return reflect.String, nil + return "delete_key", reflect.String, nil case "delete.registry.key_path.length": - return reflect.Int, nil + return "delete_key", reflect.Int, nil case "delete_key.registry.key_name": - return reflect.String, nil + return "delete_key", reflect.String, nil case "delete_key.registry.key_name.length": - return reflect.Int, nil + return "delete_key", reflect.Int, nil case "delete_key.registry.key_path": - return reflect.String, nil + return "delete_key", reflect.String, nil case "delete_key.registry.key_path.length": - return reflect.Int, nil + return "delete_key", reflect.Int, nil case "event.hostname": - return reflect.String, nil + return "", reflect.String, nil case "event.origin": - return reflect.String, nil + return "", reflect.String, nil case "event.os": - return reflect.String, nil + return "", reflect.String, nil case "event.service": - return reflect.String, nil + return "", reflect.String, nil case "event.timestamp": - return reflect.Int, nil + return "", reflect.Int, nil case "exec.cmdline": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.container.id": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.created_at": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.envp": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.envs": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.name": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.name.length": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.file.path": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.file.path.length": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.pid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.ppid": - return reflect.Int, nil + return "exec", reflect.Int, nil case "exec.user": - return reflect.String, nil + return "exec", reflect.String, nil case "exec.user_sid": - return reflect.String, nil + return "exec", reflect.String, nil case 
"exit.cause": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.cmdline": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.code": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.container.id": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.created_at": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.envp": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.envs": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.name": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.name.length": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.file.path": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.file.path.length": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.pid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.ppid": - return reflect.Int, nil + return "exit", reflect.Int, nil case "exit.user": - return reflect.String, nil + return "exit", reflect.String, nil case "exit.user_sid": - return reflect.String, nil + return "exit", reflect.String, nil case "open.registry.key_name": - return reflect.String, nil + return "open_key", reflect.String, nil case "open.registry.key_name.length": - return reflect.Int, nil + return "open_key", reflect.Int, nil case "open.registry.key_path": - return reflect.String, nil + return "open_key", reflect.String, nil case "open.registry.key_path.length": - return reflect.Int, nil + return "open_key", reflect.Int, nil case "open_key.registry.key_name": - return reflect.String, nil + return "open_key", reflect.String, nil case "open_key.registry.key_name.length": - return reflect.Int, nil + return "open_key", reflect.Int, nil case "open_key.registry.key_path": - return reflect.String, nil + return "open_key", reflect.String, nil case "open_key.registry.key_path.length": - return reflect.Int, nil + return "open_key", reflect.Int, nil case "process.ancestors.cmdline": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.container.id": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.envp": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.envs": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.pid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.ppid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ancestors.user": - return reflect.String, nil + return "", reflect.String, nil case "process.ancestors.user_sid": - return reflect.String, nil + return "", reflect.String, nil case "process.cmdline": - return reflect.String, nil + return "", reflect.String, nil case "process.container.id": - return 
reflect.String, nil + return "", reflect.String, nil case "process.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "process.envp": - return reflect.String, nil + return "", reflect.String, nil case "process.envs": - return reflect.String, nil + return "", reflect.String, nil case "process.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.cmdline": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.container.id": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.created_at": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.envp": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.envs": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.name": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.name.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.file.path": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.file.path.length": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.pid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.ppid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.parent.user": - return reflect.String, nil + return "", reflect.String, nil case "process.parent.user_sid": - return reflect.String, nil + return "", reflect.String, nil case "process.pid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.ppid": - return reflect.Int, nil + return "", reflect.Int, nil case "process.user": - return reflect.String, nil + return "", reflect.String, nil case "process.user_sid": - return reflect.String, nil + return "", reflect.String, nil case "rename.file.destination.device_path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.device_path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.name": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.name.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.destination.path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.destination.path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.device_path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.device_path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.name": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.name.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "rename.file.path": - return reflect.String, nil + return "rename", reflect.String, nil case "rename.file.path.length": - return reflect.Int, nil + return "rename", reflect.Int, nil case "set.registry.key_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set.registry.key_name.length": - return 
reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set.registry.key_path": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set.registry.key_path.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set.registry.value_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set.registry.value_name.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set.value_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set_key_value.registry.key_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set_key_value.registry.key_name.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set_key_value.registry.key_path": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set_key_value.registry.key_path.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set_key_value.registry.value_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "set_key_value.registry.value_name.length": - return reflect.Int, nil + return "set_key_value", reflect.Int, nil case "set_key_value.value_name": - return reflect.String, nil + return "set_key_value", reflect.String, nil case "write.file.device_path": - return reflect.String, nil + return "write", reflect.String, nil case "write.file.device_path.length": - return reflect.Int, nil + return "write", reflect.Int, nil case "write.file.name": - return reflect.String, nil + return "write", reflect.String, nil case "write.file.name.length": - return reflect.Int, nil + return "write", reflect.Int, nil case "write.file.path": - return reflect.String, nil + return "write", reflect.String, nil case "write.file.path.length": - return reflect.Int, nil + return "write", reflect.Int, nil } - return reflect.Invalid, &eval.ErrFieldNotFound{Field: field} + return "", reflect.Invalid, &eval.ErrFieldNotFound{Field: field} } func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { switch field { case "change_permission.new_sd": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.NewSd"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.new_sd"} } ev.ChangePermission.NewSd = rv return nil case "change_permission.old_sd": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.OldSd"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.old_sd"} } ev.ChangePermission.OldSd = rv return nil case "change_permission.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.ObjectName"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.path"} } ev.ChangePermission.ObjectName = rv return nil case "change_permission.type": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.ObjectType"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.type"} } ev.ChangePermission.ObjectType = rv return nil case "change_permission.user_domain": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "ChangePermission.UserDomain"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.user_domain"} } ev.ChangePermission.UserDomain = rv return nil case "change_permission.username": rv, ok := value.(string) if !ok { - return 
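The hunk ending above merges the removed `GetFieldEventType` and `GetFieldType` accessors into a single `GetFieldMetadata`, which returns the event type, the `reflect.Kind`, and an error in one call. A migration sketch, assuming callers previously made the two separate calls:

```go
// Sketch only: same import-path assumptions as the earlier sketches.
package eventfields

import (
	"fmt"
	"reflect"

	"github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval"
	"github.com/DataDog/datadog-agent/pkg/security/secl/model"
)

// checkIntField verifies that a field exists and is integer-typed using the
// merged accessor, instead of calling the removed GetFieldEventType and
// GetFieldType separately.
func checkIntField(ev *model.Event, field eval.Field) error {
	eventType, kind, err := ev.GetFieldMetadata(field)
	if err != nil {
		return err // unknown fields: &eval.ErrFieldNotFound{Field: field}
	}
	if kind != reflect.Int {
		return fmt.Errorf("field %q (event type %q) has kind %s, expected int", field, eventType, kind)
	}
	return nil
}
```

Callers that only need one of the two values can discard the other with `_`.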
&eval.ErrValueTypeMismatch{Field: "ChangePermission.UserName"} + return &eval.ErrValueTypeMismatch{Field: "change_permission.username"} } ev.ChangePermission.UserName = rv return nil @@ -3156,7 +2384,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "container.created_at"} } ev.BaseEvent.ContainerContext.CreatedAt = uint64(rv) return nil @@ -3166,7 +2394,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "container.id"} } ev.BaseEvent.ContainerContext.ContainerID = containerutils.ContainerID(rv) return nil @@ -3176,7 +2404,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.Runtime"} + return &eval.ErrValueTypeMismatch{Field: "container.runtime"} } ev.BaseEvent.ContainerContext.Runtime = rv return nil @@ -3190,13 +2418,13 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ContainerContext.Tags = append(ev.BaseEvent.ContainerContext.Tags, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ContainerContext.Tags"} + return &eval.ErrValueTypeMismatch{Field: "container.tags"} } return nil case "create.file.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateNewFile.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "create.file.device_path"} } ev.CreateNewFile.File.PathnameStr = rv return nil @@ -3205,7 +2433,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateNewFile.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "create.file.name"} } ev.CreateNewFile.File.BasenameStr = rv return nil @@ -3214,7 +2442,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateNewFile.File.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "create.file.path"} } ev.CreateNewFile.File.UserPathnameStr = rv return nil @@ -3223,7 +2451,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "create.registry.key_name"} } ev.CreateRegistryKey.Registry.KeyName = rv return nil @@ -3232,7 +2460,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "create.registry.key_path"} } ev.CreateRegistryKey.Registry.KeyPath = rv return nil @@ -3241,7 +2469,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create_key.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: 
"CreateRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "create_key.registry.key_name"} } ev.CreateRegistryKey.Registry.KeyName = rv return nil @@ -3250,7 +2478,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "create_key.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "CreateRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "create_key.registry.key_path"} } ev.CreateRegistryKey.Registry.KeyPath = rv return nil @@ -3259,7 +2487,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.file.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteFile.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "delete.file.device_path"} } ev.DeleteFile.File.PathnameStr = rv return nil @@ -3268,7 +2496,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteFile.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "delete.file.name"} } ev.DeleteFile.File.BasenameStr = rv return nil @@ -3277,7 +2505,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteFile.File.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "delete.file.path"} } ev.DeleteFile.File.UserPathnameStr = rv return nil @@ -3286,7 +2514,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "delete.registry.key_name"} } ev.DeleteRegistryKey.Registry.KeyName = rv return nil @@ -3295,7 +2523,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "delete.registry.key_path"} } ev.DeleteRegistryKey.Registry.KeyPath = rv return nil @@ -3304,7 +2532,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete_key.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "delete_key.registry.key_name"} } ev.DeleteRegistryKey.Registry.KeyName = rv return nil @@ -3313,7 +2541,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "delete_key.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "DeleteRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "delete_key.registry.key_path"} } ev.DeleteRegistryKey.Registry.KeyPath = rv return nil @@ -3322,35 +2550,35 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "event.hostname": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Hostname"} + return &eval.ErrValueTypeMismatch{Field: "event.hostname"} } ev.BaseEvent.Hostname = rv return nil case "event.origin": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Origin"} + 
return &eval.ErrValueTypeMismatch{Field: "event.origin"} } ev.BaseEvent.Origin = rv return nil case "event.os": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Os"} + return &eval.ErrValueTypeMismatch{Field: "event.os"} } ev.BaseEvent.Os = rv return nil case "event.service": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.Service"} + return &eval.ErrValueTypeMismatch{Field: "event.service"} } ev.BaseEvent.Service = rv return nil case "event.timestamp": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.TimestampRaw"} + return &eval.ErrValueTypeMismatch{Field: "event.timestamp"} } ev.BaseEvent.TimestampRaw = uint64(rv) return nil @@ -3360,7 +2588,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "exec.cmdline"} } ev.Exec.Process.CmdLine = rv return nil @@ -3370,7 +2598,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "exec.container.id"} } ev.Exec.Process.ContainerID = rv return nil @@ -3380,7 +2608,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "exec.created_at"} } ev.Exec.Process.CreatedAt = uint64(rv) return nil @@ -3394,7 +2622,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.Envp = append(ev.Exec.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "exec.envp"} } return nil case "exec.envs": @@ -3407,7 +2635,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exec.Process.Envs = append(ev.Exec.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "exec.envs"} } return nil case "exec.file.name": @@ -3416,7 +2644,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.name"} } ev.Exec.Process.FileEvent.BasenameStr = rv return nil @@ -3431,7 +2659,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "exec.file.path"} } ev.Exec.Process.FileEvent.PathnameStr = rv return nil @@ -3446,7 +2674,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "exec.pid"} } ev.Exec.Process.PIDContext.Pid = uint32(rv) return nil @@ -3456,7 +2684,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "exec.ppid"} } ev.Exec.Process.PPid = uint32(rv) return nil @@ -3466,7 +2694,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.User"} + return &eval.ErrValueTypeMismatch{Field: "exec.user"} } ev.Exec.Process.User = rv return nil @@ -3476,14 +2704,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exec.Process.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "exec.user_sid"} } ev.Exec.Process.OwnerSidString = rv return nil case "exit.cause": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Cause"} + return &eval.ErrValueTypeMismatch{Field: "exit.cause"} } ev.Exit.Cause = uint32(rv) return nil @@ -3493,14 +2721,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "exit.cmdline"} } ev.Exit.Process.CmdLine = rv return nil case "exit.code": rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Code"} + return &eval.ErrValueTypeMismatch{Field: "exit.code"} } ev.Exit.Code = uint32(rv) return nil @@ -3510,7 +2738,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "exit.container.id"} } ev.Exit.Process.ContainerID = rv return nil @@ -3520,7 +2748,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "exit.created_at"} } ev.Exit.Process.CreatedAt = uint64(rv) return nil @@ -3534,7 +2762,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.Envp = 
append(ev.Exit.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "exit.envp"} } return nil case "exit.envs": @@ -3547,7 +2775,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.Exit.Process.Envs = append(ev.Exit.Process.Envs, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "exit.envs"} } return nil case "exit.file.name": @@ -3556,7 +2784,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.name"} } ev.Exit.Process.FileEvent.BasenameStr = rv return nil @@ -3571,7 +2799,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "exit.file.path"} } ev.Exit.Process.FileEvent.PathnameStr = rv return nil @@ -3586,7 +2814,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "exit.pid"} } ev.Exit.Process.PIDContext.Pid = uint32(rv) return nil @@ -3596,7 +2824,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "exit.ppid"} } ev.Exit.Process.PPid = uint32(rv) return nil @@ -3606,7 +2834,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.User"} + return &eval.ErrValueTypeMismatch{Field: "exit.user"} } ev.Exit.Process.User = rv return nil @@ -3616,14 +2844,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "Exit.Process.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "exit.user_sid"} } ev.Exit.Process.OwnerSidString = rv return nil case "open.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OpenRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "open.registry.key_name"} } ev.OpenRegistryKey.Registry.KeyName = rv return nil @@ -3632,7 +2860,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "open.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OpenRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "open.registry.key_path"} } ev.OpenRegistryKey.Registry.KeyPath = rv return nil @@ -3641,7 +2869,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "open_key.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OpenRegistryKey.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "open_key.registry.key_name"} } ev.OpenRegistryKey.Registry.KeyName = rv return nil @@ -3650,7 +2878,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case 
"open_key.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "OpenRegistryKey.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "open_key.registry.key_path"} } ev.OpenRegistryKey.Registry.KeyPath = rv return nil @@ -3665,7 +2893,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.cmdline"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CmdLine = rv return nil @@ -3678,7 +2906,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.container.id"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.ContainerID = rv return nil @@ -3691,7 +2919,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.created_at"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.CreatedAt = uint64(rv) return nil @@ -3708,7 +2936,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envp = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.envp"} } return nil case "process.ancestors.envs": @@ -3724,7 +2952,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envs = append(ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envs, rv...) 
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.envs"} } return nil case "process.ancestors.file.name": @@ -3736,7 +2964,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.name"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.BasenameStr = rv return nil @@ -3757,7 +2985,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.file.path"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.FileEvent.PathnameStr = rv return nil @@ -3786,7 +3014,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.pid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.Pid = uint32(rv) return nil @@ -3799,7 +3027,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.ppid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PPid = uint32(rv) return nil @@ -3812,7 +3040,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.User"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.user"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.User = rv return nil @@ -3825,7 +3053,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "process.ancestors.user_sid"} } ev.BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.OwnerSidString = rv return nil @@ -3835,7 +3063,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "process.cmdline"} } ev.BaseEvent.ProcessContext.Process.CmdLine = rv return nil @@ -3845,7 +3073,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "process.container.id"} } ev.BaseEvent.ProcessContext.Process.ContainerID = rv return nil @@ -3855,7 +3083,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) 
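
One practical effect of this hunk, shown as a hedged sketch (the wrapper below is not part of the patch, and `ev` is assumed to be a *model.Event built elsewhere): a type mismatch is now reported against the rule-visible SECL field name instead of the internal Go struct path, so callers can echo it straight back to rule authors.

```go
// assumed imports: "errors", "fmt", the secl compiler eval package,
// and the model package this accessor file belongs to
func setAncestorPid(ev *model.Event, v interface{}) error {
	err := ev.SetFieldValue("process.ancestors.pid", v)
	var mismatch *eval.ErrValueTypeMismatch
	if errors.As(err, &mismatch) {
		// mismatch.Field is now "process.ancestors.pid", the name a rule author writes,
		// rather than "BaseEvent.ProcessContext.Ancestor.ProcessContext.Process.PIDContext.Pid"
		return fmt.Errorf("field %q expects an int: %w", mismatch.Field, err)
	}
	return err
}
```
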
if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "process.created_at"} } ev.BaseEvent.ProcessContext.Process.CreatedAt = uint64(rv) return nil @@ -3869,7 +3097,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.Envp = append(ev.BaseEvent.ProcessContext.Process.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Envp"} + return &eval.ErrValueTypeMismatch{Field: "process.envp"} } return nil case "process.envs": @@ -3882,7 +3110,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Process.Envs = append(ev.BaseEvent.ProcessContext.Process.Envs, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.Envs"} + return &eval.ErrValueTypeMismatch{Field: "process.envs"} } return nil case "process.file.name": @@ -3891,7 +3119,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.file.name"} } ev.BaseEvent.ProcessContext.Process.FileEvent.BasenameStr = rv return nil @@ -3906,7 +3134,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.file.path"} } ev.BaseEvent.ProcessContext.Process.FileEvent.PathnameStr = rv return nil @@ -3924,7 +3152,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CmdLine"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.cmdline"} } ev.BaseEvent.ProcessContext.Parent.CmdLine = rv return nil @@ -3937,7 +3165,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.ContainerID"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.container.id"} } ev.BaseEvent.ProcessContext.Parent.ContainerID = rv return nil @@ -3950,7 +3178,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.CreatedAt"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.created_at"} } ev.BaseEvent.ProcessContext.Parent.CreatedAt = uint64(rv) return nil @@ -3967,7 +3195,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.Envp = append(ev.BaseEvent.ProcessContext.Parent.Envp, rv...) default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Envp"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.envp"} } return nil case "process.parent.envs": @@ -3983,7 +3211,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case []string: ev.BaseEvent.ProcessContext.Parent.Envs = append(ev.BaseEvent.ProcessContext.Parent.Envs, rv...) 
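
The env and envp cases in this hunk accept either a single string or a []string and append it; any other type is rejected with the SECL field name shown above. A minimal sketch, assuming `ev` is a *model.Event obtained elsewhere:

```go
// assumed import: the model package this accessor file belongs to
func appendParentEnvs(ev *model.Event) error {
	// a single value is appended as one entry
	if err := ev.SetFieldValue("process.parent.envs", "PATH"); err != nil {
		return err
	}
	// a slice is appended in a single call; any other value type yields
	// &eval.ErrValueTypeMismatch{Field: "process.parent.envs"}
	return ev.SetFieldValue("process.parent.envs", []string{"HOME", "SHELL"})
}
```
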
default: - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.Envs"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.envs"} } return nil case "process.parent.file.name": @@ -3995,7 +3223,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.name"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.BasenameStr = rv return nil @@ -4016,7 +3244,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.FileEvent.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.file.path"} } ev.BaseEvent.ProcessContext.Parent.FileEvent.PathnameStr = rv return nil @@ -4037,7 +3265,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.pid"} } ev.BaseEvent.ProcessContext.Parent.PIDContext.Pid = uint32(rv) return nil @@ -4050,7 +3278,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.PPid"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.ppid"} } ev.BaseEvent.ProcessContext.Parent.PPid = uint32(rv) return nil @@ -4063,7 +3291,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.User"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.user"} } ev.BaseEvent.ProcessContext.Parent.User = rv return nil @@ -4076,7 +3304,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Parent.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "process.parent.user_sid"} } ev.BaseEvent.ProcessContext.Parent.OwnerSidString = rv return nil @@ -4086,7 +3314,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.PIDContext.Pid"} + return &eval.ErrValueTypeMismatch{Field: "process.pid"} } ev.BaseEvent.ProcessContext.Process.PIDContext.Pid = uint32(rv) return nil @@ -4096,7 +3324,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(int) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.PPid"} + return &eval.ErrValueTypeMismatch{Field: "process.ppid"} } ev.BaseEvent.ProcessContext.Process.PPid = uint32(rv) return nil @@ -4106,7 +3334,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.User"} + return &eval.ErrValueTypeMismatch{Field: "process.user"} } ev.BaseEvent.ProcessContext.Process.User = rv return nil @@ -4116,14 +3344,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { } rv, ok := value.(string) if !ok { 
- return &eval.ErrValueTypeMismatch{Field: "BaseEvent.ProcessContext.Process.OwnerSidString"} + return &eval.ErrValueTypeMismatch{Field: "process.user_sid"} } ev.BaseEvent.ProcessContext.Process.OwnerSidString = rv return nil case "rename.file.destination.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.New.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.device_path"} } ev.RenameFile.New.PathnameStr = rv return nil @@ -4132,7 +3360,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.destination.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.New.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.name"} } ev.RenameFile.New.BasenameStr = rv return nil @@ -4141,7 +3369,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.destination.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.New.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.destination.path"} } ev.RenameFile.New.UserPathnameStr = rv return nil @@ -4150,7 +3378,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.Old.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.device_path"} } ev.RenameFile.Old.PathnameStr = rv return nil @@ -4159,7 +3387,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.Old.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.name"} } ev.RenameFile.Old.BasenameStr = rv return nil @@ -4168,7 +3396,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "rename.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "RenameFile.Old.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "rename.file.path"} } ev.RenameFile.Old.UserPathnameStr = rv return nil @@ -4177,7 +3405,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "set.registry.key_name"} } ev.SetRegistryKeyValue.Registry.KeyName = rv return nil @@ -4186,7 +3414,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "set.registry.key_path"} } ev.SetRegistryKeyValue.Registry.KeyPath = rv return nil @@ -4195,7 +3423,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set.registry.value_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.ValueName"} + return &eval.ErrValueTypeMismatch{Field: "set.registry.value_name"} } ev.SetRegistryKeyValue.ValueName = rv return nil @@ -4204,14 +3432,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set.value_name": rv, ok := value.(string) if !ok { - 
return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.ValueName"} + return &eval.ErrValueTypeMismatch{Field: "set.value_name"} } ev.SetRegistryKeyValue.ValueName = rv return nil case "set_key_value.registry.key_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.Registry.KeyName"} + return &eval.ErrValueTypeMismatch{Field: "set_key_value.registry.key_name"} } ev.SetRegistryKeyValue.Registry.KeyName = rv return nil @@ -4220,7 +3448,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set_key_value.registry.key_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.Registry.KeyPath"} + return &eval.ErrValueTypeMismatch{Field: "set_key_value.registry.key_path"} } ev.SetRegistryKeyValue.Registry.KeyPath = rv return nil @@ -4229,7 +3457,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set_key_value.registry.value_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.ValueName"} + return &eval.ErrValueTypeMismatch{Field: "set_key_value.registry.value_name"} } ev.SetRegistryKeyValue.ValueName = rv return nil @@ -4238,14 +3466,14 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "set_key_value.value_name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "SetRegistryKeyValue.ValueName"} + return &eval.ErrValueTypeMismatch{Field: "set_key_value.value_name"} } ev.SetRegistryKeyValue.ValueName = rv return nil case "write.file.device_path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "WriteFile.File.PathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "write.file.device_path"} } ev.WriteFile.File.PathnameStr = rv return nil @@ -4254,7 +3482,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "write.file.name": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "WriteFile.File.BasenameStr"} + return &eval.ErrValueTypeMismatch{Field: "write.file.name"} } ev.WriteFile.File.BasenameStr = rv return nil @@ -4263,7 +3491,7 @@ func (ev *Event) SetFieldValue(field eval.Field, value interface{}) error { case "write.file.path": rv, ok := value.(string) if !ok { - return &eval.ErrValueTypeMismatch{Field: "WriteFile.File.UserPathnameStr"} + return &eval.ErrValueTypeMismatch{Field: "write.file.path"} } ev.WriteFile.File.UserPathnameStr = rv return nil diff --git a/pkg/security/seclwin/model/args_envs.go b/pkg/security/seclwin/model/args_envs.go index b59049742d8a5..4d2d7aa244fc3 100644 --- a/pkg/security/seclwin/model/args_envs.go +++ b/pkg/security/seclwin/model/args_envs.go @@ -9,20 +9,15 @@ package model import ( "slices" "strings" -) -const ( - // MaxArgEnvSize maximum size of one argument or environment variable - MaxArgEnvSize = 256 - // MaxArgsEnvsSize maximum number of args and/or envs - MaxArgsEnvsSize = 256 + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" ) // ArgsEnvs raw value for args and envs type ArgsEnvs struct { ID uint64 Size uint32 - ValuesRaw [MaxArgEnvSize]byte + ValuesRaw [sharedconsts.MaxArgEnvSize]byte } // ArgsEntry defines a args cache entry diff --git a/pkg/security/seclwin/model/consts_common.go b/pkg/security/seclwin/model/consts_common.go index 1b64957cba71e..1b3f9f9309bb2 100644 --- a/pkg/security/seclwin/model/consts_common.go +++ 
b/pkg/security/seclwin/model/consts_common.go @@ -13,6 +13,7 @@ import ( "syscall" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/security/secl/model/usersession" ) @@ -319,11 +320,18 @@ var ( "IP_PROTO_RAW": IPProtoRAW, } + // NetworkDirectionConstants is the list of supported network directions + // generate_constants:Network directions,Network directions are the supported directions of network packets. + NetworkDirectionConstants = map[string]NetworkDirection{ + "INGRESS": Ingress, + "EGRESS": Egress, + } + // exitCauseConstants is the list of supported Exit causes - exitCauseConstants = map[string]ExitCause{ - "EXITED": ExitExited, - "COREDUMPED": ExitCoreDumped, - "SIGNALED": ExitSignaled, + exitCauseConstants = map[string]sharedconsts.ExitCause{ + "EXITED": sharedconsts.ExitExited, + "COREDUMPED": sharedconsts.ExitCoreDumped, + "SIGNALED": sharedconsts.ExitSignaled, } tlsVersionContants = map[string]uint16{ @@ -337,13 +345,13 @@ var ( ) var ( - dnsQTypeStrings = map[uint32]string{} - dnsQClassStrings = map[uint32]string{} - l3ProtocolStrings = map[L3Protocol]string{} - l4ProtocolStrings = map[L4Protocol]string{} - addressFamilyStrings = map[uint16]string{} - exitCauseStrings = map[ExitCause]string{} - tlsVersionStrings = map[uint16]string{} + dnsQTypeStrings = map[uint32]string{} + dnsQClassStrings = map[uint32]string{} + l3ProtocolStrings = map[L3Protocol]string{} + l4ProtocolStrings = map[L4Protocol]string{} + networkDirectionStrings = map[NetworkDirection]string{} + addressFamilyStrings = map[uint16]string{} + tlsVersionStrings = map[uint16]string{} ) // File flags @@ -410,6 +418,13 @@ func initL4ProtocolConstants() { } } +func initNetworkDirectionContants() { + for k, v := range NetworkDirectionConstants { + seclConstants[k] = &eval.IntEvaluator{Value: int(v)} + networkDirectionStrings[v] = k + } +} + func initAddressFamilyConstants() { for k, v := range addressFamilyConstants { seclConstants[k] = &eval.IntEvaluator{Value: int(v)} @@ -423,7 +438,6 @@ func initAddressFamilyConstants() { func initExitCauseConstants() { for k, v := range exitCauseConstants { seclConstants[k] = &eval.IntEvaluator{Value: int(v)} - exitCauseStrings[v] = k } } @@ -463,6 +477,7 @@ func initConstants() { initDNSQTypeConstants() initL3ProtocolConstants() initL4ProtocolConstants() + initNetworkDirectionContants() initAddressFamilyConstants() initExitCauseConstants() initBPFMapNamesConstants() @@ -781,18 +796,16 @@ const ( IPProtoRAW L4Protocol = 255 ) -// ExitCause represents the cause of a process termination -type ExitCause uint32 +// NetworkDirection is used to identify the network direction of a flow +type NetworkDirection uint32 -func (cause ExitCause) String() string { - return exitCauseStrings[cause] +func (direction NetworkDirection) String() string { + return networkDirectionStrings[direction] } const ( - // ExitExited Process exited normally - ExitExited ExitCause = iota - // ExitCoreDumped Process was terminated with a coredump signal - ExitCoreDumped - // ExitSignaled Process was terminated with a signal other than a coredump - ExitSignaled + // Egress is used to identify egress traffic + Egress NetworkDirection = iota + 1 + // Ingress is used to identify ingress traffic + Ingress ) diff --git a/pkg/security/seclwin/model/consts_map_names_linux.go b/pkg/security/seclwin/model/consts_map_names_linux.go index 55a6198fcfa78..0c76dbbc28bb2 100644 --- 
a/pkg/security/seclwin/model/consts_map_names_linux.go +++ b/pkg/security/seclwin/model/consts_map_names_linux.go @@ -31,7 +31,7 @@ var bpfMapNames = []string{ "events", "events_ringbuf_", "events_stats", - "exec_file_cache", + "inode_file", "exec_pid_transf", "fb_approver_sta", "fb_discarder_st", diff --git a/pkg/security/seclwin/model/events.go b/pkg/security/seclwin/model/events.go index 2c2e867ef17e8..77afd53f202e2 100644 --- a/pkg/security/seclwin/model/events.go +++ b/pkg/security/seclwin/model/events.go @@ -83,6 +83,8 @@ const ( NetDeviceEventType // VethPairEventType is sent when a new veth pair is created VethPairEventType + // AcceptEventType Accept event + AcceptEventType // BindEventType Bind event BindEventType // ConnectEventType Connect event @@ -101,6 +103,10 @@ const ( CgroupWriteEventType // RawPacketEventType raw packet event RawPacketEventType + // NetworkFlowMonitorEventType is sent to monitor network activity + NetworkFlowMonitorEventType + // StatEventType stat event (used kernel side only) + StatEventType // MaxKernelEventType is used internally to get the maximum number of kernel events. MaxKernelEventType @@ -219,6 +225,8 @@ func (t EventType) String() string { return "veth_pair" case BindEventType: return "bind" + case AcceptEventType: + return "accept" case ConnectEventType: return "connect" case UnshareMountNsEventType: @@ -231,6 +239,10 @@ func (t EventType) String() string { return "ondemand" case RawPacketEventType: return "packet" + case NetworkFlowMonitorEventType: + return "network_flow_monitor" + case StatEventType: + return "stat" case CustomEventType: return "custom_event" case CreateNewFileEventType: diff --git a/pkg/security/seclwin/model/model.go b/pkg/security/seclwin/model/model.go index 9a6ae500f2d6f..cb6666fb16259 100644 --- a/pkg/security/seclwin/model/model.go +++ b/pkg/security/seclwin/model/model.go @@ -10,15 +10,15 @@ package model import ( "net" + "net/netip" "reflect" "runtime" "time" - "modernc.org/mathutil" - "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model/usersession" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/utils" ) // Model describes the data model for the runtime security agent events @@ -72,9 +72,9 @@ func (r *Releasable) AppendReleaseCallback(callback func()) { // ContainerContext holds the container context of an event type ContainerContext struct { Releasable - ContainerID containerutils.ContainerID `field:"id,handler:ResolveContainerID"` // SECLDoc[id] Definition:`ID of the container` - CreatedAt uint64 `field:"created_at,handler:ResolveContainerCreatedAt"` // SECLDoc[created_at] Definition:`Timestamp of the creation of the container`` - Tags []string `field:"tags,handler:ResolveContainerTags,opts:skip_ad,weight:9999"` // SECLDoc[tags] Definition:`Tags of the container` + ContainerID containerutils.ContainerID `field:"id,handler:ResolveContainerID,opts:gen_getters"` // SECLDoc[id] Definition:`ID of the container` + CreatedAt uint64 `field:"created_at,handler:ResolveContainerCreatedAt,opts:gen_getters"` // SECLDoc[created_at] Definition:`Timestamp of the creation of the container`` + Tags []string `field:"tags,handler:ResolveContainerTags,opts:skip_ad,weight:9999"` // SECLDoc[tags] Definition:`Tags of the container` Resolved bool `field:"-"` Runtime string `field:"runtime,handler:ResolveContainerRuntime"` // SECLDoc[runtime] Definition:`Runtime managing the container` 
} @@ -96,15 +96,25 @@ type IPPortContext struct { IsPublicResolved bool `field:"-"` } +// GetComparable returns a comparable version of IPPortContext +func (ipc *IPPortContext) GetComparable() netip.AddrPort { + ipcAddr, ok := netip.AddrFromSlice(ipc.IPNet.IP) + if !ok { + return netip.AddrPort{} + } + return netip.AddrPortFrom(ipcAddr, ipc.Port) +} + // NetworkContext represents the network context of the event type NetworkContext struct { Device NetworkDeviceContext `field:"device"` // network device on which the network packet was captured - L3Protocol uint16 `field:"l3_protocol"` // SECLDoc[l3_protocol] Definition:`L3 protocol of the network packet` Constants:`L3 protocols` - L4Protocol uint16 `field:"l4_protocol"` // SECLDoc[l4_protocol] Definition:`L4 protocol of the network packet` Constants:`L4 protocols` - Source IPPortContext `field:"source"` // source of the network packet - Destination IPPortContext `field:"destination"` // destination of the network packet - Size uint32 `field:"size"` // SECLDoc[size] Definition:`Size in bytes of the network packet` + L3Protocol uint16 `field:"l3_protocol"` // SECLDoc[l3_protocol] Definition:`L3 protocol of the network packet` Constants:`L3 protocols` + L4Protocol uint16 `field:"l4_protocol"` // SECLDoc[l4_protocol] Definition:`L4 protocol of the network packet` Constants:`L4 protocols` + Source IPPortContext `field:"source"` // source of the network packet + Destination IPPortContext `field:"destination"` // destination of the network packet + NetworkDirection uint32 `field:"network_direction"` // SECLDoc[network_direction] Definition:`Network direction of the network packet` Constants:`Network directions` + Size uint32 `field:"size"` // SECLDoc[size] Definition:`Size in bytes of the network packet` } // IsZero returns if there is a network context @@ -114,8 +124,8 @@ func (nc *NetworkContext) IsZero() bool { // SpanContext describes a span context type SpanContext struct { - SpanID uint64 `field:"-"` - TraceID mathutil.Int128 `field:"-"` + SpanID uint64 `field:"-"` + TraceID utils.TraceID `field:"-"` } // BaseEvent represents an event sent from the kernel @@ -124,13 +134,13 @@ type BaseEvent struct { Type uint32 `field:"-"` Flags uint32 `field:"-"` TimestampRaw uint64 `field:"event.timestamp,handler:ResolveEventTimestamp"` // SECLDoc[event.timestamp] Definition:`Timestamp of the event` - Timestamp time.Time `field:"timestamp,opts:getters_only,handler:ResolveEventTime"` + Timestamp time.Time `field:"timestamp,opts:getters_only|gen_getters,handler:ResolveEventTime"` Rules []*MatchedRule `field:"-"` ActionReports []ActionReport `field:"-"` - Os string `field:"event.os"` // SECLDoc[event.os] Definition:`Operating system of the event` - Origin string `field:"event.origin"` // SECLDoc[event.origin] Definition:`Origin of the event` - Service string `field:"event.service,handler:ResolveService,opts:skip_ad"` // SECLDoc[event.service] Definition:`Service associated with the event` - Hostname string `field:"event.hostname,handler:ResolveHostname"` // SECLDoc[event.hostname] Definition:`Hostname associated with the event` + Os string `field:"event.os"` // SECLDoc[event.os] Definition:`Operating system of the event` + Origin string `field:"event.origin"` // SECLDoc[event.origin] Definition:`Origin of the event` + Service string `field:"event.service,handler:ResolveService,opts:skip_ad|gen_getters"` // SECLDoc[event.service] Definition:`Service associated with the event` + Hostname string `field:"event.hostname,handler:ResolveHostname"` // 
SECLDoc[event.hostname] Definition:`Hostname associated with the event` // context shared with all events ProcessContext *ProcessContext `field:"process"` @@ -508,7 +518,7 @@ func (it *ProcessAncestorsIterator) Front(ctx *eval.Context) *ProcessCacheEntry } // Next returns the next element -func (it *ProcessAncestorsIterator) Next() *ProcessCacheEntry { +func (it *ProcessAncestorsIterator) Next(_ *eval.Context) *ProcessCacheEntry { if next := it.prev.Ancestor; next != nil { it.prev = next return next @@ -570,8 +580,8 @@ type ProcessContext struct { // ExitEvent represents a process exit event type ExitEvent struct { *Process - Cause uint32 `field:"cause"` // SECLDoc[cause] Definition:`Cause of the process termination (one of EXITED, SIGNALED, COREDUMPED)` - Code uint32 `field:"code"` // SECLDoc[code] Definition:`Exit code of the process or number of the signal that caused the process to terminate` + Cause uint32 `field:"cause"` // SECLDoc[cause] Definition:`Cause of the process termination (one of EXITED, SIGNALED, COREDUMPED)` + Code uint32 `field:"code,opts:gen_getters"` // SECLDoc[code] Definition:`Exit code of the process or number of the signal that caused the process to terminate` } // DNSEvent represents a DNS event diff --git a/pkg/security/seclwin/model/model_win.go b/pkg/security/seclwin/model/model_win.go index 5305dfee0b71c..8fe667c85f2a6 100644 --- a/pkg/security/seclwin/model/model_win.go +++ b/pkg/security/seclwin/model/model_win.go @@ -49,9 +49,9 @@ type Event struct { // FileEvent is the common file event type type FileEvent struct { - FileObject uint64 `field:"-"` // handle numeric value - PathnameStr string `field:"path,handler:ResolveFilePath,opts:length" op_override:"eval.WindowsPathCmp"` // SECLDoc[path] Definition:`File's path` Example:`exec.file.path == "c:\cmd.bat"` Description:`Matches the execution of the file located at c:\cmd.bat` - BasenameStr string `field:"name,handler:ResolveFileBasename,opts:length" op_override:"eval.CaseInsensitiveCmp"` // SECLDoc[name] Definition:`File's basename` Example:`exec.file.name == "cmd.bat"` Description:`Matches the execution of any file named cmd.bat.` + FileObject uint64 `field:"-"` // handle numeric value + PathnameStr string `field:"path,handler:ResolveFilePath,opts:length|gen_getters" op_override:"eval.WindowsPathCmp"` // SECLDoc[path] Definition:`File's path` Example:`exec.file.path == "c:\cmd.bat"` Description:`Matches the execution of the file located at c:\cmd.bat` + BasenameStr string `field:"name,handler:ResolveFileBasename,opts:length" op_override:"eval.CaseInsensitiveCmp"` // SECLDoc[name] Definition:`File's basename` Example:`exec.file.name == "cmd.bat"` Description:`Matches the execution of any file named cmd.bat.` } // FimFileEvent is the common file event type @@ -76,12 +76,12 @@ type Process struct { ContainerID string `field:"container.id"` // SECLDoc[container.id] Definition:`Container ID` - ExitTime time.Time `field:"exit_time,opts:getters_only"` - ExecTime time.Time `field:"exec_time,opts:getters_only"` + ExitTime time.Time `field:"exit_time,opts:getters_only|gen_getters"` + ExecTime time.Time `field:"exec_time,opts:getters_only|gen_getters"` CreatedAt uint64 `field:"created_at,handler:ResolveProcessCreatedAt"` // SECLDoc[created_at] Definition:`Timestamp of the creation of the process` - PPid uint32 `field:"ppid"` // SECLDoc[ppid] Definition:`Parent process ID` + PPid uint32 `field:"ppid,opts:gen_getters"` // SECLDoc[ppid] Definition:`Parent process ID` ArgsEntry *ArgsEntry `field:"-"` EnvsEntry *EnvsEntry 
`field:"-"` @@ -92,8 +92,8 @@ type Process struct { OwnerSidString string `field:"user_sid"` // SECLDoc[user_sid] Definition:`Sid of the user of the process` User string `field:"user,handler:ResolveUser"` // SECLDoc[user] Definition:`User name` - Envs []string `field:"envs,handler:ResolveProcessEnvs,weight:100"` // SECLDoc[envs] Definition:`Environment variable names of the process` - Envp []string `field:"envp,handler:ResolveProcessEnvp,weight:100"` // SECLDoc[envp] Definition:`Environment variables of the process` // SECLDoc[envp] Definition:`Environment variables of the process` + Envs []string `field:"envs,handler:ResolveProcessEnvs,weight:100"` // SECLDoc[envs] Definition:`Environment variable names of the process` + Envp []string `field:"envp,handler:ResolveProcessEnvp,weight:100,opts:gen_getters"` // SECLDoc[envp] Definition:`Environment variables of the process` // SECLDoc[envp] Definition:`Environment variables of the process` // cache version Variables eval.Variables `field:"-"` @@ -107,7 +107,7 @@ type ExecEvent struct { // PIDContext holds the process context of an kernel event type PIDContext struct { - Pid uint32 `field:"pid"` // SECLDoc[pid] Definition:`Process ID of the process (also called thread group ID)` + Pid uint32 `field:"pid,opts:gen_getters"` // SECLDoc[pid] Definition:`Process ID of the process (also called thread group ID)` } // NetworkDeviceContext defines a network device context diff --git a/pkg/security/seclwin/model/string_array_iter.go b/pkg/security/seclwin/model/string_array_iter.go index c27537255c729..10034151c85d8 100644 --- a/pkg/security/seclwin/model/string_array_iter.go +++ b/pkg/security/seclwin/model/string_array_iter.go @@ -8,24 +8,38 @@ package model import "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" -func newAncestorsIterator[T any](iter *ProcessAncestorsIterator, ctx *eval.Context, ev *Event, perIter func(ev *Event, pce *ProcessCacheEntry) T) []T { - results := make([]T, 0, ctx.CachedAncestorsCount) - for pce := iter.Front(ctx); pce != nil; pce = iter.Next() { - results = append(results, perIter(ev, pce)) +// AncestorsIterator is a generic interface that iterators must implement +type AncestorsIterator[T any] interface { + Front(ctx *eval.Context) T + Next(ctx *eval.Context) T + At(ctx *eval.Context, regID eval.RegisterID, pos int) T + Len(ctx *eval.Context) int +} + +// Helper function to check if a value is nil +func isNil[V comparable](v V) bool { + var zero V + return v == zero +} + +func newAncestorsIterator[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) T) []T { + results := make([]T, 0, ctx.AncestorsCounters[field]) + for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { + results = append(results, perIter(ev, entry)) } - ctx.CachedAncestorsCount = len(results) + ctx.AncestorsCounters[field] = len(results) return results } -func newAncestorsIteratorArray[T any](iter *ProcessAncestorsIterator, ctx *eval.Context, ev *Event, perIter func(ev *Event, pce *ProcessCacheEntry) []T) []T { - results := make([]T, 0, ctx.CachedAncestorsCount) +func newAncestorsIteratorArray[T any, V comparable](iter AncestorsIterator[V], field eval.Field, ctx *eval.Context, ev *Event, perIter func(ev *Event, current V) []T) []T { + results := make([]T, 0, ctx.AncestorsCounters[field]) ancestorsCount := 0 - for pce := iter.Front(ctx); pce != nil; pce = iter.Next() { - results = append(results, perIter(ev, pce)...) 
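
The generic rewrite above swaps the pointer-only `pce != nil` test for a zero-value comparison, which is what lets any comparable element type terminate the walk. A self-contained sketch of that check; the `entry` type is invented for illustration:

```go
package main

import "fmt"

// isZero mirrors the isNil helper introduced above: for comparable types,
// the zero value stands in for "no more ancestors".
func isZero[V comparable](v V) bool {
	var zero V
	return v == zero
}

type entry struct{ name string }

func main() {
	var done *entry               // for pointer element types the zero value is nil
	fmt.Println(isZero(done))     // true  -> the iteration stops
	fmt.Println(isZero(&entry{})) // false -> keep walking
}
```
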
+ for entry := iter.Front(ctx); !isNil(entry); entry = iter.Next(ctx) { + results = append(results, perIter(ev, entry)...) ancestorsCount++ } - ctx.CachedAncestorsCount = ancestorsCount + ctx.AncestorsCounters[field] = ancestorsCount return results } diff --git a/pkg/security/security_profile/activity_tree/activity_tree.go b/pkg/security/security_profile/activity_tree/activity_tree.go index 7987e8061f753..765c8e1b2a16e 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree.go +++ b/pkg/security/security_profile/activity_tree/activity_tree.go @@ -24,6 +24,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/resolvers/process" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/pathutils" ) // NodeDroppedReason is used to list the reasons to drop a node @@ -402,6 +403,8 @@ func (at *ActivityTree) insertEvent(event *model.Event, dryRun bool, insertMissi return node.InsertBindEvent(event, imageTag, generationType, at.Stats, dryRun), nil case model.SyscallsEventType: return node.InsertSyscalls(event, imageTag, at.SyscallsMask, at.Stats, dryRun), nil + case model.NetworkFlowMonitorEventType: + return node.InsertNetworkFlowMonitorEvent(event, imageTag, generationType, at.Stats, dryRun), nil case model.ExitEventType: // Update the exit time of the process (this is purely informative, do not rely on timestamps to detect // execed children) @@ -893,7 +896,7 @@ func (at *ActivityTree) ExtractPaths(_, fimEnabled, lineageEnabled bool) (map[st at.visitFileNode(file, func(fileNode *FileNode) { path, ok := modifiedPaths[fileNode.File.PathnameStr] if !ok { - modifiedPaths[fileNode.File.PathnameStr] = utils.CheckForPatterns(fileNode.File.PathnameStr) + modifiedPaths[fileNode.File.PathnameStr] = pathutils.CheckForPatterns(fileNode.File.PathnameStr) path = modifiedPaths[fileNode.File.PathnameStr] } if len(path) > 0 { @@ -905,7 +908,7 @@ func (at *ActivityTree) ExtractPaths(_, fimEnabled, lineageEnabled bool) (map[st execPath, ok := modifiedPaths[processNode.Process.FileEvent.PathnameStr] if !ok { - modifiedPaths[processNode.Process.FileEvent.PathnameStr] = utils.CheckForPatterns(processNode.Process.FileEvent.PathnameStr) + modifiedPaths[processNode.Process.FileEvent.PathnameStr] = pathutils.CheckForPatterns(processNode.Process.FileEvent.PathnameStr) execPath = modifiedPaths[processNode.Process.FileEvent.PathnameStr] } diff --git a/pkg/security/security_profile/activity_tree/activity_tree_graph.go b/pkg/security/security_profile/activity_tree/activity_tree_graph.go index 14f70a5a98ddd..cfc5b6f6f8985 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_graph.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_graph.go @@ -10,6 +10,7 @@ package activitytree import ( "fmt" + "strconv" "strings" "github.com/DataDog/datadog-agent/pkg/security/resolvers/process" @@ -18,28 +19,71 @@ import ( ) var ( + bigText = 10 + mediumText = 7 + smallText = 5 + tableHeader = "<" + processColor = "#8fbbff" processProfileDriftColor = "#c2daff" processRuntimeColor = "#edf3ff" processSnapshotColor = "white" processShape = "record" + //nolint:deadcode,unused + processClusterColor = "#c7ddff" + + processCategoryColor = "#c7c7c7" + //nolint:deadcode,unused + processCategoryProfileDriftColor = "#e0e0e0" + //nolint:deadcode,unused + processCategoryRuntimeColor = "#f5f5f5" + processCategorySnapshotColor = "white" + processCategoryShape = "record" + 
processCategoryClusterColor = "#e3e3e3" fileColor = "#77bf77" fileProfileDriftColor = "#c6e1c1" fileRuntimeColor = "#e9f3e7" fileSnapshotColor = "white" fileShape = "record" + fileClusterColor = "#c2f2c2" networkColor = "#ff9800" networkProfileDriftColor = "#faddb1" networkRuntimeColor = "#ffebcd" networkShape = "record" + networkClusterColor = "#fff5e6" ) +func (at *ActivityTree) getGraphTitle(name string, selector string) string { + title := tableHeader + title += "" + for i, t := range strings.Split(selector, ",") { + if i%3 == 0 { + if i != 0 { + title += "" + } + title += "" + if i == 0 { + title += "" + } else { + title += "" + } + title += "" + title += "
Name" + name + "
Selector" + } else { + title += ", " + } + title += t + } + title += "
>" + return title +} + // PrepareGraphData returns a graph from the activity tree -func (at *ActivityTree) PrepareGraphData(title string, resolver *process.EBPFResolver) utils.Graph { +func (at *ActivityTree) PrepareGraphData(name string, selector string, resolver *process.EBPFResolver) utils.Graph { data := utils.Graph{ - Title: title, + Title: at.getGraphTitle(name, selector), Nodes: make(map[utils.GraphID]*utils.Node), } @@ -63,14 +107,17 @@ func (at *ActivityTree) prepareProcessNode(p *ProcessNode, data *utils.Graph, re args = strings.ReplaceAll(args, "\n", " ") args = strings.ReplaceAll(args, ">", "\\>") args = strings.ReplaceAll(args, "|", "\\|") + args = strings.ReplaceAll(args, "}", "\\}") + args = strings.ReplaceAll(args, "{", "\\{") } panGraphID := utils.NewGraphID(utils.NewNodeIDFromPtr(p)) pan := &utils.Node{ - ID: panGraphID, - Label: p.getNodeLabel(args), - Size: 60, - Color: processColor, - Shape: processShape, + ID: panGraphID, + Label: p.getNodeLabel(args), + Size: smallText, + Color: processColor, + Shape: processShape, + IsTable: true, } switch p.GenerationType { case ProfileDrift: @@ -113,22 +160,76 @@ func (at *ActivityTree) prepareProcessNode(p *ProcessNode, data *utils.Graph, re } } - for _, f := range p.Files { - fileID := at.prepareFileNode(f, data, "", panGraphID) - data.Edges = append(data.Edges, &utils.Edge{ - From: panGraphID, - To: fileID, - Color: fileColor, - }) + if len(p.Files) > 0 { + // create new subgraph for the filesystem events + subgraph := utils.SubGraph{ + Nodes: make(map[utils.GraphID]*utils.Node), + Title: "Filesystem", + TitleSize: mediumText, + Color: fileClusterColor, + Name: "cluster_" + panGraphID.Derive(utils.NewRandomNodeID()).String(), + } + + for _, f := range p.Files { + fileID := at.prepareFileNode(f, &subgraph, panGraphID) + data.Edges = append(data.Edges, &utils.Edge{ + From: panGraphID, + To: fileID, + Color: fileColor, + }) + } + + // add subgraph + data.SubGraphs = append(data.SubGraphs, &subgraph) + } + + for _, n := range p.NetworkDevices { + // create new subgraph for network device + subgraph := utils.SubGraph{ + Nodes: make(map[utils.GraphID]*utils.Node), + Title: "Network Flows", + TitleSize: mediumText, + } + deviceNodeID, ok := at.prepareNetworkDeviceNode(n, &subgraph, panGraphID) + if ok { + subgraph.Name = "cluster_" + deviceNodeID.String() + subgraph.Color = networkClusterColor + + data.Edges = append(data.Edges, &utils.Edge{ + From: panGraphID, + To: deviceNodeID, + Color: networkColor, + }) + + // build network flow nodes + for _, flowNode := range n.FlowNodes { + at.prepareNetworkFlowNode(flowNode, &subgraph, deviceNodeID) + } + + // add subgraph + data.SubGraphs = append(data.SubGraphs, &subgraph) + } } if len(p.Syscalls) > 0 { - syscallsNodeID := at.prepareSyscallsNode(p, data) + // create new subgraph for syscalls + subgraph := utils.SubGraph{ + Nodes: make(map[utils.GraphID]*utils.Node), + Title: "Syscalls", + TitleSize: mediumText, + Color: processCategoryClusterColor, + } + + syscallsNodeID := at.prepareSyscallsNode(p, &subgraph) + subgraph.Name = "cluster_" + syscallsNodeID.String() data.Edges = append(data.Edges, &utils.Edge{ From: utils.NewGraphID(utils.NewNodeIDFromPtr(p)), To: syscallsNodeID, - Color: processColor, + Color: processCategoryColor, }) + + // add subgraph + data.SubGraphs = append(data.SubGraphs, &subgraph) } for _, child := range p.Children { @@ -157,7 +258,7 @@ func (at *ActivityTree) prepareDNSNode(n *DNSNode, data *utils.Graph, processID dnsNode := &utils.Node{ ID: 
processID.Derive(utils.NewNodeIDFromPtr(n)), Label: name, - Size: 30, + Size: smallText, Color: networkColor, Shape: networkShape, } @@ -172,7 +273,7 @@ func (at *ActivityTree) prepareDNSNode(n *DNSNode, data *utils.Graph, processID } func (at *ActivityTree) prepareIMDSNode(n *IMDSNode, data *utils.Graph, processID utils.GraphID) (utils.GraphID, bool) { - label := "<" + label := tableHeader label += "" label += "" if len(n.Event.UserAgent) > 0 { @@ -198,7 +299,7 @@ func (at *ActivityTree) prepareIMDSNode(n *IMDSNode, data *utils.Graph, processI imdsNode := &utils.Node{ ID: processID.Derive(utils.NewNodeIDFromPtr(n)), Label: label, - Size: 30, + Size: smallText, Color: networkColor, Shape: networkShape, IsTable: true, @@ -213,6 +314,71 @@ func (at *ActivityTree) prepareIMDSNode(n *IMDSNode, data *utils.Graph, processI return imdsNode.ID, true } +func (at *ActivityTree) prepareNetworkDeviceNode(n *NetworkDeviceNode, data *utils.SubGraph, processID utils.GraphID) (utils.GraphID, bool) { + label := tableHeader + label += "" + label += "" + label += "" + label += "
IMDS" + n.Event.Type + "
Cloud provider" + n.Event.CloudProvider + "
Device name" + n.Context.IfName + "
Index" + strconv.Itoa(int(n.Context.IfIndex)) + "
Network namespace" + strconv.Itoa(int(n.Context.NetNS)) + "
>" + + deviceNode := &utils.Node{ + ID: processID.Derive(utils.NewNodeIDFromPtr(n)), + Label: label, + Size: smallText, + Color: networkColor, + Shape: networkShape, + IsTable: true, + } + + switch n.GenerationType { + case Runtime, Snapshot, Unknown: + deviceNode.FillColor = networkRuntimeColor + case ProfileDrift: + deviceNode.FillColor = networkProfileDriftColor + } + data.Nodes[deviceNode.ID] = deviceNode + return deviceNode.ID, true +} + +func (at *ActivityTree) prepareNetworkFlowNode(n *FlowNode, data *utils.SubGraph, deviceID utils.GraphID) { + label := tableHeader + label += "Source" + fmt.Sprintf("%s:%d", n.Flow.Source.IPNet.String(), n.Flow.Source.Port) + "" + if n.Flow.Source.IsPublicResolved { + label += "Is src public ?" + strconv.FormatBool(n.Flow.Source.IsPublic) + "" + } + label += "Destination" + fmt.Sprintf("%s:%d", n.Flow.Destination.IPNet.String(), n.Flow.Destination.Port) + "" + if n.Flow.Destination.IsPublicResolved { + label += "Is dst public ?" + strconv.FormatBool(n.Flow.Destination.IsPublic) + "" + } + label += "L4 protocol" + model.L4Protocol(n.Flow.L4Protocol).String() + "" + label += "Egress" + strconv.Itoa(int(n.Flow.Egress.DataSize)) + " bytes / " + strconv.Itoa(int(n.Flow.Egress.PacketCount)) + " pkts" + label += "Ingress" + strconv.Itoa(int(n.Flow.Ingress.DataSize)) + " bytes / " + strconv.Itoa(int(n.Flow.Ingress.PacketCount)) + " pkts" + label += ">" + + flowNode := &utils.Node{ + ID: deviceID.Derive(utils.NewNodeIDFromPtr(&n.Flow.Source)), + Label: label, + Size: smallText, + Color: networkColor, + Shape: networkShape, + IsTable: true, + } + + switch n.GenerationType { + case Runtime, Snapshot, Unknown: + flowNode.FillColor = networkRuntimeColor + case ProfileDrift: + flowNode.FillColor = networkProfileDriftColor + } + data.Nodes[flowNode.ID] = flowNode + + data.Edges = append(data.Edges, &utils.Edge{ + From: deviceID, + To: flowNode.ID, + Color: networkColor, + }) +} + func (at *ActivityTree) prepareSocketNode(n *SocketNode, data *utils.Graph, processID utils.GraphID) utils.GraphID { targetID := processID.Derive(utils.NewNodeIDFromPtr(n)) @@ -220,7 +386,7 @@ func (at *ActivityTree) prepareSocketNode(n *SocketNode, data *utils.Graph, proc socketNode := &utils.Node{ ID: targetID, Label: n.Family, - Size: 30, + Size: smallText, Color: networkColor, Shape: networkShape, } @@ -238,7 +404,7 @@ func (at *ActivityTree) prepareSocketNode(n *SocketNode, data *utils.Graph, proc bindNode := &utils.Node{ ID: processID.Derive(utils.NewNodeIDFromPtr(n), utils.NewNodeID(uint64(i+1))), Label: fmt.Sprintf("[%s]:%d", node.IP, node.Port), - Size: 30, + Size: smallText, Color: networkColor, Shape: networkShape, } @@ -260,14 +426,15 @@ func (at *ActivityTree) prepareSocketNode(n *SocketNode, data *utils.Graph, proc return targetID } -func (at *ActivityTree) prepareFileNode(f *FileNode, data *utils.Graph, prefix string, processID utils.GraphID) utils.GraphID { +func (at *ActivityTree) prepareFileNode(f *FileNode, data *utils.SubGraph, processID utils.GraphID) utils.GraphID { mergedID := processID.Derive(utils.NewNodeIDFromPtr(f)) fn := &utils.Node{ - ID: mergedID, - Label: f.getNodeLabel(), - Size: 30, - Color: fileColor, - Shape: fileShape, + ID: mergedID, + Label: f.getNodeLabel(""), + Size: smallText, + Color: fileColor, + Shape: fileShape, + IsTable: true, } switch f.GenerationType { case ProfileDrift: @@ -278,32 +445,32 @@ func (at *ActivityTree) prepareFileNode(f *FileNode, data *utils.Graph, prefix s fn.FillColor = fileSnapshotColor } data.Nodes[mergedID] = fn - - for 
_, child := range f.Children { - childID := at.prepareFileNode(child, data, prefix+f.Name, processID) - data.Edges = append(data.Edges, &utils.Edge{ - From: mergedID, - To: childID, - Color: fileColor, - }) - } return mergedID } -func (at *ActivityTree) prepareSyscallsNode(p *ProcessNode, data *utils.Graph) utils.GraphID { - label := "<" - for _, s := range p.Syscalls { - label += "" +func (at *ActivityTree) prepareSyscallsNode(p *ProcessNode, data *utils.SubGraph) utils.GraphID { + label := tableHeader + for i, s := range p.Syscalls { + if i%5 == 0 { + if i != 0 { + label += "" + } + label += "" label += "
" + model.Syscall(s.Syscall).String() + "
" + } else { + label += ", " + } + label += model.Syscall(s.Syscall).String() } + label += "
>" syscallsNode := &utils.Node{ ID: utils.NewGraphIDWithDescription("syscalls", utils.NewNodeIDFromPtr(p)), Label: label, - Size: 30, - Color: processColor, - FillColor: processSnapshotColor, - Shape: processShape, + Size: smallText, + Color: processCategoryColor, + FillColor: processCategorySnapshotColor, + Shape: processCategoryShape, IsTable: true, } data.Nodes[syscallsNode.ID] = syscallsNode diff --git a/pkg/security/security_profile/activity_tree/activity_tree_proto_dec_v1.go b/pkg/security/security_profile/activity_tree/activity_tree_proto_dec_v1.go index efa5b301b31fc..fac743d212e94 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_proto_dec_v1.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_proto_dec_v1.go @@ -9,10 +9,12 @@ package activitytree import ( + "net" "time" adproto "github.com/DataDog/agent-payload/v5/cws/dumpsv1" + "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" ) @@ -39,8 +41,9 @@ func protoDecodeProcessActivityNode(parent ProcessNodeParent, pan *adproto.Proce DNSNames: make(map[string]*DNSNode, len(pan.DnsNames)), IMDSEvents: make(map[model.IMDSEvent]*IMDSNode, len(pan.ImdsEvents)), Sockets: make([]*SocketNode, 0, len(pan.Sockets)), - Syscalls: make([]*SyscallNode, 0, len(pan.Syscalls)), + Syscalls: make([]*SyscallNode, 0, len(pan.SyscallNodes)), ImageTags: pan.ImageTags, + NetworkDevices: make(map[model.NetworkDeviceContext]*NetworkDeviceNode, len(pan.NetworkDevices)), } for _, rule := range pan.MatchedRules { @@ -73,13 +76,33 @@ func protoDecodeProcessActivityNode(parent ProcessNodeParent, pan *adproto.Proce ppan.Sockets = append(ppan.Sockets, protoDecodeProtoSocket(socket)) } - for _, sysc := range pan.Syscalls { - ppan.Syscalls = append(ppan.Syscalls, NewSyscallNode(int(sysc), "", Unknown)) + for _, sysc := range pan.SyscallNodes { + ppan.Syscalls = append(ppan.Syscalls, protoDecodeSyscallNode(sysc)) + } + + for _, networkDevice := range pan.NetworkDevices { + ppan.NetworkDevices[model.NetworkDeviceContext{ + NetNS: networkDevice.Netns, + IfIndex: networkDevice.Ifindex, + IfName: networkDevice.Ifname, + }] = protoDecodeNetworkDevice(networkDevice) } return ppan } +func protoDecodeSyscallNode(sysc *adproto.SyscallNode) *SyscallNode { + if sysc == nil { + return nil + } + + return &SyscallNode{ + ImageTags: sysc.ImageTags, + GenerationType: Runtime, + Syscall: int(sysc.Syscall), + } +} + func protoDecodeProcessNode(p *adproto.ProcessInfo) model.Process { if p == nil { return model.Process{} @@ -241,6 +264,67 @@ func protoDecodeDNSNode(dn *adproto.DNSNode) *DNSNode { return pdn } +func protoDecodeNetworkDevice(device *adproto.NetworkDeviceNode) *NetworkDeviceNode { + if device == nil { + return nil + } + ndn := &NetworkDeviceNode{ + MatchedRules: make([]*model.MatchedRule, 0, len(device.MatchedRules)), + FlowNodes: make(map[model.FiveTuple]*FlowNode, len(device.FlowNodes)), + Context: model.NetworkDeviceContext{ + NetNS: device.Netns, + IfIndex: device.Ifindex, + IfName: device.Ifname, + }, + } + + for _, rule := range device.MatchedRules { + ndn.MatchedRules = append(ndn.MatchedRules, protoDecodeProtoMatchedRule(rule)) + } + + for _, flow := range device.FlowNodes { + f := protoDecodeNetworkFlow(flow) + _, ok := ndn.FlowNodes[f.GetFiveTuple()] + if !ok { + fn := &FlowNode{ + ImageTags: flow.ImageTags, + GenerationType: Runtime, + Flow: *f, + } + ndn.FlowNodes[f.GetFiveTuple()] = fn + 
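
The decode loop above keys flow nodes by their five-tuple, so flows that repeat in a payload collapse onto a single FlowNode instead of producing duplicates. A hedged sketch of that keying; the addresses and ports are invented, and eval.IPNetFromIP is the same helper the decoder relies on:

```go
// assumed imports: "net", the secl compiler eval package, and the secl model package
func sameFlowKey() bool {
	src := model.IPPortContext{IPNet: *eval.IPNetFromIP(net.ParseIP("10.0.0.1")), Port: 53100}
	dst := model.IPPortContext{IPNet: *eval.IPNetFromIP(net.ParseIP("8.8.8.8")), Port: 53}

	a := model.Flow{Source: src, Destination: dst, L4Protocol: 17} // 17 = UDP
	b := model.Flow{Source: src, Destination: dst, L4Protocol: 17}

	// FiveTuple is used as a map key above, so it is comparable; identical
	// endpoints and protocol land on the same FlowNode entry.
	return a.GetFiveTuple() == b.GetFiveTuple()
}
```
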
} + } + + return ndn +} + +func protoDecodeNetworkFlow(flowNode *adproto.FlowNode) *model.Flow { + return &model.Flow{ + Source: protoDecodeIPPortContext(flowNode.Source), + Destination: protoDecodeIPPortContext(flowNode.Destination), + L3Protocol: uint16(flowNode.L3Protocol), + L4Protocol: uint16(flowNode.L4Protocol), + Ingress: protoDecodeNetworkStats(flowNode.Ingress), + Egress: protoDecodeNetworkStats(flowNode.Egress), + } +} + +func protoDecodeIPPortContext(ipPort *adproto.IPPortContext) model.IPPortContext { + ipc := model.IPPortContext{ + IPNet: *eval.IPNetFromIP(net.ParseIP(ipPort.Ip)), + Port: uint16(ipPort.Port), + } + return ipc +} + +func protoDecodeNetworkStats(stats *adproto.NetworkStats) model.NetworkStats { + ns := model.NetworkStats{ + DataSize: stats.DataSize, + PacketCount: stats.PacketCount, + } + return ns +} + func protoDecodeIMDSNode(in *adproto.IMDSNode) *IMDSNode { if in == nil { return nil diff --git a/pkg/security/security_profile/activity_tree/activity_tree_proto_enc_v1.go b/pkg/security/security_profile/activity_tree/activity_tree_proto_enc_v1.go index 3897b5a160fe9..f6a68d45cc401 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_proto_enc_v1.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_proto_enc_v1.go @@ -42,8 +42,9 @@ func processActivityNodeToProto(pan *ProcessNode) *adproto.ProcessActivityNode { DnsNames: make([]*adproto.DNSNode, 0, len(pan.DNSNames)), ImdsEvents: make([]*adproto.IMDSNode, 0, len(pan.IMDSEvents)), Sockets: make([]*adproto.SocketNode, 0, len(pan.Sockets)), - Syscalls: make([]uint32, 0, len(pan.Syscalls)), ImageTags: pan.ImageTags, + SyscallNodes: make([]*adproto.SyscallNode, 0, len(pan.Syscalls)), + NetworkDevices: make([]*adproto.NetworkDeviceNode, 0, len(pan.NetworkDevices)), } for _, rule := range pan.MatchedRules { @@ -71,12 +72,83 @@ func processActivityNodeToProto(pan *ProcessNode) *adproto.ProcessActivityNode { } for _, sysc := range pan.Syscalls { - ppan.Syscalls = append(ppan.Syscalls, uint32(sysc.Syscall)) + ppan.SyscallNodes = append(ppan.SyscallNodes, syscallNodeToProto(sysc)) + } + + for _, networkDevice := range pan.NetworkDevices { + ppan.NetworkDevices = append(ppan.NetworkDevices, networkDeviceToProto(networkDevice)) } return ppan } +func networkDeviceToProto(device *NetworkDeviceNode) *adproto.NetworkDeviceNode { + if device == nil { + return nil + } + + ndn := &adproto.NetworkDeviceNode{ + MatchedRules: make([]*adproto.MatchedRule, 0, len(device.MatchedRules)), + Netns: device.Context.NetNS, + Ifindex: device.Context.IfIndex, + Ifname: device.Context.IfName, + FlowNodes: make([]*adproto.FlowNode, 0, len(device.FlowNodes)), + } + + for _, rule := range device.MatchedRules { + ndn.MatchedRules = append(ndn.MatchedRules, matchedRuleToProto(rule)) + } + + for _, flowNode := range device.FlowNodes { + ndn.FlowNodes = append(ndn.FlowNodes, flowNodeToProto(flowNode.Flow, flowNode.ImageTags)) + } + + return ndn +} + +func flowNodeToProto(flow model.Flow, tags []string) *adproto.FlowNode { + return &adproto.FlowNode{ + ImageTags: tags, + L3Protocol: uint32(flow.L3Protocol), + L4Protocol: uint32(flow.L4Protocol), + Source: ipPortContextToProto(&flow.Source), + Destination: ipPortContextToProto(&flow.Destination), + Ingress: networkStatsToProto(&flow.Ingress), + Egress: networkStatsToProto(&flow.Egress), + } +} + +func ipPortContextToProto(ipPort *model.IPPortContext) *adproto.IPPortContext { + if ipPort == nil { + return nil + } + return &adproto.IPPortContext{ + Ip: 
ipPort.IPNet.IP.String(), + Port: uint32(ipPort.Port), + } +} + +func networkStatsToProto(stats *model.NetworkStats) *adproto.NetworkStats { + if stats == nil { + return nil + } + return &adproto.NetworkStats{ + DataSize: stats.DataSize, + PacketCount: stats.PacketCount, + } +} + +func syscallNodeToProto(sysc *SyscallNode) *adproto.SyscallNode { + if sysc == nil { + return nil + } + + return &adproto.SyscallNode{ + ImageTags: sysc.ImageTags, + Syscall: int32(sysc.Syscall), + } +} + func processNodeToProto(p *model.Process) *adproto.ProcessInfo { if p == nil { return nil diff --git a/pkg/security/security_profile/activity_tree/activity_tree_stats.go b/pkg/security/security_profile/activity_tree/activity_tree_stats.go index 7f10be52327b4..4f6e5c31607aa 100644 --- a/pkg/security/security_profile/activity_tree/activity_tree_stats.go +++ b/pkg/security/security_profile/activity_tree/activity_tree_stats.go @@ -27,6 +27,7 @@ type Stats struct { SocketNodes int64 IMDSNodes int64 SyscallNodes int64 + FlowNodes int64 counts map[model.EventType]*statsPerEventType } @@ -74,6 +75,7 @@ func (stats *Stats) ApproximateSize() int64 { total += stats.SocketNodes * int64(unsafe.Sizeof(SocketNode{})) // 40 total += stats.IMDSNodes * int64(unsafe.Sizeof(IMDSNode{})) total += stats.SyscallNodes * int64(unsafe.Sizeof(SyscallNode{})) + total += stats.FlowNodes * int64(unsafe.Sizeof(FlowNode{})) return total } diff --git a/pkg/security/security_profile/activity_tree/file_node.go b/pkg/security/security_profile/activity_tree/file_node.go index 3f5b3033512c2..59fdf2cb26305 100644 --- a/pkg/security/security_profile/activity_tree/file_node.go +++ b/pkg/security/security_profile/activity_tree/file_node.go @@ -12,6 +12,7 @@ import ( "fmt" "io" "sort" + "strconv" "strings" "time" @@ -67,25 +68,44 @@ func NewFileNode(fileEvent *model.FileEvent, event *model.Event, name string, im return fan } -func (fn *FileNode) getNodeLabel() string { - label := fn.Name - if fn.Open != nil { - label += " [open]" +func (fn *FileNode) getNodeLabel(prefix string) string { + var label string + if prefix == "" { + label += tableHeader + label += "" + label += "Events" + label += "Hash count" + label += "File" + label += "Package" + label += "" + } + label += fn.buildNodeRow(prefix) + for _, child := range fn.Children { + label += child.getNodeLabel(prefix + "/" + fn.Name) } - if fn.File != nil { - if len(fn.File.PkgName) != 0 { - label += fmt.Sprintf("|%s:%s}", fn.File.PkgName, fn.File.PkgVersion) - } - // add hashes - if len(fn.File.Hashes) > 0 { - label += fmt.Sprintf("|%v", strings.Join(fn.File.Hashes, "|")) - } else { - label += fmt.Sprintf("|(%s)", fn.File.HashState) - } + if prefix == "" { + label += ">" } return label } +func (fn *FileNode) buildNodeRow(prefix string) string { + var out string + if fn.Open != nil && fn.File != nil { + var pkg string + if len(fn.File.PkgName) != 0 { + pkg = fmt.Sprintf("%s:%s", fn.File.PkgName, fn.File.PkgVersion) + } + out += "" + out += "open" + out += "" + strconv.Itoa(len(fn.File.Hashes)) + " hash(es)" + out += "" + fmt.Sprintf("%s/%s", prefix, fn.Name) + "" + out += "" + pkg + "" + out += "" + } + return out +} + func (fn *FileNode) enrichFromEvent(event *model.Event) { if event == nil { return diff --git a/pkg/security/security_profile/activity_tree/flow_node.go b/pkg/security/security_profile/activity_tree/flow_node.go new file mode 100644 index 0000000000000..5f41defa87b40 --- /dev/null +++ b/pkg/security/security_profile/activity_tree/flow_node.go @@ -0,0 +1,56 @@ +// Unless explicitly stated 
otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +// Package activitytree holds activitytree related files +package activitytree + +import ( + "github.com/DataDog/datadog-agent/pkg/security/secl/model" +) + +// FlowNode is used to store a flow node +type FlowNode struct { + ImageTags []string + GenerationType NodeGenerationType + + Flow model.Flow +} + +// NewFlowNode returns a new FlowNode instance +func NewFlowNode(flow model.Flow, generationType NodeGenerationType, imageTag string) *FlowNode { + node := &FlowNode{ + GenerationType: generationType, + Flow: flow, + } + node.appendImageTag(imageTag) + return node +} + +func (node *FlowNode) appendImageTag(imageTag string) { + node.ImageTags, _ = AppendIfNotPresent(node.ImageTags, imageTag) +} + +func (node *FlowNode) evictImageTag(imageTag string) bool { + imageTags, removed := removeImageTagFromList(node.ImageTags, imageTag) + if removed { + if len(imageTags) == 0 { + return true + } + node.ImageTags = imageTags + } + return false +} + +func (node *FlowNode) addFlow(flow model.Flow, imageTag string) { + if imageTag != "" { + node.appendImageTag(imageTag) + } + + // add metrics + node.Flow.Egress.Add(flow.Egress) + node.Flow.Ingress.Add(flow.Ingress) +} diff --git a/pkg/security/security_profile/activity_tree/metadata/metadata.go b/pkg/security/security_profile/activity_tree/metadata/metadata.go index 882e7fb69f5c0..d04f3b4cd8135 100644 --- a/pkg/security/security_profile/activity_tree/metadata/metadata.go +++ b/pkg/security/security_profile/activity_tree/metadata/metadata.go @@ -27,7 +27,7 @@ type Metadata struct { ProtobufVersion string `json:"protobuf_version"` DifferentiateArgs bool `json:"differentiate_args"` ContainerID containerutils.ContainerID `json:"-"` - CGroupContext model.CGroupContext `json:"-"` + CGroupContext model.CGroupContext `json:"cgroup"` Start time.Time `json:"start"` End time.Time `json:"end"` Size uint64 `json:"activity_dump_size,omitempty"` diff --git a/pkg/security/security_profile/activity_tree/metadata/metadata_proto_dec_v1.go b/pkg/security/security_profile/activity_tree/metadata/metadata_proto_dec_v1.go index e08f483efac61..308ca640ad4a8 100644 --- a/pkg/security/security_profile/activity_tree/metadata/metadata_proto_dec_v1.go +++ b/pkg/security/security_profile/activity_tree/metadata/metadata_proto_dec_v1.go @@ -12,6 +12,7 @@ import ( adproto "github.com/DataDog/agent-payload/v5/cws/dumpsv1" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" ) @@ -36,5 +37,10 @@ func ProtoMetadataToMetadata(meta *adproto.Metadata) Metadata { End: activity_tree.ProtoDecodeTimestamp(meta.End), Size: meta.Size, Serialization: meta.GetSerialization(), + + CGroupContext: model.CGroupContext{ + CGroupID: containerutils.CGroupID(meta.CgroupId), + CGroupManager: meta.CgroupManager, + }, } } diff --git a/pkg/security/security_profile/activity_tree/metadata/metadata_proto_enc_v1.go b/pkg/security/security_profile/activity_tree/metadata/metadata_proto_enc_v1.go index e28b2ce901c63..b8435f62627f1 100644 --- a/pkg/security/security_profile/activity_tree/metadata/metadata_proto_enc_v1.go +++ b/pkg/security/security_profile/activity_tree/metadata/metadata_proto_enc_v1.go @@ -35,6 
+35,8 @@ func ToProto(meta *Metadata) *adproto.Metadata { Size: meta.Size, Arch: meta.Arch, Serialization: meta.Serialization, + CgroupId: string(meta.CGroupContext.CGroupID), + CgroupManager: meta.CGroupContext.CGroupManager, } return pmeta diff --git a/pkg/security/security_profile/activity_tree/network_device_node.go b/pkg/security/security_profile/activity_tree/network_device_node.go new file mode 100644 index 0000000000000..3560569c756df --- /dev/null +++ b/pkg/security/security_profile/activity_tree/network_device_node.go @@ -0,0 +1,77 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux + +// Package activitytree holds activitytree related files +package activitytree + +import ( + "github.com/DataDog/datadog-agent/pkg/security/secl/model" +) + +// NetworkDeviceNode is used to store a Network Device node +type NetworkDeviceNode struct { + MatchedRules []*model.MatchedRule + GenerationType NodeGenerationType + + Context model.NetworkDeviceContext + + // FlowNodes are indexed by source IPPortContexts + FlowNodes map[model.FiveTuple]*FlowNode +} + +// NewNetworkDeviceNode returns a new NetworkDeviceNode instance +func NewNetworkDeviceNode(ctx *model.NetworkDeviceContext, generationType NodeGenerationType) *NetworkDeviceNode { + node := &NetworkDeviceNode{ + GenerationType: generationType, + Context: *ctx, + FlowNodes: make(map[model.FiveTuple]*FlowNode), + } + return node +} + +func (netdevice *NetworkDeviceNode) appendImageTag(imageTag string) { + for _, flow := range netdevice.FlowNodes { + flow.appendImageTag(imageTag) + } +} + +func (netdevice *NetworkDeviceNode) evictImageTag(imageTag string) bool { + for key, flow := range netdevice.FlowNodes { + if shouldRemove := flow.evictImageTag(imageTag); !shouldRemove { + delete(netdevice.FlowNodes, key) + } + } + + return len(netdevice.FlowNodes) == 0 +} + +func (netdevice *NetworkDeviceNode) insertNetworkFlowMonitorEvent(event *model.NetworkFlowMonitorEvent, dryRun bool, rules []*model.MatchedRule, generationType NodeGenerationType, imageTag string, stats *Stats) bool { + if len(rules) > 0 { + netdevice.MatchedRules = model.AppendMatchedRule(netdevice.MatchedRules, rules) + } + + var newFlow bool + for _, flow := range event.Flows { + existingNode, ok := netdevice.FlowNodes[flow.GetFiveTuple()] + if ok { + if !dryRun { + existingNode.addFlow(flow, imageTag) + } + } else { + newFlow = true + if dryRun { + // exit early + return newFlow + } + // create new entry + netdevice.FlowNodes[flow.GetFiveTuple()] = NewFlowNode(flow, generationType, imageTag) + stats.FlowNodes++ + } + } + + return newFlow +} diff --git a/pkg/security/security_profile/activity_tree/process_node.go b/pkg/security/security_profile/activity_tree/process_node.go index 11525c9eacc79..ce311c08a56d4 100644 --- a/pkg/security/security_profile/activity_tree/process_node.go +++ b/pkg/security/security_profile/activity_tree/process_node.go @@ -10,15 +10,16 @@ package activitytree import ( "fmt" - "io" - "sort" - "strings" - "github.com/DataDog/datadog-agent/pkg/security/resolvers" sprocess "github.com/DataDog/datadog-agent/pkg/security/resolvers/process" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/utils" - "golang.org/x/exp/slices" + "github.com/DataDog/datadog-agent/pkg/security/utils/pathutils" + 
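// A minimal, self-contained sketch of the flow-aggregation pattern used by
// NetworkDeviceNode.insertNetworkFlowMonitorEvent above: flows are keyed by
// their five-tuple, and a repeated flow only accumulates its counters instead
// of adding a new node. The types below are illustrative stand-ins for the
// secl model types (model.Flow, model.FiveTuple, model.NetworkStats), not the
// real API.
package main

import "fmt"

// fiveTuple is an illustrative stand-in for model.FiveTuple.
type fiveTuple struct {
	srcIP, dstIP     string
	srcPort, dstPort uint16
	l4Proto          uint8
}

// netStats is an illustrative stand-in for model.NetworkStats.
type netStats struct {
	DataSize    uint64
	PacketCount uint64
}

// Add accumulates the counters of another sample, mirroring NetworkStats.Add.
func (s *netStats) Add(o netStats) {
	s.DataSize += o.DataSize
	s.PacketCount += o.PacketCount
}

// flow is an illustrative stand-in for model.Flow.
type flow struct {
	tuple   fiveTuple
	Ingress netStats
	Egress  netStats
}

func main() {
	key := fiveTuple{srcIP: "10.0.0.1", dstIP: "10.0.0.2", srcPort: 4242, dstPort: 443, l4Proto: 6}
	samples := []flow{
		{tuple: key, Egress: netStats{DataSize: 100, PacketCount: 2}},
		{tuple: key, Egress: netStats{DataSize: 50, PacketCount: 1}},
	}

	// One node per five-tuple; repeated samples update the existing node.
	nodes := make(map[fiveTuple]*flow)
	for _, sample := range samples {
		if existing, ok := nodes[sample.tuple]; ok {
			existing.Ingress.Add(sample.Ingress)
			existing.Egress.Add(sample.Egress)
			continue
		}
		sample := sample
		nodes[sample.tuple] = &sample
	}

	fmt.Println(len(nodes), nodes[key].Egress.DataSize, nodes[key].Egress.PacketCount) // 1 150 3
}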
"html" + "io" + "slices" + "sort" + "strconv" ) // ProcessNodeParent is an interface used to identify the parent of a process node @@ -38,9 +39,10 @@ type ProcessNode struct { ImageTags []string MatchedRules []*model.MatchedRule - Files map[string]*FileNode - DNSNames map[string]*DNSNode - IMDSEvents map[model.IMDSEvent]*IMDSNode + Files map[string]*FileNode + DNSNames map[string]*DNSNode + IMDSEvents map[model.IMDSEvent]*IMDSNode + NetworkDevices map[model.NetworkDeviceContext]*NetworkDeviceNode Sockets []*SocketNode Syscalls []*SyscallNode @@ -62,6 +64,7 @@ func NewProcessNode(entry *model.ProcessCacheEntry, generationType NodeGeneratio Files: make(map[string]*FileNode), DNSNames: make(map[string]*DNSNode), IMDSEvents: make(map[model.IMDSEvent]*IMDSNode), + NetworkDevices: make(map[model.NetworkDeviceContext]*NetworkDeviceNode), } } @@ -95,22 +98,35 @@ func (pn *ProcessNode) AppendImageTag(imageTag string) { } func (pn *ProcessNode) getNodeLabel(args string) string { - var label string + label := tableHeader + + label += "Command" + var cmd string if sprocess.IsBusybox(pn.Process.FileEvent.PathnameStr) { arg0, _ := sprocess.GetProcessArgv0(&pn.Process) - label = fmt.Sprintf("%s %s", arg0, args) + cmd = fmt.Sprintf("%s %s", arg0, args) } else { - label = fmt.Sprintf("%s %s", pn.Process.FileEvent.PathnameStr, args) + cmd = fmt.Sprintf("%s %s", pn.Process.FileEvent.PathnameStr, args) } + if len(cmd) > 100 { + cmd = cmd[:100] + " ..." + } + label += html.EscapeString(cmd) + label += "" + if len(pn.Process.FileEvent.PkgName) != 0 { - label += fmt.Sprintf(" \\{%s %s\\}", pn.Process.FileEvent.PkgName, pn.Process.FileEvent.PkgVersion) + label += "Package" + fmt.Sprintf("%s:%s", pn.Process.FileEvent.PkgName, pn.Process.FileEvent.PkgVersion) + "" } // add hashes if len(pn.Process.FileEvent.Hashes) > 0 { - label += fmt.Sprintf("|%v", strings.Join(pn.Process.FileEvent.Hashes, "|")) + label += "Hashes" + pn.Process.FileEvent.Hashes[0] + "" + for _, h := range pn.Process.FileEvent.Hashes { + label += "" + h + "" + } } else { - label += fmt.Sprintf("|(%s)", pn.Process.FileEvent.HashState) + label += "Hash state" + pn.Process.FileEvent.HashState.String() + "" } + label += ">" return label } @@ -168,7 +184,7 @@ func (pn *ProcessNode) scrubAndReleaseArgsEnvs(resolver *sprocess.EBPFResolver) // Matches return true if the process fields used to generate the dump are identical with the provided model.Process func (pn *ProcessNode) Matches(entry *model.Process, matchArgs bool, normalize bool) bool { if normalize { - match := utils.PathPatternMatch(pn.Process.FileEvent.PathnameStr, entry.FileEvent.PathnameStr, utils.PathPatternMatchOpts{WildcardLimit: 3, PrefixNodeRequired: 1, SuffixNodeRequired: 1, NodeSizeLimit: 8}) + match := pathutils.PathPatternMatch(pn.Process.FileEvent.PathnameStr, entry.FileEvent.PathnameStr, pathutils.PathPatternMatchOpts{WildcardLimit: 3, PrefixNodeRequired: 1, SuffixNodeRequired: 1, NodeSizeLimit: 8}) if !match { return false } @@ -338,6 +354,21 @@ func (pn *ProcessNode) InsertIMDSEvent(evt *model.Event, imageTag string, genera return true } +// InsertNetworkFlowMonitorEvent inserts a Network Flow Monitor event in a process node +func (pn *ProcessNode) InsertNetworkFlowMonitorEvent(evt *model.Event, imageTag string, generationType NodeGenerationType, stats *Stats, dryRun bool) bool { + deviceNode, ok := pn.NetworkDevices[evt.NetworkFlowMonitor.Device] + if ok { + return deviceNode.insertNetworkFlowMonitorEvent(&evt.NetworkFlowMonitor, dryRun, evt.Rules, generationType, imageTag, 
stats) + } + + if !dryRun { + newNode := NewNetworkDeviceNode(&evt.NetworkFlowMonitor.Device, generationType) + newNode.insertNetworkFlowMonitorEvent(&evt.NetworkFlowMonitor, dryRun, evt.Rules, generationType, imageTag, stats) + pn.NetworkDevices[evt.NetworkFlowMonitor.Device] = newNode + } + return true +} + // InsertBindEvent inserts a bind event in a process node func (pn *ProcessNode) InsertBindEvent(evt *model.Event, imageTag string, generationType NodeGenerationType, stats *Stats, dryRun bool) bool { if evt.Bind.SyscallEvent.Retval != 0 { @@ -401,6 +432,12 @@ func (pn *ProcessNode) TagAllNodes(imageTag string) { for _, scall := range pn.Syscalls { scall.appendImageTag(imageTag) } + for _, imds := range pn.IMDSEvents { + imds.appendImageTag(imageTag) + } + for _, device := range pn.NetworkDevices { + device.appendImageTag(imageTag) + } for _, child := range pn.Children { child.TagAllNodes(imageTag) } @@ -453,6 +490,13 @@ func (pn *ProcessNode) EvictImageTag(imageTag string, DNSNames *utils.StringKeys } } + // Evict image tag from network device nodes + for key, device := range pn.NetworkDevices { + if shouldRemoveNode := device.evictImageTag(imageTag); shouldRemoveNode { + delete(pn.NetworkDevices, key) + } + } + newSockets := []*SocketNode{} for _, sock := range pn.Sockets { if shouldRemoveNode := sock.evictImageTag(imageTag); !shouldRemoveNode { diff --git a/pkg/security/security_profile/activity_tree/process_node_snapshot.go b/pkg/security/security_profile/activity_tree/process_node_snapshot.go index bd5cb069083b4..506ec5e6b62a2 100644 --- a/pkg/security/security_profile/activity_tree/process_node_snapshot.go +++ b/pkg/security/security_profile/activity_tree/process_node_snapshot.go @@ -129,7 +129,7 @@ func (pn *ProcessNode) addFiles(files []string, stats *Stats, newEvent func() *m } evt := newEvent() - fullPath := filepath.Join(utils.ProcRootPath(pn.Process.Pid), f) + fullPath := utils.ProcRootFilePath(pn.Process.Pid, f) if evt.ProcessContext == nil { evt.ProcessContext = &model.ProcessContext{} } @@ -216,22 +216,57 @@ func getMemoryMappedFiles(pid int32, processEventPath string) (files []string, _ scanner := bufio.NewScanner(smapsFile) for scanner.Scan() && len(files) < MaxMmapedFiles { - line := scanner.Text() - fields := strings.Fields(line) + line := scanner.Bytes() - if len(fields) < 6 || strings.HasSuffix(fields[0], ":") { + path, ok := extractPathFromSmapsLine(line) + if !ok { continue } - path := strings.Join(fields[5:], " ") - if len(path) != 0 && path != processEventPath { - files = append(files, path) + if len(path) == 0 { + continue + } + + if path == processEventPath { + continue } + + // skip [vdso], [stack], [heap] and similar mappings + if strings.HasPrefix(path, "[") { + continue + } + + files = append(files, path) } return files, scanner.Err() } +func extractPathFromSmapsLine(line []byte) (string, bool) { + inSpace := false + spaceCount := 0 + for i, c := range line { + if c == ' ' || c == '\t' { + // check for fields separator + if !inSpace && spaceCount == 0 && i > 0 { + if line[i-1] == ':' { + return "", false + } + } + + if !inSpace { + inSpace = true + spaceCount++ + } + } else if spaceCount == 5 { + return string(line[i:]), true + } else { + inSpace = false + } + } + return "", false +} + func (pn *ProcessNode) snapshotBoundSockets(p *process.Process, stats *Stats, newEvent func() *model.Event) { // list all the file descriptors opened by the process FDs, err := p.OpenFiles() diff --git 
a/pkg/security/security_profile/activity_tree/process_node_snapshot_test.go b/pkg/security/security_profile/activity_tree/process_node_snapshot_test.go index 5dade1292b52c..83d87b4cd6f58 100644 --- a/pkg/security/security_profile/activity_tree/process_node_snapshot_test.go +++ b/pkg/security/security_profile/activity_tree/process_node_snapshot_test.go @@ -37,6 +37,9 @@ func TestSnapshotMemoryMappedFiles(t *testing.T) { if len(smap.Path) == 0 { continue } + if smap.Path[0] == '[' { + continue + } gopsutilFiles = append(gopsutilFiles, smap.Path) } @@ -48,3 +51,65 @@ func TestSnapshotMemoryMappedFiles(t *testing.T) { assert.Equal(t, gopsutilFiles, ownImplemFiles) } + +func TestExtractPathFromSmapsLine(t *testing.T) { + entries := []struct { + name string + line string + path string + ok bool + }{ + { + name: "stack", + line: "fffe33c3000-ffffe33e4000 rw-p 00000000 00:00 0 [stack]", + path: "[stack]", + ok: true, + }, + { + name: "regular", + line: "e1cc8924f000-e1cc89251000 rw-p 00030000 fd:00 6259 /usr/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1", + path: "/usr/lib/aarch64-linux-gnu/ld-linux-aarch64.so.1", + ok: true, + }, + { + name: "regular with space", + line: "e1cc8924f000-e1cc89251000 rw-p 00030000 fd:00 6259 /usr/lib/aarch64-linux-gnu/ld linux aarch64.so.1", + path: "/usr/lib/aarch64-linux-gnu/ld linux aarch64.so.1", + ok: true, + }, + { + name: "field", + line: "KernelPageSize: 4 kB", + path: "", + ok: false, + }, + { + name: "vmflags", + line: "VmFlags: rd wr mr mw me ac", + path: "", + ok: false, + }, + // this one is not found today in actual smaps but + // if for some reason a new flags is added then the + // number of spaces matches the number of spaces in + // a file line, so it's best to test it + { + name: "vmflags future", + line: "VmFlags: rd wr mr mw me ac abc", + path: "", + ok: false, + }, + } + + for _, entry := range entries { + t.Run(entry.name, func(t *testing.T) { + path, ok := extractPathFromSmapsLine([]byte(entry.line)) + if ok != entry.ok { + t.Errorf("expected ok=%t, got %t", entry.ok, ok) + } + if path != entry.path { + t.Errorf("expected %s, got %s", entry.path, path) + } + }) + } +} diff --git a/pkg/security/security_profile/activity_tree/socket_node.go b/pkg/security/security_profile/activity_tree/socket_node.go index 73e4e0c3c174a..5c6314a326573 100644 --- a/pkg/security/security_profile/activity_tree/socket_node.go +++ b/pkg/security/security_profile/activity_tree/socket_node.go @@ -9,8 +9,9 @@ package activitytree import ( + "slices" + "github.com/DataDog/datadog-agent/pkg/security/secl/model" - "golang.org/x/exp/slices" ) // BindNode is used to store a bind node diff --git a/pkg/security/security_profile/dump/activity_dump.go b/pkg/security/security_profile/dump/activity_dump.go index 4df6a218cebe3..38dc5558b7994 100644 --- a/pkg/security/security_profile/dump/activity_dump.go +++ b/pkg/security/security_profile/dump/activity_dump.go @@ -128,11 +128,11 @@ type SyscallPolicy struct { } // NewActivityDumpLoadConfig returns a new instance of ActivityDumpLoadConfig -func NewActivityDumpLoadConfig(evt []model.EventType, timeout time.Duration, waitListTimeout time.Duration, rate int, start time.Time, resolver *stime.Resolver) *model.ActivityDumpLoadConfig { +func NewActivityDumpLoadConfig(evt []model.EventType, timeout time.Duration, waitListTimeout time.Duration, rate uint16, start time.Time, resolver *stime.Resolver) *model.ActivityDumpLoadConfig { adlc := &model.ActivityDumpLoadConfig{ TracedEventTypes: evt, Timeout: timeout, - Rate: uint32(rate), + Rate: 
uint16(rate), } if resolver != nil { adlc.StartTimestampRaw = uint64(resolver.ComputeMonotonicTimestamp(start)) @@ -224,7 +224,8 @@ func NewActivityDumpFromMessage(msg *api.ActivityDumpMessage) (*ActivityDump, er DifferentiateArgs: metadata.GetDifferentiateArgs(), ContainerID: containerutils.ContainerID(metadata.GetContainerID()), CGroupContext: model.CGroupContext{ - CGroupID: containerutils.CGroupID(metadata.GetCGroupID()), + CGroupID: containerutils.CGroupID(metadata.GetCGroupID()), + CGroupManager: metadata.GetCGroupManager(), }, Start: startTime, End: startTime.Add(timeout), @@ -667,6 +668,8 @@ func (ad *ActivityDump) resolveTags() error { } } + ad.Tags = append(ad.Tags, "cgroup_manager:"+containerutils.CGroupManager(ad.Metadata.CGroupContext.CGroupFlags&containerutils.CGroupManagerMask).String()) + return nil } @@ -697,6 +700,7 @@ func (ad *ActivityDump) ToSecurityActivityDumpMessage() *api.ActivityDumpMessage DifferentiateArgs: ad.Metadata.DifferentiateArgs, ContainerID: string(ad.Metadata.ContainerID), CGroupID: string(ad.Metadata.CGroupContext.CGroupID), + CGroupManager: containerutils.CGroupManager(ad.Metadata.CGroupContext.CGroupFlags & containerutils.CGroupManagerMask).String(), Start: ad.Metadata.Start.Format(time.RFC822), Timeout: ad.LoadConfig.Timeout.String(), Size: ad.Metadata.Size, @@ -710,6 +714,9 @@ func (ad *ActivityDump) ToSecurityActivityDumpMessage() *api.ActivityDumpMessage FileNodesCount: ad.ActivityTree.Stats.FileNodes, DNSNodesCount: ad.ActivityTree.Stats.DNSNodes, SocketNodesCount: ad.ActivityTree.Stats.SocketNodes, + IMDSNodesCount: ad.ActivityTree.Stats.IMDSNodes, + SyscallNodesCount: ad.ActivityTree.Stats.SyscallNodes, + FlowNodesCount: ad.ActivityTree.Stats.FlowNodes, ApproximateSize: ad.ActivityTree.Stats.ApproximateSize(), } } diff --git a/pkg/security/security_profile/dump/graph.go b/pkg/security/security_profile/dump/graph.go index 5a82ce371f5ae..2f9244b1dad68 100644 --- a/pkg/security/security_profile/dump/graph.go +++ b/pkg/security/security_profile/dump/graph.go @@ -11,7 +11,6 @@ package dump import ( "bytes" "fmt" - "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/resolvers/process" "github.com/DataDog/datadog-agent/pkg/security/utils" @@ -19,24 +18,42 @@ import ( // ActivityDumpGraphTemplate is the template used to generate graphs var ActivityDumpGraphTemplate = `digraph { - label = "{{ .Title }}" + label = {{ .Title }} labelloc = "t" - fontsize = 75 fontcolor = "black" fontname = "arial" + fontsize = 5 ratio = expand - ranksep = 2 + ranksep = 1.5 graph [pad=2] - node [margin=0.3, padding=1, penwidth=3] - edge [penwidth=2] + node [margin=0.05, padding=1, penwidth=1] + edge [penwidth=1] {{ range .Nodes }} {{ .ID }} [label={{ if not .IsTable }}"{{ end }}{{ .Label }}{{ if not .IsTable }}"{{ end }}, fontsize={{ .Size }}, shape={{ .Shape }}, fontname = "arial", color="{{ .Color }}", fillcolor="{{ .FillColor }}", style="filled"] {{ end }} {{ range .Edges }} - {{ .From }} -> {{ .To }} [arrowhead=none, color="{{ .Color }}"] + {{ .From }} -> {{ .To }} [{{ if not .HasArrowHead}}arrowhead=none,{{ end }} color="{{ .Color }}", label={{ if not .IsTable }}"{{ end }}{{ .Label }}{{ if not .IsTable }}"{{ end }}] + {{ end }} + + {{ range .SubGraphs }} + subgraph {{ .Name }} { + style=filled; + color="{{ .Color }}"; + label="{{ .Title }}"; + fontSize={{ .TitleSize }}; + margin=5; + + {{ range .Nodes }} + {{ .ID }} [label={{ if not .IsTable }}"{{ end }}{{ .Label }}{{ if not .IsTable }}"{{ end }}, fontsize={{ .Size 
}}, shape={{ .Shape }}, fontname = "arial", color="{{ .Color }}", fillcolor="{{ .FillColor }}", style="filled"] + {{ end }} + + {{ range .Edges }} + {{ .From }} -> {{ .To }} [{{ if not .HasArrowHead}}arrowhead=none,{{ end }} color="{{ .Color }}", label={{ if not .IsTable }}"{{ end }}{{ .Label }}{{ if not .IsTable }}"{{ end }}] + {{ end }} + } {{ end }} }` @@ -45,12 +62,11 @@ func (ad *ActivityDump) ToGraph() utils.Graph { ad.Lock() defer ad.Unlock() - title := fmt.Sprintf("%s: %s", ad.Metadata.Name, ad.getSelectorStr()) var resolver *process.EBPFResolver if ad.adm != nil { resolver = ad.adm.resolvers.ProcessResolver } - return ad.ActivityTree.PrepareGraphData(title, resolver) + return ad.ActivityTree.PrepareGraphData(ad.Metadata.Name, ad.getSelectorStr(), resolver) } // EncodeDOT encodes an activity dump in the DOT format diff --git a/pkg/security/security_profile/dump/manager.go b/pkg/security/security_profile/dump/manager.go index be81a28ca5c4d..9e9ba9f2ace99 100644 --- a/pkg/security/security_profile/dump/manager.go +++ b/pkg/security/security_profile/dump/manager.go @@ -40,6 +40,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/seclog" activity_tree "github.com/DataDog/datadog-agent/pkg/security/security_profile/activity_tree" "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" ) // ActivityDumpHandler represents an handler for the activity dumps sent by the probe @@ -333,7 +334,7 @@ func NewActivityDumpManager(config *config.Config, statsdClient statsd.ClientInt func (adm *ActivityDumpManager) prepareContextTags() { // add hostname tag - hostname, err := utils.GetHostname() + hostname, err := hostnameutils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" } @@ -854,7 +855,7 @@ func (adm *ActivityDumpManager) SnapshotTracedCgroups() { if err = adm.activityDumpsConfigMap.Lookup(&event.ConfigCookie, &event.Config); err != nil { // this config doesn't exist anymore, remove expired entries - seclog.Errorf("config not found for (%v): %v", cgroupFile, err) + seclog.Warnf("config not found for (%v): %v", cgroupFile, err) _ = adm.tracedCgroupsMap.Delete(cgroupFile) continue } @@ -863,7 +864,7 @@ func (adm *ActivityDumpManager) SnapshotTracedCgroups() { } if err = iterator.Err(); err != nil { - seclog.Errorf("couldn't iterate over the map traced_cgroups: %v", err) + seclog.Warnf("couldn't iterate over the map traced_cgroups: %v", err) } } diff --git a/pkg/security/security_profile/dump/remote_storage.go b/pkg/security/security_profile/dump/remote_storage.go index b952f77e3124c..d2db22fc1269d 100644 --- a/pkg/security/security_profile/dump/remote_storage.go +++ b/pkg/security/security_profile/dump/remote_storage.go @@ -37,10 +37,14 @@ type tooLargeEntityStatsEntry struct { compression bool } +type remoteEndpoint struct { + logsEndpoint logsconfig.Endpoint + url string +} + // ActivityDumpRemoteStorage is a remote storage that forwards dumps to the backend type ActivityDumpRemoteStorage struct { - urls []string - apiKeys []string + endpoints []remoteEndpoint tooLargeEntities map[tooLargeEntityStatsEntry]*atomic.Uint64 client *http.Client @@ -70,10 +74,10 @@ func NewActivityDumpRemoteStorage() (ActivityDumpStorage, error) { return nil, fmt.Errorf("couldn't generate storage endpoints: %w", err) } for _, endpoint := range endpoints.GetReliableEndpoints() { - storage.urls = append(storage.urls, utils.GetEndpointURL(endpoint, "api/v2/secdump")) - // TODO - runtime API key refresh: Storing the API key 
like this will no longer be valid once the - // security agent support API key refresh at runtime. - storage.apiKeys = append(storage.apiKeys, endpoint.GetAPIKey()) + storage.endpoints = append(storage.endpoints, remoteEndpoint{ + logsEndpoint: endpoint, + url: utils.GetEndpointURL(endpoint, "api/v2/secdump"), + }) } return storage, nil @@ -188,11 +192,11 @@ func (storage *ActivityDumpRemoteStorage) Persist(request config.StorageRequest, return fmt.Errorf("couldn't build request: %w", err) } - for i, url := range storage.urls { - if err := storage.sendToEndpoint(url, storage.apiKeys[i], request, writer, body); err != nil { - seclog.Warnf("couldn't sent activity dump to [%s, body size: %d, dump size: %d]: %v", url, body.Len(), ad.Size, err) + for _, endpoint := range storage.endpoints { + if err := storage.sendToEndpoint(endpoint.url, endpoint.logsEndpoint.GetAPIKey(), request, writer, body); err != nil { + seclog.Warnf("couldn't sent activity dump to [%s, body size: %d, dump size: %d]: %v", endpoint.url, body.Len(), ad.Size, err) } else { - seclog.Infof("[%s] file for activity dump [%s] successfully sent to [%s]", request.Format, ad.GetSelectorStr(), url) + seclog.Infof("[%s] file for activity dump [%s] successfully sent to [%s]", request.Format, ad.GetSelectorStr(), endpoint.url) } } diff --git a/pkg/security/security_profile/profile/manager.go b/pkg/security/security_profile/profile/manager.go index 38fd111fbfe5a..2594420194742 100644 --- a/pkg/security/security_profile/profile/manager.go +++ b/pkg/security/security_profile/profile/manager.go @@ -561,10 +561,10 @@ func (m *SecurityProfileManager) SendStats() error { } } - tags := []string{ + t := []string{ fmt.Sprintf("in_kernel:%v", profilesLoadedInKernel), } - if err := m.statsdClient.Gauge(metrics.MetricSecurityProfileProfiles, float64(len(m.profiles)), tags, 1.0); err != nil { + if err := m.statsdClient.Gauge(metrics.MetricSecurityProfileProfiles, float64(len(m.profiles)), t, 1.0); err != nil { return fmt.Errorf("couldn't send MetricSecurityProfileProfiles: %w", err) } @@ -587,9 +587,9 @@ func (m *SecurityProfileManager) SendStats() error { } for entry, count := range m.eventFiltering { - tags := []string{fmt.Sprintf("event_type:%s", entry.eventType), entry.state.ToTag(), entry.result.toTag()} + t := []string{fmt.Sprintf("event_type:%s", entry.eventType), entry.state.ToTag(), entry.result.toTag()} if value := count.Swap(0); value > 0 { - if err := m.statsdClient.Count(metrics.MetricSecurityProfileEventFiltering, int64(value), tags, 1.0); err != nil { + if err := m.statsdClient.Count(metrics.MetricSecurityProfileEventFiltering, int64(value), t, 1.0); err != nil { return fmt.Errorf("couldn't send MetricSecurityProfileEventFiltering metric: %w", err) } } @@ -600,8 +600,8 @@ func (m *SecurityProfileManager) SendStats() error { m.evictedVersions = []cgroupModel.WorkloadSelector{} m.evictedVersionsLock.Unlock() for _, version := range evictedVersions { - tags := version.ToTags() - if err := m.statsdClient.Count(metrics.MetricSecurityProfileEvictedVersions, 1, tags, 1.0); err != nil { + t := version.ToTags() + if err := m.statsdClient.Count(metrics.MetricSecurityProfileEvictedVersions, 1, t, 1.0); err != nil { return fmt.Errorf("couldn't send MetricSecurityProfileEvictedVersions metric: %w", err) } @@ -746,7 +746,7 @@ func (m *SecurityProfileManager) LookupEventInProfiles(event *model.Event) { profile.versionContextsLock.Lock() ctx, found := profile.versionContexts[imageTag] if found { - // update the lastseen of this version + // update 
the last seen of this version ctx.lastSeenNano = uint64(m.resolvers.TimeResolver.ComputeMonotonicTimestamp(time.Now())) } else { // create a new version diff --git a/pkg/security/serializers/serializers_base.go b/pkg/security/serializers/serializers_base.go index 805e52f8c67cc..17e2d1c713a82 100644 --- a/pkg/security/serializers/serializers_base.go +++ b/pkg/security/serializers/serializers_base.go @@ -8,12 +8,15 @@ package serializers import ( + "fmt" "slices" "strings" + "github.com/DataDog/datadog-agent/pkg/security/events" "github.com/DataDog/datadog-agent/pkg/security/rules/bundled" "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/security/utils" "github.com/DataDog/datadog-agent/pkg/util/scrubber" ) @@ -115,6 +118,8 @@ type NetworkContextSerializer struct { Destination IPPortSerializer `json:"destination"` // size is the size in bytes of the network event Size uint32 `json:"size"` + // network_direction indicates if the packet was captured on ingress or egress + NetworkDirection string `json:"network_direction"` } // AWSSecurityCredentialsSerializer serializes the security credentials from an AWS IMDS request @@ -220,6 +225,42 @@ type RawPacketSerializer struct { TLSContext *TLSContextSerializer `json:"tls,omitempty"` } +// NetworkStatsSerializer defines a new network stats serializer +// easyjson:json +type NetworkStatsSerializer struct { + // data_size is the total count of bytes sent or received + DataSize uint64 `json:"data_size,omitempty"` + // packet_count is the total count of packets sent or received + PacketCount uint64 `json:"packet_count,omitempty"` +} + +// FlowSerializer defines a new flow serializer +// easyjson:json +type FlowSerializer struct { + // l3_protocol is the layer 3 protocol name + L3Protocol string `json:"l3_protocol"` + // l4_protocol is the layer 4 protocol name + L4Protocol string `json:"l4_protocol"` + // source is the emitter of the network event + Source IPPortSerializer `json:"source"` + // destination is the receiver of the network event + Destination IPPortSerializer `json:"destination"` + + // ingress holds the network statistics for ingress traffic + Ingress *NetworkStatsSerializer `json:"ingress,omitempty"` + // egress holds the network statistics for egress traffic + Egress *NetworkStatsSerializer `json:"egress,omitempty"` +} + +// NetworkFlowMonitorSerializer defines a network monitor event serializer +// easyjson:json +type NetworkFlowMonitorSerializer struct { + // device is the network device on which the event was captured + Device *NetworkDeviceSerializer `json:"device,omitempty"` + // flows is the list of flows with network statistics that were captured + Flows []*FlowSerializer `json:"flows,omitempty"` +} + func newMatchedRulesSerializer(r *model.MatchedRule) MatchedRuleSerializer { mrs := MatchedRuleSerializer{ ID: r.RuleID, @@ -303,7 +344,7 @@ func newIPPortFamilySerializer(c *model.IPPortContext, family string) IPPortFami func newExitEventSerializer(e *model.Event) *ExitEventSerializer { return &ExitEventSerializer{ - Cause: model.ExitCause(e.Exit.Cause).String(), + Cause: sharedconsts.ExitCause(e.Exit.Cause).String(), Code: e.Exit.Code, } } @@ -394,3 +435,27 @@ func newVariablesContext(e *model.Event, opts *eval.Opts, prefix string) (variab } return variables } + +// EventStringerWrapper an event stringer wrapper +type EventStringerWrapper struct 
{ + Event interface{} // can be model.Event or events.CustomEvent +} + +func (e EventStringerWrapper) String() string { + var ( + data []byte + err error + ) + switch evt := e.Event.(type) { + case *model.Event: + data, err = MarshalEvent(evt) + case *events.CustomEvent: + data, err = MarshalCustomEvent(evt) + default: + return "event can't be wrapped, not supported" + } + if err != nil { + return fmt.Sprintf("unable to marshal event: %s", err) + } + return string(data) +} diff --git a/pkg/security/serializers/serializers_base_linux_easyjson.go b/pkg/security/serializers/serializers_base_linux_easyjson.go index 8502c05b45842..33140f9ad960c 100644 --- a/pkg/security/serializers/serializers_base_linux_easyjson.go +++ b/pkg/security/serializers/serializers_base_linux_easyjson.go @@ -186,6 +186,8 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(i (out.Destination).UnmarshalEasyJSON(in) case "size": out.Size = uint32(in.Uint32()) + case "network_direction": + out.NetworkDirection = string(in.String()) default: in.SkipRecursive() } @@ -246,6 +248,11 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(o out.RawString(prefix) out.Uint32(uint32(in.Size)) } + { + const prefix string = ",\"network_direction\":" + out.RawString(prefix) + out.String(string(in.NetworkDirection)) + } out.RawByte('}') } @@ -890,7 +897,187 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers4(o } out.RawByte('}') } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(in *jlexer.Lexer, out *NetworkContextSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(in *jlexer.Lexer, out *NetworkStatsSerializer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "data_size": + out.DataSize = uint64(in.Uint64()) + case "packet_count": + out.PacketCount = uint64(in.Uint64()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(out *jwriter.Writer, in NetworkStatsSerializer) { + out.RawByte('{') + first := true + _ = first + if in.DataSize != 0 { + const prefix string = ",\"data_size\":" + first = false + out.RawString(prefix[1:]) + out.Uint64(uint64(in.DataSize)) + } + if in.PacketCount != 0 { + const prefix string = ",\"packet_count\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + out.Uint64(uint64(in.PacketCount)) + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v NetworkStatsSerializer) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *NetworkStatsSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(l, v) +} +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(in *jlexer.Lexer, out *NetworkFlowMonitorSerializer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + 
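// Illustrative only: the approximate JSON produced for the new flow
// serializers above, reproduced with encoding/json stand-in structs carrying
// the same tags. The generated easyjson code is expected to emit equivalent
// output; the IPPortSerializer field names used here are assumptions.
package main

import (
	"encoding/json"
	"fmt"
)

type netStats struct {
	DataSize    uint64 `json:"data_size,omitempty"`
	PacketCount uint64 `json:"packet_count,omitempty"`
}

type ipPort struct {
	IP   string `json:"ip"`
	Port uint16 `json:"port"`
}

type flow struct {
	L3Protocol  string    `json:"l3_protocol"`
	L4Protocol  string    `json:"l4_protocol"`
	Source      ipPort    `json:"source"`
	Destination ipPort    `json:"destination"`
	Ingress     *netStats `json:"ingress,omitempty"`
	Egress      *netStats `json:"egress,omitempty"`
}

func main() {
	// A flow with only egress traffic: the nil ingress section is omitted,
	// matching the `if in.Ingress != nil` check in the generated encoder.
	out, _ := json.Marshal(flow{
		L3Protocol:  "IPv4",
		L4Protocol:  "TCP",
		Source:      ipPort{IP: "10.0.0.1", Port: 4242},
		Destination: ipPort{IP: "10.0.0.2", Port: 443},
		Egress:      &netStats{DataSize: 150, PacketCount: 3},
	})
	fmt.Println(string(out))
	// {"l3_protocol":"IPv4","l4_protocol":"TCP","source":{"ip":"10.0.0.1","port":4242},
	// "destination":{"ip":"10.0.0.2","port":443},"egress":{"data_size":150,"packet_count":3}}
}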
key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "device": + if in.IsNull() { + in.Skip() + out.Device = nil + } else { + if out.Device == nil { + out.Device = new(NetworkDeviceSerializer) + } + (*out.Device).UnmarshalEasyJSON(in) + } + case "flows": + if in.IsNull() { + in.Skip() + out.Flows = nil + } else { + in.Delim('[') + if out.Flows == nil { + if !in.IsDelim(']') { + out.Flows = make([]*FlowSerializer, 0, 8) + } else { + out.Flows = []*FlowSerializer{} + } + } else { + out.Flows = (out.Flows)[:0] + } + for !in.IsDelim(']') { + var v18 *FlowSerializer + if in.IsNull() { + in.Skip() + v18 = nil + } else { + if v18 == nil { + v18 = new(FlowSerializer) + } + (*v18).UnmarshalEasyJSON(in) + } + out.Flows = append(out.Flows, v18) + in.WantComma() + } + in.Delim(']') + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(out *jwriter.Writer, in NetworkFlowMonitorSerializer) { + out.RawByte('{') + first := true + _ = first + if in.Device != nil { + const prefix string = ",\"device\":" + first = false + out.RawString(prefix[1:]) + (*in.Device).MarshalEasyJSON(out) + } + if len(in.Flows) != 0 { + const prefix string = ",\"flows\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + { + out.RawByte('[') + for v19, v20 := range in.Flows { + if v19 > 0 { + out.RawByte(',') + } + if v20 == nil { + out.RawString("null") + } else { + (*v20).MarshalEasyJSON(out) + } + } + out.RawByte(']') + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v NetworkFlowMonitorSerializer) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *NetworkFlowMonitorSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(l, v) +} +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(in *jlexer.Lexer, out *NetworkContextSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -929,6 +1116,8 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(i (out.Destination).UnmarshalEasyJSON(in) case "size": out.Size = uint32(in.Uint32()) + case "network_direction": + out.NetworkDirection = string(in.String()) default: in.SkipRecursive() } @@ -939,7 +1128,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(out *jwriter.Writer, in NetworkContextSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(out *jwriter.Writer, in NetworkContextSerializer) { out.RawByte('{') first := true _ = first @@ -979,19 +1168,24 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(o out.RawString(prefix) out.Uint32(uint32(in.Size)) } + { + const prefix string = ",\"network_direction\":" + out.RawString(prefix) + out.String(string(in.NetworkDirection)) + } out.RawByte('}') } // MarshalEasyJSON supports easyjson.Marshaler interface func (v NetworkContextSerializer) MarshalEasyJSON(w *jwriter.Writer) { - 
easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *NetworkContextSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers5(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(in *jlexer.Lexer, out *MatchedRuleSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(in *jlexer.Lexer, out *MatchedRuleSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1030,9 +1224,9 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(i out.Tags = (out.Tags)[:0] } for !in.IsDelim(']') { - var v18 string - v18 = string(in.String()) - out.Tags = append(out.Tags, v18) + var v21 string + v21 = string(in.String()) + out.Tags = append(out.Tags, v21) in.WantComma() } in.Delim(']') @@ -1051,7 +1245,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(out *jwriter.Writer, in MatchedRuleSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(out *jwriter.Writer, in MatchedRuleSerializer) { out.RawByte('{') first := true _ = first @@ -1081,11 +1275,11 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(o } { out.RawByte('[') - for v19, v20 := range in.Tags { - if v19 > 0 { + for v22, v23 := range in.Tags { + if v22 > 0 { out.RawByte(',') } - out.String(string(v20)) + out.String(string(v23)) } out.RawByte(']') } @@ -1115,14 +1309,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v MatchedRuleSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *MatchedRuleSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers6(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(in *jlexer.Lexer, out *IPPortSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(in *jlexer.Lexer, out *IPPortSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1155,7 +1349,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(out *jwriter.Writer, in IPPortSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(out *jwriter.Writer, in IPPortSerializer) { out.RawByte('{') first := true _ = first @@ -1174,14 +1368,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v IPPortSerializer) MarshalEasyJSON(w *jwriter.Writer) { - 
easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *IPPortSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers7(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(in *jlexer.Lexer, out *IPPortFamilySerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(in *jlexer.Lexer, out *IPPortFamilySerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1216,7 +1410,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(out *jwriter.Writer, in IPPortFamilySerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(out *jwriter.Writer, in IPPortFamilySerializer) { out.RawByte('{') first := true _ = first @@ -1240,14 +1434,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v IPPortFamilySerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *IPPortFamilySerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers8(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(in *jlexer.Lexer, out *IMDSEventSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(in *jlexer.Lexer, out *IMDSEventSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1298,7 +1492,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(i in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(out *jwriter.Writer, in IMDSEventSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(out *jwriter.Writer, in IMDSEventSerializer) { out.RawByte('{') first := true _ = first @@ -1342,14 +1536,117 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(o // MarshalEasyJSON supports easyjson.Marshaler interface func (v IMDSEventSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *IMDSEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers9(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(in *jlexer.Lexer, out *ExitEventSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(in *jlexer.Lexer, out 
*FlowSerializer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "l3_protocol": + out.L3Protocol = string(in.String()) + case "l4_protocol": + out.L4Protocol = string(in.String()) + case "source": + (out.Source).UnmarshalEasyJSON(in) + case "destination": + (out.Destination).UnmarshalEasyJSON(in) + case "ingress": + if in.IsNull() { + in.Skip() + out.Ingress = nil + } else { + if out.Ingress == nil { + out.Ingress = new(NetworkStatsSerializer) + } + (*out.Ingress).UnmarshalEasyJSON(in) + } + case "egress": + if in.IsNull() { + in.Skip() + out.Egress = nil + } else { + if out.Egress == nil { + out.Egress = new(NetworkStatsSerializer) + } + (*out.Egress).UnmarshalEasyJSON(in) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(out *jwriter.Writer, in FlowSerializer) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"l3_protocol\":" + out.RawString(prefix[1:]) + out.String(string(in.L3Protocol)) + } + { + const prefix string = ",\"l4_protocol\":" + out.RawString(prefix) + out.String(string(in.L4Protocol)) + } + { + const prefix string = ",\"source\":" + out.RawString(prefix) + (in.Source).MarshalEasyJSON(out) + } + { + const prefix string = ",\"destination\":" + out.RawString(prefix) + (in.Destination).MarshalEasyJSON(out) + } + if in.Ingress != nil { + const prefix string = ",\"ingress\":" + out.RawString(prefix) + (*in.Ingress).MarshalEasyJSON(out) + } + if in.Egress != nil { + const prefix string = ",\"egress\":" + out.RawString(prefix) + (*in.Egress).MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v FlowSerializer) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *FlowSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(l, v) +} +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers13(in *jlexer.Lexer, out *ExitEventSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1382,7 +1679,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(out *jwriter.Writer, in ExitEventSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers13(out *jwriter.Writer, in ExitEventSerializer) { out.RawByte('{') first := true _ = first @@ -1401,14 +1698,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10( // MarshalEasyJSON supports easyjson.Marshaler interface func (v ExitEventSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers13(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *ExitEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - 
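// A minimal sketch of the comma-prefix pattern used throughout the generated
// encoders above: every field prefix starts with a comma, and whichever
// optional field is written first drops that comma via prefix[1:]. This is a
// self-contained stand-in using bytes.Buffer, not the easyjson jwriter API.
package main

import (
	"bytes"
	"fmt"
)

func encode(dataSize, packetCount uint64) string {
	var out bytes.Buffer
	out.WriteByte('{')
	first := true
	if dataSize != 0 {
		const prefix = ",\"data_size\":"
		if first {
			first = false
			out.WriteString(prefix[1:]) // first field: drop the leading comma
		} else {
			out.WriteString(prefix)
		}
		fmt.Fprintf(&out, "%d", dataSize)
	}
	if packetCount != 0 {
		const prefix = ",\"packet_count\":"
		if first {
			first = false
			out.WriteString(prefix[1:])
		} else {
			out.WriteString(prefix)
		}
		fmt.Fprintf(&out, "%d", packetCount)
	}
	out.WriteByte('}')
	return out.String()
}

func main() {
	fmt.Println(encode(150, 3)) // {"data_size":150,"packet_count":3}
	fmt.Println(encode(0, 3))   // {"packet_count":3}
}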
easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers10(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers13(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(in *jlexer.Lexer, out *EventContextSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers14(in *jlexer.Lexer, out *EventContextSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1451,9 +1748,9 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( out.MatchedRules = (out.MatchedRules)[:0] } for !in.IsDelim(']') { - var v21 MatchedRuleSerializer - (v21).UnmarshalEasyJSON(in) - out.MatchedRules = append(out.MatchedRules, v21) + var v24 MatchedRuleSerializer + (v24).UnmarshalEasyJSON(in) + out.MatchedRules = append(out.MatchedRules, v24) in.WantComma() } in.Delim(']') @@ -1470,7 +1767,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(out *jwriter.Writer, in EventContextSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers14(out *jwriter.Writer, in EventContextSerializer) { out.RawByte('{') first := true _ = first @@ -1520,11 +1817,11 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( } { out.RawByte('[') - for v22, v23 := range in.MatchedRules { - if v22 > 0 { + for v25, v26 := range in.MatchedRules { + if v25 > 0 { out.RawByte(',') } - (v23).MarshalEasyJSON(out) + (v26).MarshalEasyJSON(out) } out.RawByte(']') } @@ -1544,14 +1841,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11( // MarshalEasyJSON supports easyjson.Marshaler interface func (v EventContextSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers14(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *EventContextSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers11(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers14(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(in *jlexer.Lexer, out *DNSQuestionSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers15(in *jlexer.Lexer, out *DNSQuestionSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1590,7 +1887,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers12( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(out *jwriter.Writer, in DNSQuestionSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers15(out *jwriter.Writer, in DNSQuestionSerializer) { out.RawByte('{') first := true _ = first @@ -1624,14 +1921,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers12( // MarshalEasyJSON supports easyjson.Marshaler interface func (v DNSQuestionSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers15(w, v) } // UnmarshalEasyJSON supports 
easyjson.Unmarshaler interface func (v *DNSQuestionSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers12(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers15(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers13(in *jlexer.Lexer, out *DNSEventSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers16(in *jlexer.Lexer, out *DNSEventSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1664,7 +1961,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers13( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers13(out *jwriter.Writer, in DNSEventSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers16(out *jwriter.Writer, in DNSEventSerializer) { out.RawByte('{') first := true _ = first @@ -1683,14 +1980,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers13( // MarshalEasyJSON supports easyjson.Marshaler interface func (v DNSEventSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers13(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers16(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *DNSEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers13(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers16(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers14(in *jlexer.Lexer, out *ContainerContextSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers17(in *jlexer.Lexer, out *ContainerContextSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1735,7 +2032,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers14( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers14(out *jwriter.Writer, in ContainerContextSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers17(out *jwriter.Writer, in ContainerContextSerializer) { out.RawByte('{') first := true _ = first @@ -1770,14 +2067,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers14( // MarshalEasyJSON supports easyjson.Marshaler interface func (v ContainerContextSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers14(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers17(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *ContainerContextSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers14(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers17(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers15(in *jlexer.Lexer, out *BaseEventSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers18(in *jlexer.Lexer, out *BaseEventSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1856,7 +2153,7 @@ func 
easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers15( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers15(out *jwriter.Writer, in BaseEventSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers18(out *jwriter.Writer, in BaseEventSerializer) { out.RawByte('{') first := true _ = first @@ -1921,14 +2218,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers15( // MarshalEasyJSON supports easyjson.Marshaler interface func (v BaseEventSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers15(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers18(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *BaseEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers15(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers18(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers16(in *jlexer.Lexer, out *AWSSecurityCredentialsSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers19(in *jlexer.Lexer, out *AWSSecurityCredentialsSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -1967,7 +2264,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers16( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers16(out *jwriter.Writer, in AWSSecurityCredentialsSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers19(out *jwriter.Writer, in AWSSecurityCredentialsSerializer) { out.RawByte('{') first := true _ = first @@ -2001,14 +2298,14 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers16( // MarshalEasyJSON supports easyjson.Marshaler interface func (v AWSSecurityCredentialsSerializer) MarshalEasyJSON(w *jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers16(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers19(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *AWSSecurityCredentialsSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers16(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers19(l, v) } -func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers17(in *jlexer.Lexer, out *AWSIMDSEventSerializer) { +func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers20(in *jlexer.Lexer, out *AWSIMDSEventSerializer) { isTopLevel := in.IsStart() if in.IsNull() { if isTopLevel { @@ -2049,7 +2346,7 @@ func easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers17( in.Consumed() } } -func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers17(out *jwriter.Writer, in AWSIMDSEventSerializer) { +func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers20(out *jwriter.Writer, in AWSIMDSEventSerializer) { out.RawByte('{') first := true _ = first @@ -2068,10 +2365,10 @@ func easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers17( // MarshalEasyJSON supports easyjson.Marshaler interface func (v AWSIMDSEventSerializer) MarshalEasyJSON(w 
*jwriter.Writer) { - easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers17(w, v) + easyjsonA1e47abeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers20(w, v) } // UnmarshalEasyJSON supports easyjson.Unmarshaler interface func (v *AWSIMDSEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { - easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers17(l, v) + easyjsonA1e47abeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers20(l, v) } diff --git a/pkg/security/serializers/serializers_linux.go b/pkg/security/serializers/serializers_linux.go index 4f38767025b47..d82db8d08abf6 100644 --- a/pkg/security/serializers/serializers_linux.go +++ b/pkg/security/serializers/serializers_linux.go @@ -432,6 +432,13 @@ type SpliceEventSerializer struct { PipeExitFlag string `json:"pipe_exit_flag"` } +// AcceptEventSerializer serializes a bind event to JSON +// easyjson:json +type AcceptEventSerializer struct { + // Bound address (if any) + Addr IPPortFamilySerializer `json:"addr"` +} + // BindEventSerializer serializes a bind event to JSON // easyjson:json type BindEventSerializer struct { @@ -586,6 +593,18 @@ func newSyscallArgsSerializer(sc *model.SyscallContext, e *model.Event) *Syscall DestinationPath: &mountPointPath, FSType: &fstype, } + case model.FileMkdirEventType: + path := e.FieldHandlers.ResolveSyscallCtxArgsStr1(e, sc) + mode := e.FieldHandlers.ResolveSyscallCtxArgsInt2(e, sc) + return &SyscallArgsSerializer{ + Path: &path, + Mode: &mode, + } + case model.FileRmdirEventType: + path := e.FieldHandlers.ResolveSyscallCtxArgsStr1(e, sc) + return &SyscallArgsSerializer{ + Path: &path, + } } return nil @@ -604,6 +623,8 @@ type SyscallContextSerializer struct { Rename *SyscallArgsSerializer `json:"rename,omitempty"` Utimes *SyscallArgsSerializer `json:"utimes,omitempty"` Mount *SyscallArgsSerializer `json:"mount,omitempty"` + Mkdir *SyscallArgsSerializer `json:"mkdir,omitempty"` + Rmdir *SyscallArgsSerializer `json:"rmdir,omitempty"` } func newSyscallContextSerializer(sc *model.SyscallContext, e *model.Event, attachEventypeCb func(*SyscallContextSerializer, *SyscallArgsSerializer)) *SyscallContextSerializer { @@ -633,23 +654,25 @@ type EventSerializer struct { *SecurityProfileContextSerializer `json:"security_profile,omitempty"` *CGroupContextSerializer `json:"cgroup,omitempty"` - *SELinuxEventSerializer `json:"selinux,omitempty"` - *BPFEventSerializer `json:"bpf,omitempty"` - *MMapEventSerializer `json:"mmap,omitempty"` - *MProtectEventSerializer `json:"mprotect,omitempty"` - *PTraceEventSerializer `json:"ptrace,omitempty"` - *ModuleEventSerializer `json:"module,omitempty"` - *SignalEventSerializer `json:"signal,omitempty"` - *SpliceEventSerializer `json:"splice,omitempty"` - *DNSEventSerializer `json:"dns,omitempty"` - *IMDSEventSerializer `json:"imds,omitempty"` - *BindEventSerializer `json:"bind,omitempty"` - *ConnectEventSerializer `json:"connect,omitempty"` - *MountEventSerializer `json:"mount,omitempty"` - *SyscallsEventSerializer `json:"syscalls,omitempty"` - *UserContextSerializer `json:"usr,omitempty"` - *SyscallContextSerializer `json:"syscall,omitempty"` - *RawPacketSerializer `json:"packet,omitempty"` + *SELinuxEventSerializer `json:"selinux,omitempty"` + *BPFEventSerializer `json:"bpf,omitempty"` + *MMapEventSerializer `json:"mmap,omitempty"` + *MProtectEventSerializer `json:"mprotect,omitempty"` + *PTraceEventSerializer `json:"ptrace,omitempty"` + *ModuleEventSerializer `json:"module,omitempty"` + *SignalEventSerializer 
`json:"signal,omitempty"` + *SpliceEventSerializer `json:"splice,omitempty"` + *DNSEventSerializer `json:"dns,omitempty"` + *IMDSEventSerializer `json:"imds,omitempty"` + *AcceptEventSerializer `json:"accept,omitempty"` + *BindEventSerializer `json:"bind,omitempty"` + *ConnectEventSerializer `json:"connect,omitempty"` + *MountEventSerializer `json:"mount,omitempty"` + *SyscallsEventSerializer `json:"syscalls,omitempty"` + *UserContextSerializer `json:"usr,omitempty"` + *SyscallContextSerializer `json:"syscall,omitempty"` + *RawPacketSerializer `json:"packet,omitempty"` + *NetworkFlowMonitorSerializer `json:"network_flow_monitor,omitempty"` } func newSyscallsEventSerializer(e *model.SyscallsEvent) *SyscallsEventSerializer { @@ -664,8 +687,8 @@ func newSyscallsEventSerializer(e *model.SyscallsEvent) *SyscallsEventSerializer } func getInUpperLayer(f *model.FileFields) *bool { - lowerLayer := f.GetInLowerLayer() - upperLayer := f.GetInUpperLayer() + lowerLayer := f.IsInLowerLayer() + upperLayer := f.IsInUpperLayer() if !lowerLayer && !upperLayer { return nil } @@ -959,6 +982,14 @@ func newSpliceEventSerializer(e *model.Event) *SpliceEventSerializer { } } +func newAcceptEventSerializer(e *model.Event) *AcceptEventSerializer { + ces := &AcceptEventSerializer{ + Addr: newIPPortFamilySerializer(&e.Accept.Addr, + model.AddressFamily(e.Accept.AddrFamily).String()), + } + return ces +} + func newBindEventSerializer(e *model.Event) *BindEventSerializer { bes := &BindEventSerializer{ Addr: newIPPortFamilySerializer(&e.Bind.Addr, @@ -1034,6 +1065,36 @@ func newRawPacketEventSerializer(rp *model.RawPacketEvent, e *model.Event) *RawP } } +func newNetworkStatsSerializer(networkStats *model.NetworkStats, _ *model.Event) *NetworkStatsSerializer { + return &NetworkStatsSerializer{ + DataSize: networkStats.DataSize, + PacketCount: networkStats.PacketCount, + } +} + +func newFlowSerializer(flow *model.Flow, e *model.Event) *FlowSerializer { + return &FlowSerializer{ + L3Protocol: model.L3Protocol(flow.L3Protocol).String(), + L4Protocol: model.L4Protocol(flow.L4Protocol).String(), + Source: newIPPortSerializer(&flow.Source), + Destination: newIPPortSerializer(&flow.Destination), + Ingress: newNetworkStatsSerializer(&flow.Ingress, e), + Egress: newNetworkStatsSerializer(&flow.Egress, e), + } +} + +func newNetworkFlowMonitorSerializer(nm *model.NetworkFlowMonitorEvent, e *model.Event) *NetworkFlowMonitorSerializer { + s := &NetworkFlowMonitorSerializer{ + Device: newNetworkDeviceSerializer(&nm.Device, e), + } + + for _, flow := range nm.Flows { + s.Flows = append(s.Flows, newFlowSerializer(&flow, e)) + } + + return s +} + func serializeOutcome(retval int64) string { switch { case retval < 0: @@ -1090,7 +1151,7 @@ func newProcessContextSerializer(pc *model.ProcessContext, e *model.Event) *Proc ancestor = pce prev = s - ptr = it.Next() + ptr = it.Next(ctx) } // shrink the middle of the ancestors list if it is too long @@ -1136,7 +1197,7 @@ func newDDContextSerializer(e *model.Event) *DDContextSerializer { break } - ptr = it.Next() + ptr = it.Next(ctx) } return s } @@ -1144,12 +1205,13 @@ func newDDContextSerializer(e *model.Event) *DDContextSerializer { // nolint: deadcode, unused func newNetworkContextSerializer(e *model.Event, networkCtx *model.NetworkContext) *NetworkContextSerializer { return &NetworkContextSerializer{ - Device: newNetworkDeviceSerializer(&networkCtx.Device, e), - L3Protocol: model.L3Protocol(networkCtx.L3Protocol).String(), - L4Protocol: model.L4Protocol(networkCtx.L4Protocol).String(), - 
Source: newIPPortSerializer(&networkCtx.Source), - Destination: newIPPortSerializer(&networkCtx.Destination), - Size: networkCtx.Size, + Device: newNetworkDeviceSerializer(&networkCtx.Device, e), + L3Protocol: model.L3Protocol(networkCtx.L3Protocol).String(), + L4Protocol: model.L4Protocol(networkCtx.L4Protocol).String(), + Source: newIPPortSerializer(&networkCtx.Source), + Destination: newIPPortSerializer(&networkCtx.Destination), + Size: networkCtx.Size, + NetworkDirection: model.NetworkDirection(networkCtx.NetworkDirection).String(), } } @@ -1176,8 +1238,8 @@ func (e *EventSerializer) MarshalJSON() ([]byte, error) { } // MarshalEvent marshal the event -func MarshalEvent(event *model.Event, opts *eval.Opts) ([]byte, error) { - s := NewEventSerializer(event, opts) +func MarshalEvent(event *model.Event) ([]byte, error) { + s := NewEventSerializer(event, nil) return utils.MarshalEasyJSON(s) } @@ -1279,11 +1341,18 @@ func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { }, } s.EventContextSerializer.Outcome = serializeOutcome(event.Mkdir.Retval) + s.SyscallContextSerializer = newSyscallContextSerializer(&event.Mkdir.SyscallContext, event, func(ctx *SyscallContextSerializer, args *SyscallArgsSerializer) { + ctx.Mkdir = args + }) + case model.FileRmdirEventType: s.FileEventSerializer = &FileEventSerializer{ FileSerializer: *newFileSerializer(&event.Rmdir.File, event), } s.EventContextSerializer.Outcome = serializeOutcome(event.Rmdir.Retval) + s.SyscallContextSerializer = newSyscallContextSerializer(&event.Rmdir.SyscallContext, event, func(ctx *SyscallContextSerializer, args *SyscallArgsSerializer) { + ctx.Rmdir = args + }) case model.FileChdirEventType: s.FileEventSerializer = &FileEventSerializer{ FileSerializer: *newFileSerializer(&event.Chdir.File, event), @@ -1425,6 +1494,9 @@ func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { FileSerializer: *newFileSerializer(&event.Splice.File, event), } } + case model.AcceptEventType: + s.EventContextSerializer.Outcome = serializeOutcome(event.Accept.Retval) + s.AcceptEventSerializer = newAcceptEventSerializer(event) case model.BindEventType: s.EventContextSerializer.Outcome = serializeOutcome(event.Bind.Retval) s.BindEventSerializer = newBindEventSerializer(event) @@ -1449,6 +1521,8 @@ func NewEventSerializer(event *model.Event, opts *eval.Opts) *EventSerializer { }) case model.RawPacketEventType: s.RawPacketSerializer = newRawPacketEventSerializer(&event.RawPacket, event) + case model.NetworkFlowMonitorEventType: + s.NetworkFlowMonitorSerializer = newNetworkFlowMonitorSerializer(&event.NetworkFlowMonitor, event) } return s diff --git a/pkg/security/serializers/serializers_linux_easyjson.go b/pkg/security/serializers/serializers_linux_easyjson.go index e61dfb697a062..0a079aaae163b 100644 --- a/pkg/security/serializers/serializers_linux_easyjson.go +++ b/pkg/security/serializers/serializers_linux_easyjson.go @@ -412,6 +412,26 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(i } (*out.Mount).UnmarshalEasyJSON(in) } + case "mkdir": + if in.IsNull() { + in.Skip() + out.Mkdir = nil + } else { + if out.Mkdir == nil { + out.Mkdir = new(SyscallArgsSerializer) + } + (*out.Mkdir).UnmarshalEasyJSON(in) + } + case "rmdir": + if in.IsNull() { + in.Skip() + out.Rmdir = nil + } else { + if out.Rmdir == nil { + out.Rmdir = new(SyscallArgsSerializer) + } + (*out.Rmdir).UnmarshalEasyJSON(in) + } default: in.SkipRecursive() } @@ -522,6 +542,26 @@ func 
easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers2(o } (*in.Mount).MarshalEasyJSON(out) } + if in.Mkdir != nil { + const prefix string = ",\"mkdir\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + (*in.Mkdir).MarshalEasyJSON(out) + } + if in.Rmdir != nil { + const prefix string = ",\"rmdir\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + (*in.Rmdir).MarshalEasyJSON(out) + } out.RawByte('}') } @@ -3606,6 +3646,7 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( out.SpliceEventSerializer = new(SpliceEventSerializer) out.DNSEventSerializer = new(DNSEventSerializer) out.IMDSEventSerializer = new(IMDSEventSerializer) + out.AcceptEventSerializer = new(AcceptEventSerializer) out.BindEventSerializer = new(BindEventSerializer) out.ConnectEventSerializer = new(ConnectEventSerializer) out.MountEventSerializer = new(MountEventSerializer) @@ -3613,6 +3654,7 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( out.UserContextSerializer = new(UserContextSerializer) out.SyscallContextSerializer = new(SyscallContextSerializer) out.RawPacketSerializer = new(RawPacketSerializer) + out.NetworkFlowMonitorSerializer = new(NetworkFlowMonitorSerializer) in.Delim('{') for !in.IsDelim('}') { key := in.UnsafeFieldName(false) @@ -3763,6 +3805,16 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( } (*out.IMDSEventSerializer).UnmarshalEasyJSON(in) } + case "accept": + if in.IsNull() { + in.Skip() + out.AcceptEventSerializer = nil + } else { + if out.AcceptEventSerializer == nil { + out.AcceptEventSerializer = new(AcceptEventSerializer) + } + (*out.AcceptEventSerializer).UnmarshalEasyJSON(in) + } case "bind": if in.IsNull() { in.Skip() @@ -3854,6 +3906,16 @@ func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( } (*out.RawPacketSerializer).UnmarshalEasyJSON(in) } + case "network_flow_monitor": + if in.IsNull() { + in.Skip() + out.NetworkFlowMonitorSerializer = nil + } else { + if out.NetworkFlowMonitorSerializer == nil { + out.NetworkFlowMonitorSerializer = new(NetworkFlowMonitorSerializer) + } + (*out.NetworkFlowMonitorSerializer).UnmarshalEasyJSON(in) + } case "evt": (out.EventContextSerializer).UnmarshalEasyJSON(in) case "date": @@ -4050,6 +4112,16 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( } (*in.IMDSEventSerializer).MarshalEasyJSON(out) } + if in.AcceptEventSerializer != nil { + const prefix string = ",\"accept\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + (*in.AcceptEventSerializer).MarshalEasyJSON(out) + } if in.BindEventSerializer != nil { const prefix string = ",\"bind\":" if first { @@ -4131,6 +4203,16 @@ func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers24( } (*in.RawPacketSerializer).MarshalEasyJSON(out) } + if in.NetworkFlowMonitorSerializer != nil { + const prefix string = ",\"network_flow_monitor\":" + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } + (*in.NetworkFlowMonitorSerializer).MarshalEasyJSON(out) + } if true { const prefix string = ",\"evt\":" if first { @@ -5118,3 +5200,55 @@ func (v AnomalyDetectionSyscallEventSerializer) MarshalEasyJSON(w *jwriter.Write func (v *AnomalyDetectionSyscallEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { 
easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers34(l, v) } +func easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers35(in *jlexer.Lexer, out *AcceptEventSerializer) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "addr": + (out.Addr).UnmarshalEasyJSON(in) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers35(out *jwriter.Writer, in AcceptEventSerializer) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"addr\":" + out.RawString(prefix[1:]) + (in.Addr).MarshalEasyJSON(out) + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v AcceptEventSerializer) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonDdc0fdbeEncodeGithubComDataDogDatadogAgentPkgSecuritySerializers35(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *AcceptEventSerializer) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonDdc0fdbeDecodeGithubComDataDogDatadogAgentPkgSecuritySerializers35(l, v) +} diff --git a/pkg/security/serializers/serializers_others.go b/pkg/security/serializers/serializers_others.go index 53212383adfc6..d559a70f832b2 100644 --- a/pkg/security/serializers/serializers_others.go +++ b/pkg/security/serializers/serializers_others.go @@ -25,8 +25,8 @@ func (e *EventSerializer) ToJSON() ([]byte, error) { } // MarshalEvent marshal the event -func MarshalEvent(event *model.Event, opts *eval.Opts) ([]byte, error) { - s := NewEventSerializer(event, opts) +func MarshalEvent(event *model.Event) ([]byte, error) { + s := NewEventSerializer(event, nil) return json.Marshal(s) } diff --git a/pkg/security/serializers/serializers_windows.go b/pkg/security/serializers/serializers_windows.go index b40b90b610284..3f9452ab9f1ad 100644 --- a/pkg/security/serializers/serializers_windows.go +++ b/pkg/security/serializers/serializers_windows.go @@ -193,7 +193,7 @@ func newProcessContextSerializer(pc *model.ProcessContext, e *model.Event) *Proc } first = false - ptr = it.Next() + ptr = it.Next(ctx) } return &ps @@ -209,8 +209,8 @@ func (e *EventSerializer) ToJSON() ([]byte, error) { } // MarshalEvent marshal the event -func MarshalEvent(event *model.Event, opts *eval.Opts) ([]byte, error) { - s := NewEventSerializer(event, opts) +func MarshalEvent(event *model.Event) ([]byte, error) { + s := NewEventSerializer(event, nil) return json.Marshal(s) } diff --git a/pkg/security/telemetry/containers_running_telemetry_common.go b/pkg/security/telemetry/containers_running_telemetry_common.go new file mode 100644 index 0000000000000..67eef5a2788e3 --- /dev/null +++ b/pkg/security/telemetry/containers_running_telemetry_common.go @@ -0,0 +1,12 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
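The generated easyjson code above only emits the event-specific keys (`accept`, `network_flow_monitor`, ...) when the corresponding embedded pointer on `EventSerializer` is non-nil. The snippet below is a minimal, self-contained sketch of that pattern using plain `encoding/json`; `addrSerializer`, `acceptSerializer`, and `eventSerializer` are hypothetical stand-ins, not the real serializers, which rely on the easyjson-generated marshalers shown in this diff:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical, trimmed-down stand-ins for the serializer types in this diff.
type addrSerializer struct {
	IP   string `json:"ip"`
	Port uint16 `json:"port"`
}

type acceptSerializer struct {
	Addr addrSerializer `json:"addr"`
}

type eventSerializer struct {
	// An embedded pointer with omitempty: the "accept" key only appears in the
	// output when the pointer is set for the matching event type.
	*acceptSerializer `json:"accept,omitempty"`
	Type              string `json:"type"`
}

func main() {
	withAccept, _ := json.Marshal(eventSerializer{
		acceptSerializer: &acceptSerializer{Addr: addrSerializer{IP: "127.0.0.1", Port: 4242}},
		Type:             "accept",
	})
	withoutAccept, _ := json.Marshal(eventSerializer{Type: "open"})

	fmt.Println(string(withAccept))    // {"accept":{"addr":{"ip":"127.0.0.1","port":4242}},"type":"accept"}
	fmt.Println(string(withoutAccept)) // {"type":"open"}
}
```

This is why adding `AcceptEventSerializer` and `NetworkFlowMonitorSerializer` as embedded `omitempty` pointers is enough for the new `accept` and `network_flow_monitor` keys to appear only on those event types.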
+ +package telemetry + +// ContainersRunningTelemetryConfig holds the config used by the containers running telemetry +type ContainersRunningTelemetryConfig struct { + RuntimeEnabled bool + FIMEnabled bool +} diff --git a/pkg/security/telemetry/containers_running_telemetry_linux.go b/pkg/security/telemetry/containers_running_telemetry_linux.go index 3a325b7efa469..c95351d20ea3b 100644 --- a/pkg/security/telemetry/containers_running_telemetry_linux.go +++ b/pkg/security/telemetry/containers_running_telemetry_linux.go @@ -11,7 +11,6 @@ import ( "time" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/metrics" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-go/v5/statsd" @@ -19,12 +18,12 @@ import ( // ContainersRunningTelemetry reports environment information (e.g containers running) when the runtime security component is running type ContainersRunningTelemetry struct { - cfg *config.RuntimeSecurityConfig + cfg ContainersRunningTelemetryConfig containers *ContainersTelemetry } // NewContainersRunningTelemetry creates a new ContainersRunningTelemetry instance -func NewContainersRunningTelemetry(cfg *config.RuntimeSecurityConfig, statsdClient statsd.ClientInterface, wmeta workloadmeta.Component) (*ContainersRunningTelemetry, error) { +func NewContainersRunningTelemetry(cfg ContainersRunningTelemetryConfig, statsdClient statsd.ClientInterface, wmeta workloadmeta.Component) (*ContainersRunningTelemetry, error) { telemetrySender := NewSimpleTelemetrySenderFromStatsd(statsdClient) containersTelemetry, err := NewContainersTelemetry(telemetrySender, wmeta) if err != nil { diff --git a/pkg/security/telemetry/containers_running_telemetry_others.go b/pkg/security/telemetry/containers_running_telemetry_others.go index 3bb9658228d9a..d4db85a995294 100644 --- a/pkg/security/telemetry/containers_running_telemetry_others.go +++ b/pkg/security/telemetry/containers_running_telemetry_others.go @@ -11,7 +11,6 @@ import ( "context" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-go/v5/statsd" ) @@ -19,7 +18,7 @@ import ( type ContainersRunningTelemetry struct{} // NewContainersRunningTelemetry creates a new ContainersRunningTelemetry instance (not supported on non-linux platforms) -func NewContainersRunningTelemetry(_ *config.RuntimeSecurityConfig, _ statsd.ClientInterface, _ workloadmeta.Component) (*ContainersRunningTelemetry, error) { +func NewContainersRunningTelemetry(_ ContainersRunningTelemetryConfig, _ statsd.ClientInterface, _ workloadmeta.Component) (*ContainersRunningTelemetry, error) { return nil, nil } diff --git a/pkg/security/tests/README.md b/pkg/security/tests/README.md new file mode 100644 index 0000000000000..39a0ef963b46c --- /dev/null +++ b/pkg/security/tests/README.md @@ -0,0 +1,19 @@ +# Running tests + +* Running all tests: + +```bash +inv -e security-agent.functional-tests --verbose --skip-linters --testflags "-test.run '.*'" +``` + +* Running a single test: + +```bash +inv -e security-agent.functional-tests --verbose --skip-linters --testflags "-test.run 'TestConnect'" +``` + +* Running ebpfless tests: + +```bash +inv -e security-agent.ebpfless-functional-tests --verbose --skip-linters --testflags "-test.run '.*'" +``` diff --git a/pkg/security/tests/accept_test.go b/pkg/security/tests/accept_test.go new file mode 100644 
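With `ContainersRunningTelemetryConfig` introduced above, `NewContainersRunningTelemetry` no longer takes a `*config.RuntimeSecurityConfig`, which is also why the `pkg/security/config` import disappears from both the Linux and non-Linux files. A hedged sketch of a call site after this change (the telemetry import path is inferred from the file location, and the surrounding caller code is hypothetical):

```go
package example

import (
	workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
	"github.com/DataDog/datadog-go/v5/statsd"

	sectelemetry "github.com/DataDog/datadog-agent/pkg/security/telemetry"
)

// startContainersRunningTelemetry illustrates the new constructor signature:
// only the two flags the telemetry needs are passed, not the whole runtime
// security config.
func startContainersRunningTelemetry(runtimeEnabled, fimEnabled bool, statsdClient statsd.ClientInterface, wmeta workloadmeta.Component) (*sectelemetry.ContainersRunningTelemetry, error) {
	return sectelemetry.NewContainersRunningTelemetry(
		sectelemetry.ContainersRunningTelemetryConfig{
			RuntimeEnabled: runtimeEnabled,
			FIMEnabled:     fimEnabled,
		},
		statsdClient,
		wmeta,
	)
}
```

Note that on non-Linux platforms the constructor still returns `(nil, nil)`, so callers should be prepared for a nil telemetry instance.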
index 0000000000000..bc823441d4dbf --- /dev/null +++ b/pkg/security/tests/accept_test.go @@ -0,0 +1,129 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux && functionaltests + +// Package tests holds tests related files +package tests + +import ( + "context" + "math/rand/v2" + "strconv" + "testing" + + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/stretchr/testify/assert" + "golang.org/x/net/nettest" + "golang.org/x/sys/unix" +) + +func TestAcceptEvent(t *testing.T) { + SkipIfNotAvailable(t) + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_accept_af_inet", + Expression: `accept.addr.family == AF_INET && process.file.name == "syscall_tester"`, + }, + { + ID: "test_accept_af_inet6", + Expression: `accept.addr.family == AF_INET6 && process.file.name == "syscall_tester"`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + const MIX = 4000 + const MAX = 5000 + + t.Run("accept-af-inet-any-tcp-success-no-sockaddrin", func(t *testing.T) { + if ebpfLessEnabled { + t.Skip("Not available for ebpfLess") + } + port := rand.IntN(MAX-MIX) + MIX + + test.WaitSignal(t, func() error { + return runSyscallTesterFunc(context.Background(), t, syscallTester, "accept", "AF_INET", "0.0.0.0", "127.0.0.1", strconv.Itoa(port), "false") + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_accept_af_inet") + assert.Equal(t, "accept", event.GetType(), "wrong event type") + assert.Equal(t, uint16(unix.AF_INET), event.Accept.AddrFamily, "wrong address family") + assert.Equal(t, uint16(port), event.Accept.Addr.Port, "wrong address port") + assert.Equal(t, "127.0.0.1", event.Accept.Addr.IPNet.IP.String(), "wrong address") + assert.LessOrEqual(t, int64(0), event.Accept.Retval, "wrong retval") + test.validateAcceptSchema(t, event) + }) + }) + + t.Run("accept-af-inet-any-tcp-success-sockaddrin", func(t *testing.T) { + + port := rand.IntN(MAX-MIX) + MIX + + test.WaitSignal(t, func() error { + return runSyscallTesterFunc(context.Background(), t, syscallTester, "accept", "AF_INET", "0.0.0.0", "127.0.0.1", strconv.Itoa(port), "true") + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_accept_af_inet") + assert.Equal(t, "accept", event.GetType(), "wrong event type") + assert.Equal(t, uint16(unix.AF_INET), event.Accept.AddrFamily, "wrong address family") + assert.Equal(t, "127.0.0.1", event.Accept.Addr.IPNet.IP.String(), "wrong address") + assert.LessOrEqual(t, int64(0), event.Accept.Retval, "wrong retval") + test.validateAcceptSchema(t, event) + }) + }) + + t.Run("accept-af-inet6-any-tcp-success-no-sockaddrin", func(t *testing.T) { + if ebpfLessEnabled { + t.Skip("Not available for ebpfLess") + } + + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + port := rand.IntN(MAX-MIX) + MIX + + test.WaitSignal(t, func() error { + return runSyscallTesterFunc(context.Background(), t, syscallTester, "accept", "AF_INET6", "::", "::1", strconv.Itoa(port), "false") + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_accept_af_inet6") + 
assert.Equal(t, "accept", event.GetType(), "wrong event type") + assert.Equal(t, uint16(unix.AF_INET6), event.Accept.AddrFamily, "wrong address family") + assert.Equal(t, uint16(port), event.Accept.Addr.Port, "wrong address port") + assert.Equal(t, "::1", event.Accept.Addr.IPNet.IP.String(), "wrong address") + assert.LessOrEqual(t, int64(0), event.Accept.Retval, "wrong retval") + test.validateAcceptSchema(t, event) + }) + }) + + t.Run("accept-af-inet6-any-tcp-success-sockaddrin", func(t *testing.T) { + if !nettest.SupportsIPv6() { + t.Skip("IPv6 is not supported") + } + + port := rand.IntN(MAX-MIX) + MIX + + test.WaitSignal(t, func() error { + return runSyscallTesterFunc(context.Background(), t, syscallTester, "accept", "AF_INET6", "::", "::1", strconv.Itoa(port), "true") + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_accept_af_inet6") + assert.Equal(t, "accept", event.GetType(), "wrong event type") + assert.Equal(t, uint16(unix.AF_INET6), event.Accept.AddrFamily, "wrong address family") + assert.Equal(t, "::1", event.Accept.Addr.IPNet.IP.String(), "wrong address") + assert.LessOrEqual(t, int64(0), event.Accept.Retval, "wrong retval") + test.validateAcceptSchema(t, event) + }) + }) +} diff --git a/pkg/security/tests/cgroup_test.go b/pkg/security/tests/cgroup_test.go index 5259c04583f49..d5ca5b5108f6e 100644 --- a/pkg/security/tests/cgroup_test.go +++ b/pkg/security/tests/cgroup_test.go @@ -12,29 +12,82 @@ import ( "fmt" "os" "os/exec" + "slices" "strconv" "syscall" "testing" "github.com/stretchr/testify/assert" + "golang.org/x/sys/unix" "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" + "github.com/DataDog/datadog-agent/pkg/security/probe" "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/DataDog/datadog-agent/pkg/security/utils" ) -func createCGroup(name string) (string, error) { - cgroupPath := "/sys/fs/cgroup/memory/" + name - if err := os.MkdirAll(cgroupPath, 0700); err != nil { - return "", err +type testCGroup struct { + cgroupPath string + previousCGroupPath string +} + +func (cg *testCGroup) enter() error { + return os.WriteFile(cg.cgroupPath+"/cgroup.procs", []byte(strconv.Itoa(os.Getpid())), 0700) +} + +func (cg *testCGroup) leave(t *testing.T) { + if err := os.WriteFile("/sys/fs/cgroup"+cg.previousCGroupPath+"/cgroup.procs", []byte(strconv.Itoa(os.Getpid())), 0700); err != nil { + if err := os.WriteFile("/sys/fs/cgroup/systemd"+cg.previousCGroupPath+"/cgroup.procs", []byte(strconv.Itoa(os.Getpid())), 0700); err != nil { + t.Log(err) + return + } + } +} + +func (cg *testCGroup) remove(t *testing.T) { + if err := os.Remove(cg.cgroupPath); err != nil { + if content, err := os.ReadFile(cg.cgroupPath + "/cgroup.procs"); err == nil { + t.Logf("Processes in cgroup: %s", string(content)) + } + } +} + +func (cg *testCGroup) create() error { + return os.MkdirAll(cg.cgroupPath, 0700) +} + +func newCGroup(name, kind string) (*testCGroup, error) { + cgs, err := utils.GetProcControlGroups(uint32(os.Getpid()), uint32(os.Getpid())) + if err != nil { + return nil, err + } + + var previousCGroupPath string + for _, cg := range cgs { + if len(cg.Controllers) == 1 && cg.Controllers[0] == "" { + previousCGroupPath = cg.Path + break + } + if previousCGroupPath == "" { + previousCGroupPath = cg.Path + } else if previousCGroupPath == "/" { + previousCGroupPath = cg.Path + } + if 
slices.Contains(cg.Controllers, kind) || slices.Contains(cg.Controllers, "name="+kind) { + previousCGroupPath = cg.Path + break + } } - if err := os.WriteFile(cgroupPath+"/cgroup.procs", []byte(strconv.Itoa(os.Getpid())), 0700); err != nil { - return "", err + cgroupPath := "/sys/fs/cgroup/" + kind + "/" + name + cg := &testCGroup{ + previousCGroupPath: previousCGroupPath, + cgroupPath: cgroupPath, } - return cgroupPath, nil + return cg, nil } func TestCGroup(t *testing.T) { @@ -47,7 +100,7 @@ func TestCGroup(t *testing.T) { ruleDefs := []*rules.RuleDefinition{ { ID: "test_cgroup_id", - Expression: `open.file.path == "{{.Root}}/test-open" && cgroup.id =~ "*/cg1"`, // "/memory/cg1" or "/cg1" + Expression: `open.file.path == "{{.Root}}/test-open" && cgroup.id =~ "*/cg1"`, // "/cpu/cg1" or "/cg1" }, { ID: "test_cgroup_systemd", @@ -60,11 +113,20 @@ func TestCGroup(t *testing.T) { } defer test.Close() - cgroupPath, err := createCGroup("cg1") + testCGroup, err := newCGroup("cg1", "cpu") if err != nil { t.Fatal(err) } - defer os.RemoveAll(cgroupPath) + + if err := testCGroup.create(); err != nil { + t.Fatal(err) + } + defer testCGroup.remove(t) + + if err := testCGroup.enter(); err != nil { + t.Fatal(err) + } + defer testCGroup.leave(t) testFile, testFilePtr, err := test.Path("test-open") if err != nil { @@ -90,7 +152,7 @@ func TestCGroup(t *testing.T) { assertFieldEqual(t, event, "container.id", "") assertFieldEqual(t, event, "container.runtime", "") assert.Equal(t, containerutils.CGroupFlags(0), event.CGroupContext.CGroupFlags) - assertFieldIsOneOf(t, event, "cgroup.id", "/memory/cg1") + assertFieldIsOneOf(t, event, "cgroup.id", "/cpu/cg1") assertFieldIsOneOf(t, event, "cgroup.version", []int{1, 2}) test.validateOpenSchema(t, event) @@ -176,3 +238,144 @@ ExecStart=/usr/bin/touch %s`, testFile2) }) }) } + +func TestCGroupSnapshot(t *testing.T) { + if testEnvironment == DockerEnvironment { + t.Skip("skipping cgroup ID test in docker") + } + + SkipIfNotAvailable(t) + + _, cgroupContext, err := utils.GetProcContainerContext(uint32(os.Getpid()), uint32(os.Getpid())) + if err != nil { + t.Fatal(err) + } + + testCGroup, err := newCGroup("cg2", "systemd") + if err != nil { + t.Fatal(err) + } + + if err := testCGroup.create(); err != nil { + t.Fatal(err) + } + defer testCGroup.remove(t) + + if err := testCGroup.enter(); err != nil { + t.Fatal(err) + } + defer testCGroup.leave(t) + + executable, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + var testsuiteStats unix.Stat_t + if err := unix.Stat(executable, &testsuiteStats); err != nil { + t.Fatal(err) + } + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_cgroup_snapshot", + Expression: `open.file.path == "{{.Root}}/test-open" && cgroup.id != ""`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + testFile, _, err := test.Path("test-open") + if err != nil { + t.Fatal(err) + } + + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + var syscallTesterStats unix.Stat_t + if err := unix.Stat(syscallTester, &syscallTesterStats); err != nil { + t.Fatal(err) + } + + p, ok := test.probe.PlatformProbe.(*probe.EBPFProbe) + if !ok { + t.Skip("not supported") + } + + var cmd *exec.Cmd + test.WaitSignal(t, func() error { + cmd = exec.Command(syscallTester, "open", testFile) + pipe, err := cmd.StdinPipe() + if err != nil { + t.Fatal(err) + } + defer pipe.Close() + + if err := cmd.Start(); err != nil { + t.Fatal(err) + } + + 
return nil + }, func(event *model.Event, rule *rules.Rule) { + assertTriggeredRule(t, rule, "test_cgroup_snapshot") + test.validateOpenSchema(t, event) + + testsuiteEntry := p.Resolvers.ProcessResolver.Get(uint32(os.Getpid())) + syscallTesterEntry := p.Resolvers.ProcessResolver.Get(uint32(cmd.Process.Pid)) + assert.NotNil(t, testsuiteEntry) + assert.NotNil(t, syscallTesterEntry) + + // Check that testsuite has changed cgroup since its start + assert.NotEqual(t, cgroupContext.CGroupID, testsuiteEntry.CGroup.CGroupID) + assert.Equal(t, int(testsuiteEntry.Pid), os.Getpid()) + + // Check that both testsuite and syscall tester share the same cgroup + assert.Equal(t, testsuiteEntry.CGroup.CGroupID, syscallTesterEntry.CGroup.CGroupID) + assert.Equal(t, testsuiteEntry.CGroup.CGroupFile, syscallTesterEntry.CGroup.CGroupFile) + + // Check that we have the right cgroup inode + cgroupFS := utils.NewCGroupFS() + _, _, cgroupSysFSPath, err := cgroupFS.FindCGroupContext(uint32(os.Getpid()), uint32(os.Getpid())) + if err != nil { + t.Fatal(err) + } + + var stats unix.Stat_t + if err := unix.Stat(cgroupSysFSPath, &stats); err != nil { + t.Fatal(err) + } + assert.Equal(t, stats.Ino, testsuiteEntry.CGroup.CGroupFile.Inode) + + // Check we filled the kernel maps correctly with the same values than userspace for the testsuite process + var newEntry *model.ProcessCacheEntry + ebpfProbe := test.probe.PlatformProbe.(*probe.EBPFProbe) + ebpfProbe.Resolvers.ProcessResolver.ResolveFromKernelMaps(uint32(os.Getpid()), uint32(os.Getpid()), testsuiteStats.Ino, func(entry *model.ProcessCacheEntry, _ error) { + newEntry = entry + }) + assert.NotNil(t, newEntry) + if newEntry != nil { + assert.Equal(t, stats.Ino, newEntry.CGroup.CGroupFile.Inode) + } + + // Check we filled the kernel maps correctly with the same values than userspace for the syscall tester process + newEntry = nil + ebpfProbe.Resolvers.ProcessResolver.ResolveFromKernelMaps(syscallTesterEntry.Pid, syscallTesterEntry.Pid, syscallTesterStats.Ino, func(entry *model.ProcessCacheEntry, _ error) { + newEntry = entry + }) + assert.NotNil(t, newEntry) + if newEntry != nil { + assert.Equal(t, stats.Ino, newEntry.CGroup.CGroupFile.Inode) + } + }) + + if cmd != nil { + cmd.Process.Kill() + } +} diff --git a/pkg/security/tests/connect_test.go b/pkg/security/tests/connect_test.go index 7f56dcfb8956c..43d21f5b44fd8 100644 --- a/pkg/security/tests/connect_test.go +++ b/pkg/security/tests/connect_test.go @@ -11,10 +11,10 @@ package tests import ( "context" "fmt" + "golang.org/x/net/nettest" "net" "testing" - "golang.org/x/net/nettest" "golang.org/x/sys/unix" "github.com/stretchr/testify/assert" diff --git a/pkg/security/tests/constants_test.go b/pkg/security/tests/constants_test.go index 8982f6a1ff686..4886e79a99026 100644 --- a/pkg/security/tests/constants_test.go +++ b/pkg/security/tests/constants_test.go @@ -60,9 +60,6 @@ func TestOctogonConstants(t *testing.T) { if err != nil { t.Skipf("btfhub constant fetcher is not available: %v", err) } - if !btfhubFetcher.HasConstantsInStore() { - t.Skip("btfhub has no constant for this OS") - } fallbackFetcher := constantfetch.NewFallbackConstantFetcher(kv) diff --git a/pkg/security/tests/fim_test.go b/pkg/security/tests/fim_test.go new file mode 100644 index 0000000000000..3bfe37d3f56b9 --- /dev/null +++ b/pkg/security/tests/fim_test.go @@ -0,0 +1,77 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
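`TestCGroupSnapshot` above asserts that the cgroup inode stored by the probe (`CGroup.CGroupFile.Inode`) matches the inode of the corresponding directory under `/sys/fs/cgroup`, resolved via the helpers in `pkg/security/utils`. The stand-alone sketch below illustrates that correspondence using only the standard library and `golang.org/x/sys/unix`; it assumes a cgroup v2 layout (a single `0::<path>` entry in `/proc/self/cgroup`), whereas the real helpers also handle v1 hierarchies:

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/proc/self/cgroup")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		// Each line is "<hierarchy-id>:<controllers>:<path>"; the cgroup v2
		// entry has an empty controller list.
		parts := strings.SplitN(scanner.Text(), ":", 3)
		if len(parts) != 3 || parts[1] != "" {
			continue
		}

		sysfsPath := "/sys/fs/cgroup" + parts[2]

		var st unix.Stat_t
		if err := unix.Stat(sysfsPath, &st); err != nil {
			panic(err)
		}
		// This inode is what the test compares against CGroup.CGroupFile.Inode.
		fmt.Printf("cgroup %s -> inode %d\n", parts[2], st.Ino)
	}
}
```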
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux && functionaltests + +// Package tests holds tests related files +package tests + +import ( + "os" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" +) + +func TestFIMOpen(t *testing.T) { + SkipIfNotAvailable(t) + + ruleDefs := []*rules.RuleDefinition{ + { + ID: "test_fim_rule", + Expression: `fim.write.file.path == "{{.Root}}/test-open"`, + }, + } + + test, err := newTestModule(t, nil, ruleDefs) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + testFile, _, err := test.Path("test-open") + if err != nil { + t.Fatal(err) + } + defer os.Remove(testFile) + + // open test + test.WaitSignal(t, func() error { + f, err := os.Create(testFile) + if err != nil { + return err + } + return f.Close() + }, func(event *model.Event, rule *rules.Rule) { + assert.Equal(t, "open", event.GetType(), "wrong event type") + assertTriggeredRule(t, rule, "__fim_expanded_open__test_fim_rule") + assert.Equal(t, rule.Def.ID, "test_fim_rule") + assertInode(t, event.Open.File.Inode, getInode(t, testFile)) + }) + + // chmod test + test.WaitSignal(t, func() error { + return os.Chmod(testFile, 0o777) + }, func(event *model.Event, rule *rules.Rule) { + assert.Equal(t, "chmod", event.GetType(), "wrong event type") + assertTriggeredRule(t, rule, "__fim_expanded_chmod__test_fim_rule") + assert.Equal(t, rule.Def.ID, "test_fim_rule") + assertInode(t, event.Chmod.File.Inode, getInode(t, testFile)) + }) + + // open but read only + _ = test.GetSignal(t, func() error { + f, err := os.Open(testFile) + if err != nil { + return err + } + return f.Close() + }, func(_ *model.Event, _ *rules.Rule) { + t.Error("Event received (rule is in write only mode, and the open is read only)") + }) +} diff --git a/pkg/security/tests/main_linux.go b/pkg/security/tests/main_linux.go index bea7537bf4f91..b599db65fa22d 100644 --- a/pkg/security/tests/main_linux.go +++ b/pkg/security/tests/main_linux.go @@ -74,6 +74,9 @@ func SkipIfNotAvailable(t *testing.T) { "~TestOsOrigin", "~TestSpan", "~TestChdir", + "~TestBindEvent", + "~TestAccept", + "~TestConnect", "TestMountEvent", "TestMount", "TestMountPropagated", diff --git a/pkg/security/tests/misc_test.go b/pkg/security/tests/misc_test.go index 6c218eb27b881..fde6ef81f3498 100644 --- a/pkg/security/tests/misc_test.go +++ b/pkg/security/tests/misc_test.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" - "github.com/DataDog/datadog-agent/pkg/security/utils" + "github.com/DataDog/datadog-agent/pkg/security/utils/hostnameutils" ) func TestEnv(t *testing.T) { @@ -59,7 +59,7 @@ func TestOsOrigin(t *testing.T) { func TestHostname(t *testing.T) { SkipIfNotAvailable(t) - hostname, err := utils.GetHostname() + hostname, err := hostnameutils.GetHostname() if err != nil || hostname == "" { hostname = "unknown" } diff --git a/pkg/security/tests/module_tester.go b/pkg/security/tests/module_tester.go index 0b01cbcd72d49..444495773967a 100644 --- a/pkg/security/tests/module_tester.go +++ b/pkg/security/tests/module_tester.go @@ -557,7 +557,7 @@ func (tm *testModule) WaitSignal(tb testing.TB, action func() error, cb onRuleHa //nolint:deadcode,unused func (tm *testModule) marshalEvent(ev *model.Event) (string, error) { - b, err := 
serializers.MarshalEvent(ev, nil) + b, err := serializers.MarshalEvent(ev) return string(b), err } @@ -811,6 +811,8 @@ func genTestConfigs(cfgDir string, opts testOpts) (*emconfig.Config, *secconfig. "EnforcementDisarmerExecutableMaxAllowed": opts.enforcementDisarmerExecutableMaxAllowed, "EnforcementDisarmerExecutablePeriod": opts.enforcementDisarmerExecutablePeriod, "EventServerRetention": opts.eventServerRetention, + "EnableSelfTests": opts.enableSelfTests, + "NetworkFlowMonitorEnabled": opts.networkFlowMonitorEnabled, }); err != nil { return nil, nil, err } diff --git a/pkg/security/tests/module_tester_linux.go b/pkg/security/tests/module_tester_linux.go index 37a54cf6fee5b..c0adbdaee5ee7 100644 --- a/pkg/security/tests/module_tester_linux.go +++ b/pkg/security/tests/module_tester_linux.go @@ -33,6 +33,7 @@ import ( "github.com/stretchr/testify/assert" "golang.org/x/sys/unix" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/impl" ebpftelemetry "github.com/DataDog/datadog-agent/pkg/ebpf/telemetry" "github.com/DataDog/datadog-agent/pkg/eventmonitor" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" @@ -77,6 +78,8 @@ event_monitoring_config: - "*custom*" network: enabled: true + flow_monitor: + enabled: {{ .NetworkFlowMonitorEnabled }} ingress: enabled: {{ .NetworkIngressEnabled }} raw_packet: @@ -173,7 +176,10 @@ runtime_security_config: {{end}} self_test: - enabled: false + enabled: {{.EnableSelfTests}} +{{if .EnableSelfTests}} + send_report: true +{{end}} policies: dir: {{.TestPoliciesDir}} @@ -342,7 +348,7 @@ func assertReturnValue(tb testing.TB, retval, expected int64) bool { //nolint:deadcode,unused func validateProcessContextLineage(tb testing.TB, event *model.Event) { - eventJSON, err := serializers.MarshalEvent(event, nil) + eventJSON, err := serializers.MarshalEvent(event) if err != nil { tb.Errorf("failed to marshal event: %v", err) return @@ -461,7 +467,7 @@ func validateProcessContextSECL(tb testing.TB, event *model.Event) { valid := nameFieldValid && pathFieldValid if !valid { - eventJSON, err := serializers.MarshalEvent(event, nil) + eventJSON, err := serializers.MarshalEvent(event) if err != nil { tb.Errorf("failed to marshal event: %v", err) return @@ -516,7 +522,7 @@ func validateSyscallContext(tb testing.TB, event *model.Event, jsonPath string) return } - eventJSON, err := serializers.MarshalEvent(event, nil) + eventJSON, err := serializers.MarshalEvent(event) if err != nil { tb.Errorf("failed to marshal event: %v", err) return @@ -751,7 +757,8 @@ func newTestModuleWithOnDemandProbes(t testing.TB, onDemandHooks []rules.OnDeman if !opts.staticOpts.disableRuntimeSecurity { msgSender := newFakeMsgSender(testMod) - cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, nil, module.Opts{EventSender: testMod, MsgSender: msgSender}) + compression := logscompression.NewComponent() + cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, nil, module.Opts{EventSender: testMod, MsgSender: msgSender}, compression) if err != nil { return nil, fmt.Errorf("failed to create module: %w", err) } @@ -1453,6 +1460,16 @@ func searchForSyscalls(ad *dump.ActivityDump) bool { return false } +//nolint:deadcode,unused +func searchForNetworkFlowMonitorEvents(ad *dump.ActivityDump) bool { + for _, node := range ad.ActivityTree.ProcessNodes { + if len(node.NetworkDevices) > 0 { + return true + } + } + return false +} + //nolint:deadcode,unused func (tm *testModule) getADFromDumpID(id 
*activityDumpIdentifier) (*dump.ActivityDump, error) { var fileProtobuf string diff --git a/pkg/security/tests/module_tester_windows.go b/pkg/security/tests/module_tester_windows.go index 25876dc813692..8a8cb70e123b0 100644 --- a/pkg/security/tests/module_tester_windows.go +++ b/pkg/security/tests/module_tester_windows.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/go-multierror" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/impl" "github.com/DataDog/datadog-agent/pkg/eventmonitor" secconfig "github.com/DataDog/datadog-agent/pkg/security/config" "github.com/DataDog/datadog-agent/pkg/security/module" @@ -171,7 +172,8 @@ func newTestModule(t testing.TB, macroDefs []*rules.MacroDefinition, ruleDefs [] var ruleSetloadedErr *multierror.Error if !opts.staticOpts.disableRuntimeSecurity { - cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, nil, module.Opts{EventSender: testMod}) + compression := logscompression.NewComponent() + cws, err := module.NewCWSConsumer(testMod.eventMonitor, secconfig.RuntimeSecurity, nil, module.Opts{EventSender: testMod}, compression) if err != nil { return nil, fmt.Errorf("failed to create module: %w", err) } diff --git a/pkg/security/tests/network_test.go b/pkg/security/tests/network_test.go index 2a34b3101e17a..0b510033cc4f6 100644 --- a/pkg/security/tests/network_test.go +++ b/pkg/security/tests/network_test.go @@ -9,11 +9,13 @@ package tests import ( + "context" "fmt" "net" "net/netip" "os" "path/filepath" + "strconv" "strings" "testing" @@ -225,7 +227,7 @@ func TestRawPacketFilter(t *testing.T) { runTest := func(t *testing.T, filters []rawpacket.Filter, opts rawpacket.ProgOpts) { progSpecs, err := rawpacket.FiltersToProgramSpecs(rawPacketEventMap.FD(), clsRouterMapFd.FD(), filters, opts) - assert.Nil(t, err) + assert.NoError(t, err) assert.NotEmpty(t, progSpecs) colSpec := ebpf.CollectionSpec{ @@ -236,7 +238,7 @@ func TestRawPacketFilter(t *testing.T) { } progsCol, err := ebpf.NewCollection(&colSpec) - assert.Nil(t, err) + assert.NoError(t, err) if err == nil { progsCol.Close() } @@ -253,9 +255,73 @@ func TestRawPacketFilter(t *testing.T) { }) t.Run("all-with-limit", func(t *testing.T) { + // kernels < 5.2 have a limit of 4k instructions for the eBPF program size + checkKernelCompatibility(t, "Old debian kernels", func(kv *kernel.Version) bool { + return kv.IsDebianKernel() && kv.Code < kernel.Kernel5_2 + }) + opts := rawpacket.DefaultProgOpts opts.MaxProgSize = 4000 opts.NopInstLen = 3500 - runTest(t, filters, rawpacket.DefaultProgOpts) + runTest(t, filters, opts) + }) +} + +func TestNetworkFlowSendUDP4(t *testing.T) { + SkipIfNotAvailable(t) + + checkKernelCompatibility(t, "RHEL, SLES, SUSE and Oracle kernels", func(kv *kernel.Version) bool { + // TODO: Oracle because we are missing offsets + // OpenSUSE distributions are missing the dummy kernel module + return kv.IsSLESKernel() || kv.IsOpenSUSELeapKernel() || probe.IsNetworkFlowMonitorNotSupported(kv) + }) + + if testEnvironment != DockerEnvironment && !env.IsContainerized() { + if out, err := loadModule("veth"); err != nil { + t.Fatalf("couldn't load 'veth' module: %s, %v", string(out), err) + } + } + + testDestIP := "127.0.0.1" + testUDPDestPort := 12345 + + rule := &rules.RuleDefinition{ + ID: "test_rule_network_flow", + Expression: `network_flow_monitor.flows.length > 0 && process.file.name == "syscall_tester"`, + } + + test, err := newTestModule(t, nil, []*rules.RuleDefinition{rule}, withStaticOpts( + testOpts{ + 
networkFlowMonitorEnabled: true, + }, + )) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + syscallTester, err := loadSyscallTester(t, test, "syscall_tester") + if err != nil { + t.Fatal(err) + } + + t.Run("test_network_flow_send_udp4", func(t *testing.T) { + test.WaitSignal(t, func() error { + return runSyscallTesterFunc(context.Background(), t, syscallTester, "network_flow_send_udp4", testDestIP, strconv.Itoa(testUDPDestPort)) + }, func(event *model.Event, _ *rules.Rule) { + assert.Equal(t, "network_flow_monitor", event.GetType(), "wrong event type") + assert.Equal(t, uint64(1), event.NetworkFlowMonitor.FlowsCount, "wrong FlowsCount") + assert.Equal(t, 1, len(event.NetworkFlowMonitor.Flows), "wrong flows count") + if len(event.NetworkFlowMonitor.Flows) > 0 { + assert.Equal(t, testDestIP, event.NetworkFlowMonitor.Flows[0].Destination.IPNet.IP.To4().String(), "wrong destination IP") + assert.Equal(t, uint16(testUDPDestPort), event.NetworkFlowMonitor.Flows[0].Destination.Port, "wrong destination Port") + assert.Equal(t, uint16(model.IPProtoUDP), event.NetworkFlowMonitor.Flows[0].L4Protocol, "wrong L4 protocol") + assert.Equal(t, uint16(model.EthPIP), event.NetworkFlowMonitor.Flows[0].L3Protocol, "wrong L3 protocol") + assert.Equal(t, uint64(1), event.NetworkFlowMonitor.Flows[0].Egress.PacketCount, "wrong egress packet count") + assert.Equal(t, uint64(46), event.NetworkFlowMonitor.Flows[0].Egress.DataSize, "wrong egress data size") // full packet size including l2 header + assert.Equal(t, uint64(0), event.NetworkFlowMonitor.Flows[0].Ingress.PacketCount, "wrong ingress packet count") + assert.Equal(t, uint64(0), event.NetworkFlowMonitor.Flows[0].Ingress.DataSize, "wrong ingress data size") + } + }) }) } diff --git a/pkg/security/tests/overlayfs_test.go b/pkg/security/tests/overlayfs_test.go index ad91123dda5b7..1d3fbc042d70b 100644 --- a/pkg/security/tests/overlayfs_test.go +++ b/pkg/security/tests/overlayfs_test.go @@ -21,6 +21,7 @@ import ( "golang.org/x/sys/unix" "github.com/DataDog/datadog-agent/pkg/security/ebpf/kernel" + sprobe "github.com/DataDog/datadog-agent/pkg/security/probe" "github.com/DataDog/datadog-agent/pkg/security/secl/model" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) @@ -58,11 +59,11 @@ func TestOverlayFS(t *testing.T) { }, { ID: "test_rule_unlink", - Expression: `unlink.file.path in ["{{.Root}}/bind/read.txt", "{{.Root}}/bind/override.txt", "{{.Root}}/bind/renamed.txt", "{{.Root}}/bind/new.txt", "{{.Root}}/bind/chmod.txt", "{{.Root}}/bind/utimes.txt", "{{.Root}}/bind/chown.txt", "{{.Root}}/bind/xattr.txt", "{{.Root}}/bind/truncate.txt", "{{.Root}}/bind/link.txt", "{{.Root}}/bind/linked.txt"]`, + Expression: `unlink.file.path == "{{.Root}}/bind/unlink.txt"`, }, { ID: "test_rule_rename", - Expression: `rename.file.path == "{{.Root}}/bind/create.txt"`, + Expression: `rename.file.path in ["{{.Root}}/bind/create.txt", "{{.Root}}/bind/new.txt"]`, }, { ID: "test_rule_rmdir", @@ -70,7 +71,7 @@ func TestOverlayFS(t *testing.T) { }, { ID: "test_rule_chmod", - Expression: `chmod.file.path == "{{.Root}}/bind/chmod.txt"`, + Expression: `chmod.file.path in ["{{.Root}}/bind/chmod.txt", "{{.Root}}/bind/new.txt"]`, }, { ID: "test_rule_mkdir", @@ -82,7 +83,7 @@ func TestOverlayFS(t *testing.T) { }, { ID: "test_rule_chown", - Expression: `chown.file.path == "{{.Root}}/bind/chown.txt"`, + Expression: `chown.file.path in ["{{.Root}}/bind/chown.txt", "{{.Root}}/bind/new.txt"]`, }, { ID: "test_rule_xattr", @@ -114,6 +115,11 @@ func TestOverlayFS(t *testing.T) { } 
defer test.Close() + p, ok := test.probe.PlatformProbe.(*sprobe.EBPFProbe) + if !ok { + t.Skip("not supported") + } + // create layers testLower, testUpper, testWordir, testMerged := createOverlayLayers(t, test) @@ -121,7 +127,7 @@ func TestOverlayFS(t *testing.T) { for _, filename := range []string{ "lower/read.txt", "lower/override.txt", "lower/create.txt", "lower/chmod.txt", "lower/utimes.txt", "lower/chown.txt", "lower/xattr.txt", "lower/truncate.txt", "lower/linked.txt", - "lower/discarded.txt", "lower/invalidator.txt"} { + "lower/discarded.txt", "lower/invalidator.txt", "lower/unlink.txt"} { _, _, err = test.Create(filename) if err != nil { t.Fatal(err) @@ -167,6 +173,27 @@ func TestOverlayFS(t *testing.T) { } }() + validateInodeAndLayerFallback := func(t *testing.T, filename string, expectedInode uint64, expectedUpperLayer bool) { + fileFields, err := p.Resolvers.ProcessResolver.RetrieveFileFieldsFromProcfs(filename) + assert.NoError(t, err, "shouldn't return an error") + if expectedInode != 0 { + assert.Equal(t, expectedInode, fileFields.Inode, "wrong inode using fallback") + } + assert.Equal(t, expectedUpperLayer, fileFields.IsInUpperLayer(), "wrong layer using fallback for inode %d", expectedInode) + } + + validateInodeAndLayerRuntime := func(t *testing.T, expectedInode uint64, expectedUpperLayer bool, fileFields *model.FileFields) { + if expectedInode != 0 { + assert.Equal(t, expectedInode, fileFields.Inode, "wrong inode in runtime event") + } + assert.Equal(t, expectedUpperLayer, fileFields.IsInUpperLayer(), "wrong layer in runtime event for inode %d", expectedInode) + } + + validateInodeAndLayer := func(t *testing.T, filename string, expectedInode uint64, expectedUpperLayer bool, fileFields *model.FileFields) { + validateInodeAndLayerRuntime(t, expectedInode, expectedUpperLayer, fileFields) + validateInodeAndLayerFallback(t, filename, expectedInode, expectedUpperLayer) + } + // open a file in lower in RDONLY and check that open/unlink inode are valid from userspace // perspective and equals t.Run("read-lower", func(t *testing.T) { @@ -185,23 +212,8 @@ func TestOverlayFS(t *testing.T) { return f.Close() }, func(event *model.Event, _ *rules.Rule) { inode = getInode(t, testFile) - inUpperLayer, _ := event.GetFieldValue("open.file.in_upper_layer") - - success := assert.Equal(t, inode, event.Open.File.Inode, "wrong open inode") - success = assert.Equal(t, false, inUpperLayer, "should be in base layer") && success - - if !success { - _ = os.Remove(testFile) - } - }) - - test.WaitSignal(t, func() error { - return os.Remove(testFile) - }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") - assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, false, inUpperLayer, "should be in base layer") + validateInodeAndLayer(t, testFile, inode, false, &event.Open.File.FileFields) }) }) @@ -221,23 +233,8 @@ func TestOverlayFS(t *testing.T) { return f.Close() }, func(event *model.Event, _ *rules.Rule) { inode = getInode(t, testFile) - inUpperLayer, _ := event.GetFieldValue("open.file.in_upper_layer") - success := assert.Equal(t, event.Open.File.Inode, inode, "wrong open inode") - success = assert.Equal(t, false, inUpperLayer, "should be in base layer") && success - - if !success { - _ = os.Remove(testFile) - } - }) - - test.WaitSignal(t, func() error { - return os.Remove(testFile) - }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") - - 
assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayer(t, testFile, inode, true, &event.Open.File.FileFields) }) }) @@ -257,23 +254,8 @@ func TestOverlayFS(t *testing.T) { return f.Close() }, func(event *model.Event, _ *rules.Rule) { inode = getInode(t, testFile) - inUpperLayer, _ := event.GetFieldValue("open.file.in_upper_layer") - - success := assert.Equal(t, inode, event.Open.File.Inode, "wrong open inode") - success = assert.Equal(t, true, inUpperLayer, "should be in upper layer") && success - if !success { - _ = os.Remove(testFile) - } - }) - - test.WaitSignal(t, func() error { - return os.Remove(testFile) - }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") - - assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayer(t, testFile, inode, true, &event.Open.File.FileFields) }) }) @@ -293,35 +275,17 @@ func TestOverlayFS(t *testing.T) { test.WaitSignal(t, func() error { return os.Rename(oldFile, newFile) }, func(event *model.Event, _ *rules.Rule) { - success := true - if value, _ := event.GetFieldValue("rename.file.path"); value.(string) != oldFile { t.Errorf("expected filename not found %s != %s", value.(string), oldFile) - success = false } inode = getInode(t, newFile) - inUpperLayer, _ := event.GetFieldValue("rename.file.in_upper_layer") - success = assert.Equal(t, inode, event.Rename.New.Inode, "wrong rename inode") && success - success = assert.Equal(t, false, inUpperLayer, "should be in base layer") && success + assert.Equal(t, inode, event.Rename.New.Inode, "wrong rename inode") + assert.Equal(t, false, event.Rename.Old.IsInUpperLayer(), "should be in base layer") + assert.Equal(t, true, event.Rename.New.IsInUpperLayer(), "should be in upper layer") - inUpperLayer, _ = event.GetFieldValue("rename.file.destination.in_upper_layer") - - success = assert.Equal(t, true, inUpperLayer, "should be in upper layer") && success - - if !success { - _ = os.Remove(newFile) - } - }) - - test.WaitSignal(t, func() error { - return os.Remove(newFile) - }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") - - assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayerFallback(t, newFile, inode, true) }) }) @@ -378,10 +342,8 @@ func TestOverlayFS(t *testing.T) { test.WaitSignal(t, func() error { return os.Remove(testDir) }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("rmdir.file.in_upper_layer") - assert.Equal(t, inode, event.Rmdir.File.Inode, "wrong rmdir inode") - assert.Equal(t, false, inUpperLayer, "should be in base layer") + assert.Equal(t, false, event.Rmdir.File.IsInUpperLayer(), "should be in base layer") }) }) @@ -397,23 +359,30 @@ func TestOverlayFS(t *testing.T) { return os.Chmod(testFile, 0777) }, func(event *model.Event, _ *rules.Rule) { inode = getInode(t, testFile) - inUpperLayer, _ := event.GetFieldValue("chmod.file.in_upper_layer") - success := assert.Equal(t, inode, event.Chmod.File.Inode, "wrong chmod inode") - success = assert.Equal(t, false, inUpperLayer, "should be in base layer") && success + validateInodeAndLayer(t, testFile, inode, true, &event.Chmod.File.FileFields) + }) + }) - if !success { - _ = 
os.Remove(testFile) - } + t.Run("chmod-upper", func(t *testing.T) { + checkKernelCompatibility(t, "Oracle kernels", func(kv *kernel.Version) bool { + // skip Oracle for now + return kv.IsOracleUEKKernel() }) + testFile, _, err := test.Path("bind/new.txt") + if err != nil { + t.Fatal(err) + } + + var inode uint64 + test.WaitSignal(t, func() error { - return os.Remove(testFile) + return os.Chmod(testFile, 0777) }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") + inode = getInode(t, testFile) - assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayer(t, testFile, inode, true, &event.Chmod.File.FileFields) }) }) @@ -429,23 +398,8 @@ func TestOverlayFS(t *testing.T) { return syscall.Mkdir(testFile, 0777) }, func(event *model.Event, _ *rules.Rule) { inode = getInode(t, testFile) - inUpperLayer, _ := event.GetFieldValue("mkdir.file.in_upper_layer") - - success := assert.Equal(t, inode, event.Mkdir.File.Inode, "wrong mkdir inode") - success = assert.Equal(t, true, inUpperLayer, "should be in upper layer") && success - if !success { - _ = os.Remove(testFile) - } - }) - - test.WaitSignal(t, func() error { - return os.Remove(testFile) - }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("rmdir.file.in_upper_layer") - - assert.Equal(t, inode, event.Rmdir.File.Inode, "wrong rmdir inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayer(t, testFile, inode, true, &event.Mkdir.File.FileFields) }) }) @@ -461,23 +415,8 @@ func TestOverlayFS(t *testing.T) { return os.Chtimes(testFile, time.Now(), time.Now()) }, func(event *model.Event, _ *rules.Rule) { inode = getInode(t, testFile) - inUpperLayer, _ := event.GetFieldValue("utimes.file.in_upper_layer") - - success := assert.Equal(t, inode, event.Utimes.File.Inode, "wrong utimes inode") - success = assert.Equal(t, false, inUpperLayer, "should be in base layer") && success - - if !success { - _ = os.Remove(testFile) - } - }) - test.WaitSignal(t, func() error { - return os.Remove(testFile) - }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") - - assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayer(t, testFile, inode, true, &event.Utimes.File.FileFields) }) }) @@ -493,23 +432,30 @@ func TestOverlayFS(t *testing.T) { return os.Chown(testFile, os.Getuid(), os.Getgid()) }, func(event *model.Event, _ *rules.Rule) { inode = getInode(t, testFile) - inUpperLayer, _ := event.GetFieldValue("chown.file.in_upper_layer") - success := assert.Equal(t, inode, event.Chown.File.Inode, "wrong chown inode") - success = assert.Equal(t, false, inUpperLayer, "should be in base layer") && success + validateInodeAndLayer(t, testFile, inode, true, &event.Chown.File.FileFields) + }) + }) - if !success { - _ = os.Remove(testFile) - } + t.Run("chown-upper", func(t *testing.T) { + checkKernelCompatibility(t, "Oracle kernels", func(kv *kernel.Version) bool { + // skip Oracle for now + return kv.IsOracleUEKKernel() }) + testFile, _, err := test.Path("bind/new.txt") + if err != nil { + t.Fatal(err) + } + + var inode uint64 + test.WaitSignal(t, func() error { - return os.Remove(testFile) + return os.Chown(testFile, os.Getuid(), os.Getgid()) }, func(event *model.Event, _ *rules.Rule) { - 
inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") + inode = getInode(t, testFile) - assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayer(t, testFile, inode, true, &event.Chown.File.FileFields) }) }) @@ -536,23 +482,8 @@ func TestOverlayFS(t *testing.T) { return nil }, func(event *model.Event, _ *rules.Rule) { inode = getInode(t, testFile) - inUpperLayer, _ := event.GetFieldValue("setxattr.file.in_upper_layer") - - success := assert.Equal(t, inode, event.SetXAttr.File.Inode, "wrong setxattr inode") - success = assert.Equal(t, false, inUpperLayer, "should be in base layer") && success - if !success { - _ = os.Remove(testFile) - } - }) - - test.WaitSignal(t, func() error { - return os.Remove(testFile) - }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") - - assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayer(t, testFile, inode, true, &event.SetXAttr.File.FileFields) }) }) @@ -568,23 +499,30 @@ func TestOverlayFS(t *testing.T) { return os.Truncate(testFile, 0) }, func(event *model.Event, _ *rules.Rule) { inode = getInode(t, testFile) - inUpperLayer, _ := event.GetFieldValue("open.file.in_upper_layer") - success := assert.Equal(t, inode, event.Open.File.Inode, "wrong open inode") - success = assert.Equal(t, false, inUpperLayer, "should be in base layer") && success + validateInodeAndLayer(t, testFile, inode, true, &event.Open.File.FileFields) + }) + }) - if !success { - _ = os.Remove(testFile) - } + t.Run("truncate-upper", func(t *testing.T) { + checkKernelCompatibility(t, "Oracle kernels", func(kv *kernel.Version) bool { + // skip Oracle for now + return kv.IsOracleUEKKernel() }) + testFile, _, err := test.Path("bind/new.txt") + if err != nil { + t.Fatal(err) + } + + var inode uint64 + test.WaitSignal(t, func() error { - return os.Remove(testFile) + return os.Truncate(testFile, 0) }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") + inode = getInode(t, testFile) - assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayer(t, testFile, inode, true, &event.Open.File.FileFields) }) }) @@ -599,46 +537,62 @@ func TestOverlayFS(t *testing.T) { t.Fatal(err) } - var inode uint64 - test.WaitSignal(t, func() error { return os.Link(testSrc, testTarget) }, func(event *model.Event, _ *rules.Rule) { - inode = getInode(t, testSrc) - success := assert.Equal(t, inode, event.Link.Source.Inode, "wrong link source inode") - - inUpperLayer, _ := event.GetFieldValue("link.file.in_upper_layer") - success = assert.Equal(t, false, inUpperLayer, "should be in base layer") && success + // fake inode + validateInodeAndLayer(t, testTarget, 0, true, &event.Link.Target.FileFields) + }) + }) - inUpperLayer, _ = event.GetFieldValue("link.file.destination.in_upper_layer") - success = assert.Equal(t, true, inUpperLayer, "should be in upper layer") && success + t.Run("unlink-lower", func(t *testing.T) { + testFile, _, err := test.Path("bind/unlink.txt") + if err != nil { + t.Fatal(err) + } - if !success { - _ = os.Remove(testSrc) - _ = os.Remove(testTarget) - } - }) + inode := getInode(t, testFile) test.WaitSignal(t, func() error { - return os.Remove(testSrc) + return 
os.Remove(testFile) }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") - - success := assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - success = assert.Equal(t, true, inUpperLayer, "should be in base layer") && success + // impossible to test with the fallback, the file is deleted + validateInodeAndLayerRuntime(t, inode, false, &event.Unlink.File.FileFields) + }) + }) - if !success { - _ = os.Remove(testTarget) - } + t.Run("rename-upper", func(t *testing.T) { + checkKernelCompatibility(t, "Oracle kernels", func(kv *kernel.Version) bool { + // skip Oracle for now + return kv.IsOracleUEKKernel() }) + oldFile, _, err := test.Path("bind/new.txt") + if err != nil { + t.Fatal(err) + } + + newFile, _, err := test.Path("bind/new-renamed.txt") + if err != nil { + t.Fatal(err) + } + + var inode uint64 + test.WaitSignal(t, func() error { - return os.Remove(testTarget) + return os.Rename(oldFile, newFile) }, func(event *model.Event, _ *rules.Rule) { - inUpperLayer, _ := event.GetFieldValue("unlink.file.in_upper_layer") + if value, _ := event.GetFieldValue("rename.file.path"); value.(string) != oldFile { + t.Errorf("expected filename not found %s != %s", value.(string), oldFile) + } + + inode = getInode(t, newFile) + + assert.Equal(t, inode, event.Rename.New.Inode, "wrong rename inode") + assert.Equal(t, true, event.Rename.Old.IsInUpperLayer(), "should be in upper layer") + assert.Equal(t, true, event.Rename.New.IsInUpperLayer(), "should be in upper layer") - assert.Equal(t, inode, event.Unlink.File.Inode, "wrong unlink inode") - assert.Equal(t, true, inUpperLayer, "should be in upper layer") + validateInodeAndLayerFallback(t, newFile, inode, true) }) }) } diff --git a/pkg/security/tests/process_test.go b/pkg/security/tests/process_test.go index 796a8b9ede73f..4b04d15851cd8 100644 --- a/pkg/security/tests/process_test.go +++ b/pkg/security/tests/process_test.go @@ -39,6 +39,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/security/secl/compiler/eval" "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/security/secl/rules" ) @@ -435,7 +436,7 @@ func TestProcessContext(t *testing.T) { argv := strings.Split(args.(string), " ") assert.Equal(t, 2, len(argv), "incorrect number of args: %s", argv) - assert.Equal(t, model.MaxArgEnvSize-1, len(argv[1]), "wrong arg length") + assert.Equal(t, sharedconsts.MaxArgEnvSize-1, len(argv[1]), "wrong arg length") assert.Equal(t, true, strings.HasSuffix(argv[1], "..."), "args not truncated") // truncated is reported if a single argument is truncated or if the list is truncated @@ -480,8 +481,8 @@ func TestProcessContext(t *testing.T) { argv := strings.Split(execArgs.(string), " ") if ebpfLessEnabled { - assert.Equal(t, model.MaxArgsEnvsSize-1, len(argv), "incorrect number of args: %s", argv) - for i := 0; i != model.MaxArgsEnvsSize-1; i++ { + assert.Equal(t, sharedconsts.MaxArgsEnvsSize-1, len(argv), "incorrect number of args: %s", argv) + for i := 0; i != sharedconsts.MaxArgsEnvsSize-1; i++ { assert.Equal(t, args[i], argv[i], "expected arg not found") } } else { @@ -530,11 +531,11 @@ func TestProcessContext(t *testing.T) { argv := strings.Split(execArgs.(string), " ") if ebpfLessEnabled { - assert.Equal(t, model.MaxArgsEnvsSize-1, len(argv), "incorrect number of args: %s", argv) - for i := 0; i != model.MaxArgsEnvsSize-1; i++ { + assert.Equal(t, 
sharedconsts.MaxArgsEnvsSize-1, len(argv), "incorrect number of args: %s", argv) + for i := 0; i != sharedconsts.MaxArgsEnvsSize-1; i++ { expected := args[i] - if len(expected) > model.MaxArgEnvSize { - expected = args[i][:model.MaxArgEnvSize-4] + "..." // 4 is the size number of the string + if len(expected) > sharedconsts.MaxArgEnvSize { + expected = args[i][:sharedconsts.MaxArgEnvSize-4] + "..." // 4 is the size number of the string } assert.Equal(t, expected, argv[i], "expected arg not found") } @@ -542,8 +543,8 @@ func TestProcessContext(t *testing.T) { assert.Equal(t, 457, len(argv), "incorrect number of args: %s", argv) for i := 0; i != 457; i++ { expected := args[i] - if len(expected) > model.MaxArgEnvSize { - expected = args[i][:model.MaxArgEnvSize-4] + "..." // 4 is the size number of the string + if len(expected) > sharedconsts.MaxArgEnvSize { + expected = args[i][:sharedconsts.MaxArgEnvSize-4] + "..." // 4 is the size number of the string } assert.Equal(t, expected, argv[i], "expected arg not found") } @@ -594,7 +595,7 @@ func TestProcessContext(t *testing.T) { envp := (execEnvp.([]string)) assert.Equal(t, 2, len(envp), "incorrect number of envs: %s", envp) - assert.Equal(t, model.MaxArgEnvSize-1, len(envp[1]), "wrong env length") + assert.Equal(t, sharedconsts.MaxArgEnvSize-1, len(envp[1]), "wrong env length") assert.Equal(t, true, strings.HasSuffix(envp[1], "..."), "envs not truncated") // truncated is reported if a single environment variable is truncated or if the list is truncated @@ -645,8 +646,8 @@ func TestProcessContext(t *testing.T) { envp := (execEnvp.([]string)) if ebpfLessEnabled { - assert.Equal(t, model.MaxArgsEnvsSize, len(envp), "incorrect number of envs: %s", envp) - for i := 0; i != model.MaxArgsEnvsSize; i++ { + assert.Equal(t, sharedconsts.MaxArgsEnvsSize, len(envp), "incorrect number of envs: %s", envp) + for i := 0; i != sharedconsts.MaxArgsEnvsSize; i++ { assert.Equal(t, envs[i], envp[i], "expected env not found") } } else { @@ -707,11 +708,11 @@ func TestProcessContext(t *testing.T) { envp := (execEnvp.([]string)) if ebpfLessEnabled { - assert.Equal(t, model.MaxArgsEnvsSize, len(envp), "incorrect number of envs: %s", envp) - for i := 0; i != model.MaxArgsEnvsSize; i++ { + assert.Equal(t, sharedconsts.MaxArgsEnvsSize, len(envp), "incorrect number of envs: %s", envp) + for i := 0; i != sharedconsts.MaxArgsEnvsSize; i++ { expected := envs[i] - if len(expected) > model.MaxArgEnvSize { - expected = envs[i][:model.MaxArgEnvSize-4] + "..." // 4 is the size number of the string + if len(expected) > sharedconsts.MaxArgEnvSize { + expected = envs[i][:sharedconsts.MaxArgEnvSize-4] + "..." // 4 is the size number of the string } assert.Equal(t, expected, envp[i], "expected env not found") } @@ -719,8 +720,8 @@ func TestProcessContext(t *testing.T) { assert.Equal(t, 863, len(envp), "incorrect number of envs: %s", envp) for i := 0; i != 863; i++ { expected := envs[i] - if len(expected) > model.MaxArgEnvSize { - expected = envs[i][:model.MaxArgEnvSize-4] + "..." // 4 is the size number of the string + if len(expected) > sharedconsts.MaxArgEnvSize { + expected = envs[i][:sharedconsts.MaxArgEnvSize-4] + "..." 
// 4 is the size number of the string } assert.Equal(t, expected, envp[i], "expected env not found") } @@ -1811,7 +1812,7 @@ func TestProcessExit(t *testing.T) { test.validateExitSchema(t, event) assertTriggeredRule(t, rule, "test_exit_ok") assertFieldEqual(t, event, "exit.file.path", sleepExec) - assert.Equal(t, uint32(model.ExitExited), event.Exit.Cause, "wrong exit cause") + assert.Equal(t, uint32(sharedconsts.ExitExited), event.Exit.Cause, "wrong exit cause") assert.Equal(t, uint32(0), event.Exit.Code, "wrong exit code") assert.False(t, event.ProcessContext.ExitTime.Before(event.ProcessContext.ExecTime), "exit time < exec time") }) @@ -1830,7 +1831,7 @@ func TestProcessExit(t *testing.T) { test.validateExitSchema(t, event) assertTriggeredRule(t, rule, "test_exit_error") assertFieldEqual(t, event, "exit.file.path", sleepExec) - assert.Equal(t, uint32(model.ExitExited), event.Exit.Cause, "wrong exit cause") + assert.Equal(t, uint32(sharedconsts.ExitExited), event.Exit.Cause, "wrong exit cause") assert.Equal(t, uint32(1), event.Exit.Code, "wrong exit code") assert.False(t, event.ProcessContext.ExitTime.Before(event.ProcessContext.ExecTime), "exit time < exec time") }) @@ -1849,7 +1850,7 @@ func TestProcessExit(t *testing.T) { test.validateExitSchema(t, event) assertTriggeredRule(t, rule, "test_exit_coredump") assertFieldEqual(t, event, "exit.file.path", sleepExec) - assert.Equal(t, uint32(model.ExitCoreDumped), event.Exit.Cause, "wrong exit cause") + assert.Equal(t, uint32(sharedconsts.ExitCoreDumped), event.Exit.Cause, "wrong exit cause") assert.Equal(t, uint32(syscall.SIGQUIT), event.Exit.Code, "wrong exit code") assert.False(t, event.ProcessContext.ExitTime.Before(event.ProcessContext.ExecTime), "exit time < exec time") }) @@ -1870,7 +1871,7 @@ func TestProcessExit(t *testing.T) { test.validateExitSchema(t, event) assertTriggeredRule(t, rule, "test_exit_signal") assertFieldEqual(t, event, "exit.file.path", sleepExec) - assert.Equal(t, uint32(model.ExitSignaled), event.Exit.Cause, "wrong exit cause") + assert.Equal(t, uint32(sharedconsts.ExitSignaled), event.Exit.Cause, "wrong exit cause") assert.Equal(t, uint32(syscall.SIGKILL), event.Exit.Code, "wrong exit code") assert.False(t, event.ProcessContext.ExitTime.Before(event.ProcessContext.ExecTime), "exit time < exec time") }) @@ -1888,7 +1889,7 @@ func TestProcessExit(t *testing.T) { test.validateExitSchema(t, event) assertTriggeredRule(t, rule, "test_exit_time_1") assertFieldEqual(t, event, "exit.file.path", sleepExec) - assert.Equal(t, uint32(model.ExitExited), event.Exit.Cause, "wrong exit cause") + assert.Equal(t, uint32(sharedconsts.ExitExited), event.Exit.Cause, "wrong exit cause") assert.Equal(t, uint32(0), event.Exit.Code, "wrong exit code") assert.False(t, event.ProcessContext.ExitTime.Before(event.ProcessContext.ExecTime), "exit time < exec time") }) @@ -1906,7 +1907,7 @@ func TestProcessExit(t *testing.T) { test.validateExitSchema(t, event) assertTriggeredRule(t, rule, "test_exit_time_2") assertFieldEqual(t, event, "exit.file.path", sleepExec) - assert.Equal(t, uint32(model.ExitExited), event.Exit.Cause, "wrong exit cause") + assert.Equal(t, uint32(sharedconsts.ExitExited), event.Exit.Cause, "wrong exit cause") assert.Equal(t, uint32(0), event.Exit.Code, "wrong exit code") assert.False(t, event.ProcessContext.ExitTime.Before(event.ProcessContext.ExecTime), "exit time < exec time") }) diff --git a/pkg/security/tests/rule_filters_test.go b/pkg/security/tests/rule_filters_test.go index 0fe5187e1a183..0e69b3019a600 100644 --- 
a/pkg/security/tests/rule_filters_test.go +++ b/pkg/security/tests/rule_filters_test.go @@ -29,7 +29,7 @@ func TestSECLRuleFilter(t *testing.T) { Code: kernel.Kernel5_9, } - m, err := filtermodel.NewRuleFilterModel(nil, "") + m, err := filtermodel.NewRuleFilterModel(filtermodel.RuleFilterEventConfig{}) assert.NoError(t, err) m.Version = kv seclRuleFilter := rules.NewSECLRuleFilter(m) diff --git a/pkg/security/tests/schemas.go b/pkg/security/tests/schemas.go index 1fc9d7f34497b..b421b91d4ab3e 100644 --- a/pkg/security/tests/schemas.go +++ b/pkg/security/tests/schemas.go @@ -227,14 +227,32 @@ func (tm *testModule) validateIMDSSchema(t *testing.T, event *model.Event) bool return tm.validateEventSchema(t, event, "file:///imds.schema.json") } +//nolint:deadcode,unused +func (tm *testModule) validateAcceptSchema(t *testing.T, event *model.Event) bool { + if ebpfLessEnabled { + return true + } + + t.Helper() + return tm.validateEventSchema(t, event, "file:///accept.schema.json") +} + //nolint:deadcode,unused func (tm *testModule) validateBindSchema(t *testing.T, event *model.Event) bool { + if ebpfLessEnabled { + return true + } + t.Helper() return tm.validateEventSchema(t, event, "file:///bind.schema.json") } //nolint:deadcode,unused func (tm *testModule) validateConnectSchema(t *testing.T, event *model.Event) bool { + if ebpfLessEnabled { + return true + } + t.Helper() return tm.validateEventSchema(t, event, "file:///connect.schema.json") } diff --git a/pkg/security/tests/security_profile_test.go b/pkg/security/tests/security_profile_test.go index 01faf63d61ed5..4bc2599b63a56 100644 --- a/pkg/security/tests/security_profile_test.go +++ b/pkg/security/tests/security_profile_test.go @@ -2287,9 +2287,12 @@ func generateSyscallTestProfile(add ...model.Syscall) *dumpsv1.SecurityProfile { 267, // SysReadlinkat 273, // SysSetRobustList 281, // SysEpollPwait + 290, // SysEventfd2 291, // SysEpollCreate1 293, // SysPipe2 + 302, // SysPrlimit64 317, // SysSeccomp + 321, // SysBpf 334, // SysRseq 435, // SysClone3 439, // SysFaccessat2 diff --git a/pkg/security/tests/selftests_test.go b/pkg/security/tests/selftests_test.go new file mode 100644 index 0000000000000..55ab684aaa5f3 --- /dev/null +++ b/pkg/security/tests/selftests_test.go @@ -0,0 +1,64 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux && functionaltests + +// Package tests holds tests related files + +package tests + +import ( + "errors" + "testing" + "time" + + "github.com/DataDog/datadog-agent/pkg/security/events" + "github.com/DataDog/datadog-agent/pkg/security/secl/rules" + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/avast/retry-go/v4" + "github.com/oliveagle/jsonpath" + "github.com/stretchr/testify/assert" +) + +func TestSelfTests(t *testing.T) { + SkipIfNotAvailable(t) + + test, err := newTestModule(t, nil, []*rules.RuleDefinition{}, withStaticOpts(testOpts{enableSelfTests: true})) + if err != nil { + t.Fatal(err) + } + defer test.Close() + + test.msgSender.flush() + + err = retry.Do(func() error { + msg := test.msgSender.getMsg(events.SelfTestRuleID) + if msg == nil { + return errors.New("self_test event not found") + } + + log.Debug("self_tests event tags:", msg.Tags) + assert.NotEmpty(t, msg.Tags, "event's tags are empty") + + jsonPathValidation(test, msg.Data, func(_ *testModule, obj interface{}) { + succeededTests, err := jsonpath.JsonPathLookup(obj, `$.succeeded_tests`) + if err != nil { + t.Errorf("could not get succeeded_tests field: %v", err) + } + failedTests, err := jsonpath.JsonPathLookup(obj, `$.failed_tests`) + if err != nil { + t.Errorf("could not get failed_tests field: %v", err) + } + + if len(succeededTests.([]interface{})) != 3 || len(failedTests.([]interface{})) > 0 { + t.Errorf("test results: successes: %v, fails: %v", succeededTests, failedTests) + } + + }) + + return nil + }, retry.Attempts(5), retry.Delay(2*time.Second), retry.MaxDelay(60*time.Second), retry.DelayType(retry.BackOffDelay)) + assert.NoError(t, err) +} diff --git a/pkg/security/tests/syscall_tester/c/syscall_tester.c b/pkg/security/tests/syscall_tester/c/syscall_tester.c index 5fc0ba53ddf09..8f8db9df6a947 100644 --- a/pkg/security/tests/syscall_tester/c/syscall_tester.c +++ b/pkg/security/tests/syscall_tester/c/syscall_tester.c @@ -378,6 +378,194 @@ int self_exec(int argc, char **argv) { return EXIT_SUCCESS; } +void* connect_thread_ipv4(void *arg) { + int s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); + connect(s, (struct sockaddr*)arg, sizeof(struct sockaddr)); + return NULL; +} + +int test_accept_af_inet(int argc, char** argv) { + pthread_t thread; + + if (argc != 5) { + fprintf(stderr, "%s: please specify a valid command:\n", __FUNCTION__); + fprintf(stderr, "Arg1: IP address where the socket should bind to\n"); + fprintf(stderr, "Arg2: IP address where the socket should connect to\n"); + fprintf(stderr, "Arg3: Port to bind\n"); + fprintf(stderr, "Arg4: Pass sockaddr_in \n"); + return EXIT_FAILURE; + } + + const char* bind_to = argv[1]; + const char* connect_to = argv[2]; + int port = atoi(argv[3]); + + struct sockaddr_in *sockAddrPtr = NULL; + struct sockaddr_in sockAddr; + memset(&sockAddr, 0, sizeof(struct sockaddr_in)); + + socklen_t sockLen = sizeof(struct sockaddr_in); + + if (strcmp(argv[4], "true") == 0) { + sockAddrPtr = &sockAddr; + } + + int s; + s = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); + + if (s < 0) { + perror("socket"); + return EXIT_FAILURE; + } + + int ip32 = 0; + + struct sockaddr_in bindAddr; + memset(&bindAddr, 0, sizeof(struct sockaddr_in)); + bindAddr.sin_family = AF_INET; + if (inet_pton(AF_INET, bind_to, &ip32) != 1) { + perror("inet_pton bind_to"); + return EXIT_FAILURE; + } + + bindAddr.sin_addr.s_addr = htonl(ip32); + bindAddr.sin_port = htons(port); + + struct sockaddr_in connectAddr; + memset(&connectAddr, 0, sizeof(struct sockaddr_in)); + 
connectAddr.sin_family = AF_INET;
+    if (inet_pton(AF_INET, connect_to, &ip32) != 1) {
+        perror("inet_pton connect_to");
+        return EXIT_FAILURE;
+    }
+
+    connectAddr.sin_addr.s_addr = ip32;
+    connectAddr.sin_port = htons(port);
+
+    if (bind(s, (struct sockaddr*)&bindAddr, sizeof(struct sockaddr)) < 0) {
+        close(s);
+        perror("Failed to bind");
+        return EXIT_FAILURE;
+    }
+
+    if (listen(s, 10) < 0) {
+        close(s);
+        perror("Failed to listen");
+        return EXIT_FAILURE;
+    }
+
+    pthread_create(&thread, NULL, connect_thread_ipv4, (void*)&connectAddr);
+
+    if (accept(s, (struct sockaddr*)sockAddrPtr, &sockLen) < 0) {
+        perror("Failed to accept");
+    }
+
+    close(s);
+    pthread_join(thread, NULL);
+    return EXIT_SUCCESS;
+}
+
+void* connect_thread_ipv6(void *arg) {
+    int s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
+    connect(s, (struct sockaddr*)arg, sizeof(struct sockaddr_in6));
+
+    return NULL;
+}
+
+int test_accept_af_inet6(int argc, char** argv) {
+    pthread_t thread;
+
+    if (argc != 5) {
+        fprintf(stderr, "%s: please specify a valid command:\n", __FUNCTION__);
+        fprintf(stderr, "Arg1: IP address where the socket should bind to\n");
+        fprintf(stderr, "Arg2: IP address where the socket should connect to\n");
+        fprintf(stderr, "Arg3: Port to bind\n");
+        fprintf(stderr, "Arg4: Pass sockaddr_in6\n");
+        return EXIT_FAILURE;
+    }
+
+    const char* bind_to = argv[1];
+    const char* connect_to = argv[2];
+    int port = atoi(argv[3]);
+
+    struct sockaddr_in6 *sockAddrPtr = NULL;
+    struct sockaddr_in6 sockAddr;
+    memset(&sockAddr, 0, sizeof(struct sockaddr_in6));
+
+    socklen_t sockLen = sizeof(struct sockaddr_in6);
+
+    if (strcmp(argv[4], "true") == 0) {
+        sockAddrPtr = &sockAddr;
+    }
+
+    int s;
+    s = socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP);
+
+    if (s < 0) {
+        perror("socket");
+        return EXIT_FAILURE;
+    }
+
+    struct in6_addr ip6;
+
+    struct sockaddr_in6 bindAddr;
+    memset(&bindAddr, 0, sizeof(struct sockaddr_in6));
+    bindAddr.sin6_family = AF_INET6;
+    if (inet_pton(AF_INET6, bind_to, &ip6) != 1) {
+        perror("inet_pton bind_to");
+        return EXIT_FAILURE;
+    }
+    bindAddr.sin6_addr = ip6;
+    bindAddr.sin6_port = htons(port);
+
+    struct sockaddr_in6 connectAddr;
+    memset(&connectAddr, 0, sizeof(struct sockaddr_in6));
+    connectAddr.sin6_family = AF_INET6;
+    if (inet_pton(AF_INET6, connect_to, &ip6) != 1) {
+        perror("inet_pton connect_to");
+        return EXIT_FAILURE;
+    }
+    connectAddr.sin6_addr = ip6;
+    connectAddr.sin6_port = htons(port);
+
+    if (bind(s, (struct sockaddr*)&bindAddr, sizeof(struct sockaddr_in6)) < 0) {
+        close(s);
+        perror("Failed to bind");
+        return EXIT_FAILURE;
+    }
+
+    if (listen(s, 10) < 0) {
+        close(s);
+        perror("Failed to listen");
+        return EXIT_FAILURE;
+    }
+
+    pthread_create(&thread, NULL, connect_thread_ipv6, (void*)&connectAddr);
+
+    if (accept(s, (struct sockaddr*)sockAddrPtr, &sockLen) < 0) {
+        perror("Failed to accept");
+    }
+
+    pthread_join(thread, NULL);
+    close(s);
+    return EXIT_SUCCESS;
+}
+
+int test_accept(int argc, char** argv) {
+    if (argc <= 2) {
+        fprintf(stderr, "Please specify an addr_type\n");
+        return EXIT_FAILURE;
+    }
+
+    if(strcmp(argv[1],"AF_INET") == 0) {
+        return test_accept_af_inet(argc - 1, argv + 1);
+    } else if(strcmp(argv[1], "AF_INET6") == 0) {
+        return test_accept_af_inet6(argc - 1, argv + 1);
+    }
+
+    return EXIT_FAILURE;
+}
+
 int test_bind_af_inet(int argc, char** argv) {
     if (argc != 3) {
@@ -868,6 +1056,49 @@ int test_new_netns_exec(int argc, char **argv) {
     return EXIT_FAILURE;
 }
 
+int test_network_flow_send_udp4(int argc, char **argv) {
+    if (argc < 3) {
+        fprintf(stderr, "Please specify 
the remote IP address and port\n"); + return EXIT_FAILURE; + } + + int sockfd; + struct sockaddr_in server_addr; + const char *message = "DATA"; + + // Create a DGRAM socket + sockfd = socket(AF_INET, SOCK_DGRAM, 0); + if (sockfd < 0) { + fprintf(stderr, "Socket creation failed\n"); + return EXIT_FAILURE; + } + + // Configure server address structure + memset(&server_addr, 0, sizeof(server_addr)); + server_addr.sin_family = AF_INET; + server_addr.sin_port = htons(atoi(argv[2])); + server_addr.sin_addr.s_addr = inet_addr(argv[1]); + + // Send the message + if (sendto(sockfd, message, strlen(message), 0, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) { + fprintf(stderr, "Failed to send data\n"); + close(sockfd); + return EXIT_FAILURE; + } + + printf("Message sent: %s\n", message); + pid_t pid; + + // Get the process ID + pid = getpid(); + printf("Process ID: %d\n", pid); + + // Close the socket + close(sockfd); + printf("Socket closed.\n"); + return EXIT_SUCCESS; +} + int main(int argc, char **argv) { setbuf(stdout, NULL); @@ -915,6 +1146,8 @@ int main(int argc, char **argv) { exit_code = test_process_set(sub_argc, sub_argv); } else if (strcmp(cmd, "self-exec") == 0) { exit_code = self_exec(sub_argc, sub_argv); + } else if (strcmp(cmd, "accept") == 0) { + exit_code = test_accept(sub_argc, sub_argv); } else if (strcmp(cmd, "bind") == 0) { exit_code = test_bind(sub_argc, sub_argv); } else if (strcmp(cmd, "connect") == 0) { @@ -947,6 +1180,8 @@ int main(int argc, char **argv) { exit_code = test_slow_cat(sub_argc, sub_argv); } else if (strcmp(cmd, "slow-write") == 0) { exit_code = test_slow_write(sub_argc, sub_argv); + } else if (strcmp(cmd, "network_flow_send_udp4") == 0) { + exit_code = test_network_flow_send_udp4(sub_argc, sub_argv); } else { fprintf(stderr, "Unknown command `%s`\n", cmd); diff --git a/pkg/security/tests/syscall_tester/go/syscall_go_tester.go b/pkg/security/tests/syscall_tester/go/syscall_go_tester.go index ba61cc7f81627..ea10c3c5295fe 100644 --- a/pkg/security/tests/syscall_tester/go/syscall_go_tester.go +++ b/pkg/security/tests/syscall_tester/go/syscall_go_tester.go @@ -26,8 +26,8 @@ import ( authenticationv1 "k8s.io/api/authentication/v1" "github.com/DataDog/datadog-agent/cmd/cws-instrumentation/subcommands/injectcmd" - "github.com/DataDog/datadog-agent/pkg/security/resolvers/usersessions" "github.com/DataDog/datadog-agent/pkg/security/tests/testutils" + "github.com/DataDog/datadog-agent/pkg/security/utils/k8sutils" ) var ( @@ -113,7 +113,7 @@ func K8SUserSessionTest(executable string, openPath string) error { } // prepare K8S user session context - data, err := usersessions.PrepareK8SUserSessionContext(&authenticationv1.UserInfo{ + data, err := k8sutils.PrepareK8SUserSessionContext(&authenticationv1.UserInfo{ Username: "qwerty.azerty@datadoghq.com", UID: "azerty.qwerty@datadoghq.com", Groups: []string{ diff --git a/pkg/security/tests/testopts.go b/pkg/security/tests/testopts.go index fe1a31333cc26..41f30def00263 100644 --- a/pkg/security/tests/testopts.go +++ b/pkg/security/tests/testopts.go @@ -10,7 +10,6 @@ package tests import ( "reflect" - "slices" "time" "github.com/DataDog/datadog-agent/pkg/security/resolvers/tags" @@ -74,6 +73,8 @@ type testOpts struct { enforcementDisarmerExecutablePeriod time.Duration eventServerRetention time.Duration discardRuntime bool + enableSelfTests bool + networkFlowMonitorEnabled bool } type dynamicTestOpts struct { @@ -109,55 +110,5 @@ func withForceReload() optFunc { } func (to testOpts) Equal(opts testOpts) bool { - return 
to.disableApprovers == opts.disableApprovers && - to.disableEnvVarsResolution == opts.disableEnvVarsResolution && - to.enableActivityDump == opts.enableActivityDump && - to.activityDumpRateLimiter == opts.activityDumpRateLimiter && - to.activityDumpTagRules == opts.activityDumpTagRules && - to.activityDumpDuration == opts.activityDumpDuration && - to.activityDumpLoadControllerPeriod == opts.activityDumpLoadControllerPeriod && - to.activityDumpTracedCgroupsCount == opts.activityDumpTracedCgroupsCount && - to.activityDumpCgroupDifferentiateArgs == opts.activityDumpCgroupDifferentiateArgs && - to.activityDumpAutoSuppressionEnabled == opts.activityDumpAutoSuppressionEnabled && - to.activityDumpLoadControllerTimeout == opts.activityDumpLoadControllerTimeout && - to.activityDumpSyscallMonitorPeriod == opts.activityDumpSyscallMonitorPeriod && - reflect.DeepEqual(to.activityDumpTracedEventTypes, opts.activityDumpTracedEventTypes) && - to.activityDumpLocalStorageDirectory == opts.activityDumpLocalStorageDirectory && - to.activityDumpLocalStorageCompression == opts.activityDumpLocalStorageCompression && - reflect.DeepEqual(to.activityDumpLocalStorageFormats, opts.activityDumpLocalStorageFormats) && - to.enableSecurityProfile == opts.enableSecurityProfile && - to.securityProfileMaxImageTags == opts.securityProfileMaxImageTags && - to.securityProfileDir == opts.securityProfileDir && - to.securityProfileWatchDir == opts.securityProfileWatchDir && - to.enableAutoSuppression == opts.enableAutoSuppression && - slices.Equal(to.autoSuppressionEventTypes, opts.autoSuppressionEventTypes) && - to.enableAnomalyDetection == opts.enableAnomalyDetection && - slices.Equal(to.anomalyDetectionEventTypes, opts.anomalyDetectionEventTypes) && - to.anomalyDetectionDefaultMinimumStablePeriod == opts.anomalyDetectionDefaultMinimumStablePeriod && - to.anomalyDetectionMinimumStablePeriodExec == opts.anomalyDetectionMinimumStablePeriodExec && - to.anomalyDetectionMinimumStablePeriodDNS == opts.anomalyDetectionMinimumStablePeriodDNS && - to.anomalyDetectionWarmupPeriod == opts.anomalyDetectionWarmupPeriod && - to.disableDiscarders == opts.disableDiscarders && - to.disableFilters == opts.disableFilters && - to.disableERPCDentryResolution == opts.disableERPCDentryResolution && - to.disableMapDentryResolution == opts.disableMapDentryResolution && - reflect.DeepEqual(to.envsWithValue, opts.envsWithValue) && - to.disableRuntimeSecurity == opts.disableRuntimeSecurity && - to.enableSBOM == opts.enableSBOM && - to.enableHostSBOM == opts.enableHostSBOM && - to.snapshotRuleMatchHandler == nil && opts.snapshotRuleMatchHandler == nil && - to.preStartCallback == nil && opts.preStartCallback == nil && - to.networkIngressEnabled == opts.networkIngressEnabled && - to.networkRawPacketEnabled == opts.networkRawPacketEnabled && - to.disableOnDemandRateLimiter == opts.disableOnDemandRateLimiter && - to.ebpfLessEnabled == opts.ebpfLessEnabled && - to.enforcementExcludeBinary == opts.enforcementExcludeBinary && - to.enforcementDisarmerContainerEnabled == opts.enforcementDisarmerContainerEnabled && - to.enforcementDisarmerContainerMaxAllowed == opts.enforcementDisarmerContainerMaxAllowed && - to.enforcementDisarmerContainerPeriod == opts.enforcementDisarmerContainerPeriod && - to.enforcementDisarmerExecutableEnabled == opts.enforcementDisarmerExecutableEnabled && - to.enforcementDisarmerExecutableMaxAllowed == opts.enforcementDisarmerExecutableMaxAllowed && - to.enforcementDisarmerExecutablePeriod == opts.enforcementDisarmerExecutablePeriod && - 
to.eventServerRetention == opts.eventServerRetention &&
-		to.discardRuntime == opts.discardRuntime
+	return reflect.DeepEqual(to, opts)
 }
diff --git a/pkg/security/utils/cache/lru_2layers.go b/pkg/security/utils/cache/lru_2layers.go
new file mode 100644
index 0000000000000..66f0a71ef09d2
--- /dev/null
+++ b/pkg/security/utils/cache/lru_2layers.go
@@ -0,0 +1,172 @@
+// Unless explicitly stated otherwise all files in this repository are licensed
+// under the Apache License Version 2.0.
+// This product includes software developed at Datadog (https://www.datadoghq.com/).
+// Copyright 2016-present Datadog, Inc.
+
+// Package cache holds cache related files
+package cache
+
+import (
+	"sync"
+
+	"github.com/hashicorp/golang-lru/v2/simplelru"
+	"go.uber.org/atomic"
+)
+
+// TwoLayersLRU defines a two-layer LRU cache.
+type TwoLayersLRU[K1 comparable, K2 comparable, V any] struct {
+	sync.RWMutex
+
+	cache *simplelru.LRU[K1, *simplelru.LRU[K2, V]]
+	len   *atomic.Uint64
+	size  int
+}
+
+// NewTwoLayersLRU returns a new cache.
+func NewTwoLayersLRU[K1 comparable, K2 comparable, V any](size int) (*TwoLayersLRU[K1, K2, V], error) {
+	cache, err := simplelru.NewLRU[K1, *simplelru.LRU[K2, V]](size+1, nil) // +1 as we want to handle the eviction manually
+	if err != nil {
+		return nil, err
+	}
+
+	return &TwoLayersLRU[K1, K2, V]{
+		cache: cache,
+		len:   atomic.NewUint64(0),
+		size:  size,
+	}, nil
+}
+
+// Add adds a value to the cache. Returns true if an eviction occurred.
+func (tll *TwoLayersLRU[K1, K2, V]) Add(k1 K1, k2 K2, v V) bool {
+	tll.Lock()
+	defer tll.Unlock()
+
+	l2LRU, exists := tll.cache.Get(k1)
+	if !exists {
+		lru, err := simplelru.NewLRU[K2, V](tll.size, nil)
+		if err != nil {
+			return false
+		}
+		l2LRU = lru
+
+		tll.cache.Add(k1, lru)
+	}
+
+	// check whether the key already exists so that the length counter is propagated properly
+	if l2LRU.Contains(k2) {
+		return l2LRU.Add(k2, v)
+	}
+
+	var evicted bool
+
+	// handle len in order to generate potential evictions
+	n := tll.len.Load()
+	if n >= uint64(tll.size) {
+		_, _, _, evicted = tll.removeOldest()
+	}
+
+	tll.len.Inc()
+
+	return l2LRU.Add(k2, v) || evicted
+}
+
+// RemoveKey1 removes the whole layer 2 for the given key1.
+func (tll *TwoLayersLRU[K1, K2, V]) RemoveKey1(k1 K1) bool {
+	tll.Lock()
+	defer tll.Unlock()
+
+	l2LRU, exists := tll.cache.Peek(k1)
+	if !exists {
+		return false
+	}
+
+	size := l2LRU.Len()
+	tll.len.Sub(uint64(size))
+
+	tll.cache.Remove(k1)
+
+	return true
+}
+
+// RemoveKey2 removes the entry in the second layer
+func (tll *TwoLayersLRU[K1, K2, V]) RemoveKey2(k1 K1, k2 K2) bool {
+	tll.Lock()
+	defer tll.Unlock()
+
+	l2LRU, exists := tll.cache.Peek(k1)
+	if !exists {
+		return false
+	}
+	if !l2LRU.Remove(k2) {
+		return false
+	}
+
+	if l2LRU.Len() == 0 {
+		tll.cache.Remove(k1)
+	}
+
+	tll.len.Dec()
+
+	return true
+}
+
+// RemoveOldest removes the oldest element
+func (tll *TwoLayersLRU[K1, K2, V]) RemoveOldest() (K1, K2, V, bool) {
+	tll.Lock()
+	defer tll.Unlock()
+	return tll.removeOldest()
+}
+
+func (tll *TwoLayersLRU[K1, K2, V]) removeOldest() (k1 K1, k2 K2, v V, evicted bool) {
+	k1, l2LRU, exists := tll.cache.GetOldest()
+	if !exists {
+		return
+	}
+
+	k2, v, evicted = l2LRU.RemoveOldest()
+
+	// remove the lru if empty
+	if l2LRU.Len() == 0 {
+		tll.cache.Remove(k1)
+	}
+
+	if evicted {
+		tll.len.Dec()
+	}
+
+	return k1, k2, v, evicted
+}
+
+// Get looks up key values from the cache.
+func (tll *TwoLayersLRU[K1, K2, V]) Get(k1 K1, k2 K2) (v V, ok bool) { + tll.Lock() + defer tll.Unlock() + + l2LRU, exists := tll.cache.Get(k1) + if !exists { + return v, false + } + + return l2LRU.Get(k2) +} + +// Len returns the number of entries +func (tll *TwoLayersLRU[K1, K2, V]) Len() int { + return int(tll.len.Load()) +} + +// Walk through all the keys +func (tll *TwoLayersLRU[K1, K2, V]) Walk(cb func(k1 K1, k2 K2, v V)) { + tll.RLock() + defer tll.RUnlock() + + for _, k1 := range tll.cache.Keys() { + if l2LRU, exists := tll.cache.Peek(k1); exists { + for _, k2 := range l2LRU.Keys() { + if value, exists := l2LRU.Peek(k2); exists { + cb(k1, k2, value) + } + } + } + } +} diff --git a/pkg/security/utils/cache/lru_2layers_test.go b/pkg/security/utils/cache/lru_2layers_test.go new file mode 100644 index 0000000000000..aa287e58bf818 --- /dev/null +++ b/pkg/security/utils/cache/lru_2layers_test.go @@ -0,0 +1,96 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package cache holds cache related files +package cache + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestTwoLayersLRU(t *testing.T) { + cache, err := NewTwoLayersLRU[string, int, int](2) + assert.Nil(t, err) + + t.Run("not-exists", func(t *testing.T) { + value, exists := cache.Get("a", 1) + assert.False(t, exists) + assert.Equal(t, value, 0) + }) + + t.Run("add-no-eviction", func(t *testing.T) { + evicted := cache.Add("a", 1, 44) + assert.False(t, evicted) + assert.Equal(t, cache.Len(), 1) + }) + + t.Run("get-key", func(t *testing.T) { + value, exists := cache.Get("a", 1) + assert.True(t, exists) + assert.Equal(t, value, 44) + }) + + t.Run("add-no-eviction", func(t *testing.T) { + evicted := cache.Add("a", 2, 55) + assert.False(t, evicted) + assert.Equal(t, cache.Len(), 2) + }) + + t.Run("remove-key2", func(t *testing.T) { + exists := cache.RemoveKey2("a", 2) + assert.True(t, exists) + assert.Equal(t, cache.Len(), 1) + }) + + t.Run("add-no-eviction", func(t *testing.T) { + evicted := cache.Add("b", 10, 99) + assert.False(t, evicted) + assert.Equal(t, cache.Len(), 2) + }) + + t.Run("remove-key2-oldest", func(t *testing.T) { + k1, k2, v, evicted := cache.RemoveOldest() + assert.True(t, evicted) + assert.Equal(t, k1, "a") + assert.Equal(t, k2, 1) + assert.Equal(t, v, 44) + assert.Equal(t, cache.Len(), 1) + }) + + // now the oldest is b/10 + t.Run("add-eviction", func(t *testing.T) { + evicted := cache.Add("c", 20, 990) + assert.False(t, evicted) + assert.Equal(t, cache.Len(), 2) + + evicted = cache.Add("d", 30, 1990) + assert.True(t, evicted) + assert.Equal(t, cache.Len(), 2) + + _, exists := cache.Get("b", 10) + assert.False(t, exists) + }) + + t.Run("remove-key1", func(t *testing.T) { + exists := cache.RemoveKey1("c") + assert.True(t, exists) + assert.Equal(t, cache.Len(), 1) + + _, exists = cache.Get("c", 20) + assert.False(t, exists) + }) + + t.Run("walk", func(t *testing.T) { + var count int + + cache.Walk(func(_ string, _, _ int) { + count++ + }) + + assert.Equal(t, count, 1) + }) +} diff --git a/pkg/security/utils/cgroup.go b/pkg/security/utils/cgroup.go index 7b4be338429a6..e7b736ee22085 100644 --- a/pkg/security/utils/cgroup.go +++ b/pkg/security/utils/cgroup.go @@ -12,14 +12,18 @@ import ( "bufio" "bytes" "crypto/sha256" + "errors" "fmt" "os" + "path/filepath" "strconv" "strings" - 
"github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/moby/sys/mountinfo" "golang.org/x/sys/unix" + + "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" + "github.com/DataDog/datadog-agent/pkg/util/kernel" ) // ContainerIDLen is the length of a container ID is the length of the hex representation of a sha256 hash @@ -44,61 +48,41 @@ func (cg ControlGroup) GetContainerContext() (containerutils.ContainerID, contai return containerutils.ContainerID(id), containerutils.CGroupFlags(flags) } -// GetContainerID returns the container id extracted from the path of the control group -func (cg ControlGroup) GetContainerID() containerutils.ContainerID { - id, _ := containerutils.FindContainerID(containerutils.CGroupID(cg.Path)) - return containerutils.ContainerID(id) -} - -func parseCgroupLine(line string, expectedID int) (ControlGroup, error) { - idstr, rest, ok := strings.Cut(line, ":") +func parseCgroupLine(line string) (string, string, string, error) { + id, rest, ok := strings.Cut(line, ":") if !ok { - return ControlGroup{}, fmt.Errorf("invalid cgroup line: %s", line) + return "", "", "", fmt.Errorf("invalid cgroup line: %s", line) } - id, err := strconv.Atoi(idstr) - if err != nil { - return ControlGroup{}, err - } - - if expectedID >= 0 && expectedID != id { - return ControlGroup{}, fmt.Errorf("found cgroup, but with wrong ID (%d, but expected %d): %s", id, expectedID, line) + ctrl, path, ok := strings.Cut(rest, ":") + if !ok { + return "", "", "", fmt.Errorf("invalid cgroup line: %s", line) } - controllers, path, ok := strings.Cut(rest, ":") - if !ok { - return ControlGroup{}, fmt.Errorf("invalid cgroup line: %s", line) + if rest == "/" { + return "", "", "", fmt.Errorf("invalid cgroup line: %s", line) } - return ControlGroup{ - ID: id, - Controllers: strings.Split(controllers, ","), - Path: path, - }, nil + return id, ctrl, path, nil } -// GetProcControlGroup0 returns the cgroup membership with index 0 of the specified task. -func GetProcControlGroup0(tgid, pid uint32) (ControlGroup, error) { - data, err := os.ReadFile(CgroupTaskPath(tgid, pid)) - if err != nil { - return ControlGroup{}, err - } +func parseProcControlGroupsData(data []byte, fnc func(string, string, string) bool) error { data = bytes.TrimSpace(data) - var lastLine []byte - for len(data) != 0 { eol := bytes.IndexByte(data, '\n') if eol < 0 { eol = len(data) } line := data[:eol] - if bytes.HasPrefix(line, []byte("0:")) { - return parseCgroupLine(string(line), 0) + + id, ctrl, path, err := parseCgroupLine(string(line)) + if err != nil { + return err } - if bytes.ContainsRune(line, ':') { - lastLine = line + if fnc(id, ctrl, path) { + return nil } nextStart := eol + 1 @@ -108,7 +92,28 @@ func GetProcControlGroup0(tgid, pid uint32) (ControlGroup, error) { data = data[nextStart:] } - return parseCgroupLine(string(lastLine), -1) + return nil +} + +func parseProcControlGroups(tgid, pid uint32, fnc func(string, string, string) bool) error { + data, err := os.ReadFile(CgroupTaskPath(tgid, pid)) + if err != nil { + return err + } + return parseProcControlGroupsData(data, fnc) +} + +func makeControlGroup(id, ctrl, path string) (ControlGroup, error) { + idInt, err := strconv.Atoi(id) + if err != nil { + return ControlGroup{}, err + } + + return ControlGroup{ + ID: idInt, + Controllers: strings.Split(ctrl, ","), + Path: path, + }, nil } // GetProcControlGroups returns the cgroup membership of the specified task. 
@@ -121,11 +126,17 @@ func GetProcControlGroups(tgid, pid uint32) ([]ControlGroup, error) { scanner := bufio.NewScanner(bytes.NewReader(data)) for scanner.Scan() { t := scanner.Text() - c, err := parseCgroupLine(t, -1) + id, ctrl, path, err := parseCgroupLine(t) + if err != nil { + return nil, err + } + + cgroup, err := makeControlGroup(id, ctrl, path) if err != nil { return nil, err } - cgroups = append(cgroups, c) + + cgroups = append(cgroups, cgroup) } return cgroups, nil } @@ -137,26 +148,154 @@ func GetProcContainerID(tgid, pid uint32) (containerutils.ContainerID, error) { return id, err } +// CGroupContext holds the cgroup context of a process +type CGroupContext struct { + CGroupID containerutils.CGroupID + CGroupFlags containerutils.CGroupFlags + CGroupFileMountID uint32 + CGroupFileInode uint64 +} + // GetProcContainerContext returns the container ID which the process belongs to along with its manager. Returns "" if the process does not belong // to a container. -func GetProcContainerContext(tgid, pid uint32) (containerutils.ContainerID, model.CGroupContext, error) { - cgroup, err := GetProcControlGroup0(tgid, pid) - if err != nil { - return "", model.CGroupContext{}, err - } +func GetProcContainerContext(tgid, pid uint32) (containerutils.ContainerID, CGroupContext, error) { + var ( + containerID containerutils.ContainerID + runtime containerutils.CGroupFlags + cgroupContext CGroupContext + ) - containerID, runtime := cgroup.GetContainerContext() - cgroupContext := model.CGroupContext{ - CGroupID: containerutils.CGroupID(cgroup.Path), - CGroupFlags: runtime, + if err := parseProcControlGroups(tgid, pid, func(id, ctrl, path string) bool { + if path == "/" { + return false + } else if ctrl != "" && !strings.HasPrefix(ctrl, "name=") { + // On cgroup v1 we choose to take the "name" ctrl entry (ID 1), as the ID 0 could be empty + // On cgroup v2, it's only a single line with ID 0 and no ctrl + // (Cf unit tests for examples) + return false + } + cgroup, err := makeControlGroup(id, ctrl, path) + if err != nil { + return false + } + + containerID, runtime = cgroup.GetContainerContext() + cgroupContext.CGroupID = containerutils.CGroupID(cgroup.Path) + cgroupContext.CGroupFlags = runtime + + return true + }); err != nil { + return "", CGroupContext{}, err } var fileStats unix.Statx_t taskPath := CgroupTaskPath(pid, pid) if err := unix.Statx(unix.AT_FDCWD, taskPath, 0, unix.STATX_INO|unix.STATX_MNT_ID, &fileStats); err == nil { - cgroupContext.CGroupFile.MountID = uint32(fileStats.Mnt_id) - cgroupContext.CGroupFile.Inode = fileStats.Ino + cgroupContext.CGroupFileMountID = uint32(fileStats.Mnt_id) + cgroupContext.CGroupFileInode = fileStats.Ino } return containerID, cgroupContext, nil } + +var defaultCGroupMountpoints = []string{ + "/sys/fs/cgroup", + "/sys/fs/cgroup/unified", +} + +// ErrNoCGroupMountpoint is returned when no cgroup mount point is found +var ErrNoCGroupMountpoint = errors.New("no cgroup mount point found") + +// CGroupFS is a helper type used to find the cgroup context of a process +type CGroupFS struct { + cGroupMountPoints []string +} + +// NewCGroupFS creates a new CGroupFS instance +func NewCGroupFS(cgroupMountPoints ...string) *CGroupFS { + cfs := &CGroupFS{} + + var cgroupMnts []string + if len(cgroupMountPoints) == 0 { + cgroupMnts = defaultCGroupMountpoints + } else { + cgroupMnts = cgroupMountPoints + } + + for _, mountpoint := range cgroupMnts { + hostMountpoint := filepath.Join(kernel.SysFSRoot(), strings.TrimPrefix(mountpoint, "/sys/")) + if mounted, _ := 
mountinfo.Mounted(hostMountpoint); mounted { + cfs.cGroupMountPoints = append(cfs.cGroupMountPoints, hostMountpoint) + } + } + + return cfs +} + +// FindCGroupContext returns the container ID, cgroup context and sysfs cgroup path the process belongs to. +// Returns "" as container ID and sysfs cgroup path, and an empty CGroupContext if the process does not belong to a container. +func (cfs *CGroupFS) FindCGroupContext(tgid, pid uint32) (containerutils.ContainerID, CGroupContext, string, error) { + if len(cfs.cGroupMountPoints) == 0 { + return "", CGroupContext{}, "", ErrNoCGroupMountpoint + } + + var ( + containerID containerutils.ContainerID + cgroupContext CGroupContext + sysFScGroupPath string + ) + + err := parseProcControlGroups(tgid, pid, func(_, ctrl, path string) bool { + if path == "/" { + return false + } else if ctrl != "" && !strings.HasPrefix(ctrl, "name=") { + // On cgroup v1 we choose to take the "name" ctrl entry (ID 1), as the ID 0 could be empty + // On cgroup v2, it's only a single line with ID 0 and no ctrl + // (Cf unit tests for examples) + return false + } + + ctrlDirectory := strings.TrimPrefix(ctrl, "name=") + for _, mountpoint := range cfs.cGroupMountPoints { + cgroupPath := filepath.Join(mountpoint, ctrlDirectory, path) + if exists, err := checkPidExists(cgroupPath, pid); err == nil && exists { + cgroupID := containerutils.CGroupID(path) + ctrID, flags := containerutils.FindContainerID(cgroupID) + cgroupContext.CGroupID = cgroupID + cgroupContext.CGroupFlags = containerutils.CGroupFlags(flags) + containerID = ctrID + sysFScGroupPath = cgroupPath + + var fileStatx unix.Statx_t + var fileStats unix.Stat_t + if err := unix.Statx(unix.AT_FDCWD, sysFScGroupPath, 0, unix.STATX_INO|unix.STATX_MNT_ID, &fileStatx); err == nil { + cgroupContext.CGroupFileMountID = uint32(fileStatx.Mnt_id) + cgroupContext.CGroupFileInode = fileStatx.Ino + } else if err := unix.Stat(sysFScGroupPath, &fileStats); err == nil { + cgroupContext.CGroupFileInode = fileStats.Ino + } + return true + } + } + return false + }) + if err != nil { + return "", CGroupContext{}, "", err + } + + return containerID, cgroupContext, sysFScGroupPath, nil +} + +func checkPidExists(sysFScGroupPath string, expectedPid uint32) (bool, error) { + data, err := os.ReadFile(filepath.Join(sysFScGroupPath, "cgroup.procs")) + if err != nil { + return false, err + } + scanner := bufio.NewScanner(bytes.NewReader(data)) + for scanner.Scan() { + if pid, err := strconv.Atoi(strings.TrimSpace(scanner.Text())); err == nil && uint32(pid) == expectedPid { + return true, nil + } + } + return false, nil +} diff --git a/pkg/security/utils/cgroup_test.go b/pkg/security/utils/cgroup_test.go new file mode 100644 index 0000000000000..7463ad1ae36cd --- /dev/null +++ b/pkg/security/utils/cgroup_test.go @@ -0,0 +1,273 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build linux + +package utils + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/DataDog/datadog-agent/pkg/security/secl/containerutils" +) + +func TestCGroupvParseLine(t *testing.T) { + line := `5:cpu,cpuacct:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope` + id, ctrl, path, err := parseCgroupLine(line) + + assert.Nil(t, err) + assert.Equal(t, "5", id) + assert.Equal(t, "cpu,cpuacct", ctrl) + assert.Equal(t, "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope", path) +} + +type testCgroup struct { + name string + cgroupContent string + error bool + containerID string + runtime containerutils.CGroupFlags + path string +} + +func TestCGroup(t *testing.T) { + testsCgroup := []testCgroup{ + { + name: "cgroupv1-cri", + cgroupContent: `13:blkio:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +12:memory:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +11:misc:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +10:pids:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +9:hugetlb:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +8:rdma:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +7:perf_event:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +6:cpuset:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +5:cpu,cpuacct:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +4:net_cls,net_prio:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +3:freezer:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +2:devices:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope 
+1:name=systemd:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope +0::/ +`, + error: false, + containerID: "e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerCRI), + path: "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98005c3b_b650_4efe_8b91_2164d784397f.slice/cri-containerd-e8ac3efec3322d7f13cfa0cdee4344754d01bd4e50fea44e0753e83fdb74cab3.scope", + }, + { + name: "cgroupv1-docker", + cgroupContent: `13:memory:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +12:hugetlb:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +11:misc:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +10:blkio:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +9:rdma:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +8:perf_event:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +7:cpuset:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +6:pids:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +5:cpu,cpuacct:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +4:freezer:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +3:devices:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +2:net_cls,net_prio:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +1:name=systemd:/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +0::/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182 +`, + error: false, + containerID: "99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerDocker), + path: "/docker/99d24a208bd5b9c9663e18c34e4bd793536f062d8299a5cca0e718994abd9182", + }, + { + name: "cgroupv1-systemd-service", + cgroupContent: `13:memory:/system.slice/cups.service +12:hugetlb:/ +11:misc:/ +10:blkio:/system.slice/cups.service +9:rdma:/ +8:perf_event:/ +7:cpuset:/ +6:pids:/system.slice/cups.service +5:cpu,cpuacct:/system.slice/cups.service +4:freezer:/ +3:devices:/system.slice/cups.service +2:net_cls,net_prio:/ +1:name=systemd:/system.slice/cups.service +0::/system.slice/cups.service +`, + error: false, + containerID: "", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerSystemd), + path: "/system.slice/cups.service", + }, + { + name: "cgroupv1-systemd-subservice", + cgroupContent: `13:memory:/user.slice/user-1000.slice/user@1000.service +12:hugetlb:/ +11:misc:/ +10:blkio:/user.slice +9:rdma:/ +8:perf_event:/ +7:cpuset:/ +6:pids:/user.slice/user-1000.slice/user@1000.service +5:cpu,cpuacct:/user.slice +4:freezer:/ +3:devices:/user.slice +2:net_cls,net_prio:/ +1:name=systemd:/user.slice/user-1000.slice/user@1000.service/xdg-desktop-portal-gtk.service +0::/user.slice/user-1000.slice/user@1000.service/xdg-desktop-portal-gtk.service +`, + error: false, + containerID: "", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerSystemd), + path: "/user.slice/user-1000.slice/user@1000.service/xdg-desktop-portal-gtk.service", + }, + { + name: "cgroupv1-systemd-scope", + cgroupContent: `13:memory:/user.slice/user-1000.slice/user@1000.service +12:hugetlb:/ +11:misc:/ +10:blkio:/user.slice 
+9:rdma:/ +8:perf_event:/ +7:cpuset:/ +6:pids:/user.slice/user-1000.slice/user@1000.service +5:cpu,cpuacct:/user.slice +4:freezer:/ +3:devices:/user.slice +2:net_cls,net_prio:/ +1:name=systemd:/user.slice/user-1000.slice/user@1000.service/apps.slice/apps-org.gnome.Terminal.slice/vte-spawn-1d0750f1-4e83-4b26-81ae-e3770394b7f3.scope +0::/user.slice/user-1000.slice/user@1000.service/apps.slice/apps-org.gnome.Terminal.slice/vte-spawn-1d0750f1-4e83-4b26-81ae-e3770394b7f3.scope +`, + error: false, + containerID: "", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerSystemd | containerutils.CGroupManager(containerutils.SystemdScope)), + path: "/user.slice/user-1000.slice/user@1000.service/apps.slice/apps-org.gnome.Terminal.slice/vte-spawn-1d0750f1-4e83-4b26-81ae-e3770394b7f3.scope", + }, + { + name: "cgroupv1-empty", + cgroupContent: `12:pids:/ +11:devices:/ +10:blkio:/ +9:cpuset:/ +8:perf_event:/ +7:memory:/ +6:freezer:/ +5:hugetlb:/ +4:rdma:/ +3:net_cls,net_prio:/ +2:cpu,cpuacct:/ +1:name=systemd:/ +0::/ +`, + error: false, + containerID: "", + runtime: 0, + path: "", + }, + { + name: "cgroupv1-pid1", + cgroupContent: `13:memory:/init.scope +12:hugetlb:/ +11:misc:/ +10:blkio:/init.scope +9:rdma:/ +8:perf_event:/ +7:cpuset:/ +6:pids:/init.scope +5:cpu,cpuacct:/init.scope +4:freezer:/ +3:devices:/init.scope +2:net_cls,net_prio:/ +1:name=systemd:/init.scope +0::/init.scope +`, + error: false, + containerID: "", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerSystemd | containerutils.CGroupManager(containerutils.SystemdScope)), + path: "/init.scope", + }, + { + name: "cgroupv2-docker", + cgroupContent: `0::/system.slice/docker-473a28bd49fcbf3a24eb55563125720311181ee184ae9b88fc9a3fbb30031e47.scope +`, + error: false, + containerID: "473a28bd49fcbf3a24eb55563125720311181ee184ae9b88fc9a3fbb30031e47", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerDocker), + path: "/system.slice/docker-473a28bd49fcbf3a24eb55563125720311181ee184ae9b88fc9a3fbb30031e47.scope", + }, + { + name: "cgroupv2-systemd-service", + cgroupContent: `0::/system.slice/ssh.service +`, + error: false, + containerID: "", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerSystemd), + path: "/system.slice/ssh.service", + }, + { + name: "cgroupv2-systemd-scope", + cgroupContent: `0::/user.slice/user-1000.slice/session-4.scope +`, + error: false, + containerID: "", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerSystemd | containerutils.CGroupManager(containerutils.SystemdScope)), + path: "/user.slice/user-1000.slice/session-4.scope", + }, + { + name: "cgroupv2-pid1", + cgroupContent: `0::/init.scope +`, + error: false, + containerID: "", + runtime: containerutils.CGroupFlags(containerutils.CGroupManagerSystemd | containerutils.CGroupManager(containerutils.SystemdScope)), + path: "/init.scope", + }, + { + name: "cgroupv2-empty", + cgroupContent: `0::/ +`, + error: false, + containerID: "", + runtime: 0, + path: "", + }, + } + + for _, test := range testsCgroup { + var ( + containerID containerutils.ContainerID + runtime containerutils.CGroupFlags + cgroupContext CGroupContext + cgroupPath string + ) + + t.Run(test.name, func(t *testing.T) { + err := parseProcControlGroupsData([]byte(test.cgroupContent), func(id, ctrl, path string) bool { + if path == "/" { + return false + } else if ctrl != "" && !strings.HasPrefix(ctrl, "name=") { + return false + } + cgroup, err := makeControlGroup(id, ctrl, path) + if err != nil { + return false + } + + containerID, 
runtime = cgroup.GetContainerContext() + cgroupContext.CGroupID = containerutils.CGroupID(cgroup.Path) + cgroupContext.CGroupFlags = runtime + cgroupPath = path + return true + }) + + assert.Equal(t, test.error, err != nil) + assert.Equal(t, containerutils.ContainerID(test.containerID), containerID) + assert.Equal(t, test.runtime, runtime) + assert.Equal(t, test.path, cgroupPath) + }) + } +} diff --git a/pkg/security/utils/graph.go b/pkg/security/utils/graph.go index 74afe7012a4b7..95edbbdb13e00 100644 --- a/pkg/security/utils/graph.go +++ b/pkg/security/utils/graph.go @@ -27,16 +27,30 @@ type Node struct { // Edge describes an edge of a dot edge type Edge struct { - From GraphID - To GraphID - Color string + From GraphID + To GraphID + Color string + HasArrowHead bool + Label string + IsTable bool +} + +// SubGraph describes a dot subgraph +type SubGraph struct { + Name string + Title string + TitleSize int + Color string + Nodes map[GraphID]*Node + Edges []*Edge } // Graph describes a dot graph type Graph struct { - Title string - Nodes map[GraphID]*Node - Edges []*Edge + Title string + Nodes map[GraphID]*Node + Edges []*Edge + SubGraphs []*SubGraph } // EncodeDOT encodes an activity dump in the DOT format diff --git a/pkg/security/utils/hostname.go b/pkg/security/utils/hostnameutils/hostname.go similarity index 97% rename from pkg/security/utils/hostname.go rename to pkg/security/utils/hostnameutils/hostname.go index 1968802bd42ed..418cc81ab7a9a 100644 --- a/pkg/security/utils/hostname.go +++ b/pkg/security/utils/hostnameutils/hostname.go @@ -3,8 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package utils holds utils related files -package utils +// Package hostnameutils holds utils/hostname related files +package hostnameutils import ( "context" diff --git a/pkg/security/utils/hostname_testutil.go b/pkg/security/utils/hostnameutils/hostname_testutil.go similarity index 95% rename from pkg/security/utils/hostname_testutil.go rename to pkg/security/utils/hostnameutils/hostname_testutil.go index e8b180e1665b6..1c93dd43edf90 100644 --- a/pkg/security/utils/hostname_testutil.go +++ b/pkg/security/utils/hostnameutils/hostname_testutil.go @@ -5,7 +5,7 @@ //go:build test -package utils +package hostnameutils // SetCachedHostname test utility to set the cached hostname, to avoid fetching it from the core agent. func SetCachedHostname(name string) { diff --git a/pkg/security/resolvers/usersessions/k8s_user_session_context.go b/pkg/security/utils/k8sutils/k8s_user_session_context.go similarity index 93% rename from pkg/security/resolvers/usersessions/k8s_user_session_context.go rename to pkg/security/utils/k8sutils/k8s_user_session_context.go index 4b23ebad7084b..97b50be01acf6 100644 --- a/pkg/security/resolvers/usersessions/k8s_user_session_context.go +++ b/pkg/security/utils/k8sutils/k8s_user_session_context.go @@ -3,8 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-// Package usersessions holds model related to the user sessions resolver -package usersessions +// Package k8sutils holds kubernetes utils related to the user sessions resolver +package k8sutils import ( "encoding/json" diff --git a/pkg/security/utils/pathutils/doc.go b/pkg/security/utils/pathutils/doc.go new file mode 100644 index 0000000000000..d9080d48103fc --- /dev/null +++ b/pkg/security/utils/pathutils/doc.go @@ -0,0 +1,7 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package pathutils holds path utils related files +package pathutils diff --git a/pkg/security/utils/path_linux.go b/pkg/security/utils/pathutils/path_linux.go similarity index 99% rename from pkg/security/utils/path_linux.go rename to pkg/security/utils/pathutils/path_linux.go index 09403dba511bc..ea3e3b5b2bd14 100644 --- a/pkg/security/utils/path_linux.go +++ b/pkg/security/utils/pathutils/path_linux.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package utils +package pathutils import ( "fmt" diff --git a/pkg/security/utils/path_linux_test.go b/pkg/security/utils/pathutils/path_linux_test.go similarity index 99% rename from pkg/security/utils/path_linux_test.go rename to pkg/security/utils/pathutils/path_linux_test.go index a62b29c6ddae6..6ff6d2226a272 100644 --- a/pkg/security/utils/path_linux_test.go +++ b/pkg/security/utils/pathutils/path_linux_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package utils +package pathutils import ( "testing" diff --git a/pkg/security/utils/path_windows.go b/pkg/security/utils/pathutils/path_windows.go similarity index 98% rename from pkg/security/utils/path_windows.go rename to pkg/security/utils/pathutils/path_windows.go index bbc6ed878bb07..c49c9b7439fce 100644 --- a/pkg/security/utils/path_windows.go +++ b/pkg/security/utils/pathutils/path_windows.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package utils +package pathutils import ( "strings" diff --git a/pkg/security/utils/proc_linux.go b/pkg/security/utils/proc_linux.go index 229a4b16350ed..d6144e22c1199 100644 --- a/pkg/security/utils/proc_linux.go +++ b/pkg/security/utils/proc_linux.go @@ -19,7 +19,7 @@ import ( "strings" "sync" - "github.com/DataDog/datadog-agent/pkg/security/secl/model" + "github.com/DataDog/datadog-agent/pkg/security/secl/model/sharedconsts" "github.com/DataDog/datadog-agent/pkg/util/kernel" "github.com/shirou/gopsutil/v4/process" ) @@ -162,18 +162,18 @@ func ModulesPath() string { func GetLoginUID(pid uint32) (uint32, error) { content, err := os.ReadFile(LoginUIDPath(pid)) if err != nil { - return model.AuditUIDUnset, err + return sharedconsts.AuditUIDUnset, err } data := strings.TrimSuffix(string(content), "\n") if len(data) == 0 { - return model.AuditUIDUnset, fmt.Errorf("invalid login uid: %v", data) + return sharedconsts.AuditUIDUnset, fmt.Errorf("invalid login uid: %v", data) } // parse login uid auid, err := strconv.ParseUint(data, 10, 32) if err != nil { - return model.AuditUIDUnset, fmt.Errorf("coudln't parse loginuid: %v", err) + return sharedconsts.AuditUIDUnset, fmt.Errorf("coudln't parse loginuid: %v", err) } return uint32(auid), nil } @@ -306,7 +306,7 @@ func EnvVars(priorityEnvsPrefixes []string, pid uint32, maxEnvVars int) ([]strin envs = append(envs, priorityEnvs...) for scanner.Scan() { - if len(envs) >= model.MaxArgsEnvsSize { + if len(envs) >= sharedconsts.MaxArgsEnvsSize { return envs, true, nil } diff --git a/pkg/serializer/go.mod b/pkg/serializer/go.mod index 0492d99ac7ba5..fc25e8060b390 100644 --- a/pkg/serializer/go.mod +++ b/pkg/serializer/go.mod @@ -11,11 +11,12 @@ replace ( github.com/DataDog/datadog-agent/comp/core/log/mock => ../../comp/core/log/mock github.com/DataDog/datadog-agent/comp/core/secrets => ../../comp/core/secrets github.com/DataDog/datadog-agent/comp/core/status => ../../comp/core/status + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection => ../../comp/core/tagger/origindetection github.com/DataDog/datadog-agent/comp/core/telemetry => ../../comp/core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../comp/def github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder => ../../comp/forwarder/defaultforwarder github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface => ../../comp/forwarder/orchestrator/orchestratorinterface - github.com/DataDog/datadog-agent/comp/serializer/compression => ../../comp/serializer/compression + github.com/DataDog/datadog-agent/comp/serializer/metricscompression => ../../comp/serializer/metricscompression github.com/DataDog/datadog-agent/pkg/aggregator/ckey => ../aggregator/ckey github.com/DataDog/datadog-agent/pkg/api => ../api github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../collector/check/defaults @@ -42,6 +43,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/backoff => ../util/backoff/ github.com/DataDog/datadog-agent/pkg/util/buf => ../util/buf/ github.com/DataDog/datadog-agent/pkg/util/common => ../util/common + github.com/DataDog/datadog-agent/pkg/util/compression => ../util/compression github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../../pkg/util/defaultpaths github.com/DataDog/datadog-agent/pkg/util/executable => ../util/executable/ github.com/DataDog/datadog-agent/pkg/util/filesystem => ../util/filesystem/ @@ -51,7 +53,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/json => ../util/json 
github.com/DataDog/datadog-agent/pkg/util/log => ../util/log github.com/DataDog/datadog-agent/pkg/util/log/setup => ../util/log/setup - github.com/DataDog/datadog-agent/pkg/util/optional => ../util/optional/ + github.com/DataDog/datadog-agent/pkg/util/option => ../util/option/ github.com/DataDog/datadog-agent/pkg/util/pointer => ../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/sort => ../util/sort/ @@ -63,43 +65,45 @@ replace ( ) require ( - github.com/DataDog/agent-payload/v5 v5.0.138 - github.com/DataDog/datadog-agent/comp/core/config v0.57.1 + github.com/DataDog/agent-payload/v5 v5.0.141 + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder v0.56.0-rc.3 github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface v0.56.0-rc.3 - github.com/DataDog/datadog-agent/comp/serializer/compression v0.56.0-rc.3 + github.com/DataDog/datadog-agent/comp/serializer/metricscompression v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/aggregator/ckey v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 github.com/DataDog/datadog-agent/pkg/metrics v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/process/util/api v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/tagger/types v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/tagset v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/telemetry v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/util/compression v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/json v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/DataDog/datadog-agent/pkg/version v0.59.1 - github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 github.com/gogo/protobuf v1.3.2 github.com/json-iterator/go v1.1.12 github.com/protocolbuffers/protoscope v0.0.0-20221109213918-8e7a6aafa2c9 github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 github.com/stretchr/testify v1.10.0 - google.golang.org/protobuf v1.35.2 + google.golang.org/protobuf v1.36.3 ) require ( - github.com/DataDog/datadog-agent/comp/core/flare/builder v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/core/flare/types v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/core/log/def v0.0.0-00010101000000-000000000000 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/log/def v0.61.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/comp/core/status v0.56.0-rc.3 // indirect + github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.1 // indirect github.com/DataDog/datadog-agent/comp/core/telemetry v0.57.1 // indirect - github.com/DataDog/datadog-agent/comp/def v0.57.1 // indirect + github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/api v0.57.1 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - 
github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.60.0-devel // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.60.0-devel // indirect @@ -110,17 +114,17 @@ require ( github.com/DataDog/datadog-agent/pkg/util/buf v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/common v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/fxutil v0.57.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/http v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/sort v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/DataDog/viper v1.14.0 // indirect @@ -144,7 +148,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect @@ -156,14 +160,14 @@ require ( github.com/patrickmn/go-cache v2.1.0+incompatible // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.8.1 // indirect 
github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -177,9 +181,9 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/serializer/go.sum b/pkg/serializer/go.sum index 21381032cc3dc..00211b5c0d759 100644 --- a/pkg/serializer/go.sum +++ b/pkg/serializer/go.sum @@ -1,13 +1,13 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/DataDog/agent-payload/v5 v5.0.138 h1:Wg7hmWuoLC/o0X3zZ+uGcfRHPyaytljudgSY9O59zjc= -github.com/DataDog/agent-payload/v5 v5.0.138/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= +github.com/DataDog/agent-payload/v5 v5.0.141 h1:pV76CyTUEe/LFuS7fwarIfOX5seSuYZylzhj1aGY2DQ= +github.com/DataDog/agent-payload/v5 v5.0.141/go.mod h1:lxh9lb5xYrBXjblpIWYUi4deJqVbkIfkjwesi5nskDc= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49 h1:EbzDX8HPk5uE2FsJYxD74QmMw0/3CqSKhEr6teh0ncQ= github.com/DataDog/mmh3 v0.0.0-20210722141835-012dc69a9e49/go.mod h1:SvsjzyJlSg0rKsqYgdcFxeEVflx3ZNAyFfkUHP0TxXg= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0 h1:cXcKVEU1D0HlguR7GunnvuI70TghkarCa9DApqzMY94= -github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.22.0/go.mod h1:ES00EXfyEKgUkjd93tAXCxJA6i0seeOhZoS5Cj2qzzg= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0 h1:63SzQz9Ab8XJj8fQKQz6UZNBhOm8rucwzbDfwTVF6dQ= -github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.22.0/go.mod h1:E/PY/aQ6S/N5hBPHXZRGmovs5b1BSi4RHGNcB4yP/Z0= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0 h1:ttW3C3IN8p1goqyvaVpT4Blzg3lQ+sh4MTtB33BbpdE= +github.com/DataDog/opentelemetry-mapping-go/pkg/internal/sketchtest v0.24.0/go.mod h1:FpUbxBqKdi16CDJnRifUzmkETaEYR75xvh2Vo8vvJN0= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0 h1:Uha4TTkbCcYTvUbkbfvUjUmxtPaPKCOtwwl91erkRRg= +github.com/DataDog/opentelemetry-mapping-go/pkg/quantile v0.24.0/go.mod h1:RWoMSFb2Q+L0FSRYctEt8Wp0em+InUg+Oe+BU30e7gA= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= @@ -95,7 +95,6 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -148,8 
+147,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -187,8 +186,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -205,8 +204,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -224,8 +223,8 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -236,8 +235,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -301,8 +300,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -323,8 +322,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= 
+golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -348,8 +347,8 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -386,8 +385,8 @@ google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRn google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/serializer/internal/metrics/events_test.go b/pkg/serializer/internal/metrics/events_test.go index 9e029147072f5..2ce3cb740bd6a 100644 --- a/pkg/serializer/internal/metrics/events_test.go +++ b/pkg/serializer/internal/metrics/events_test.go @@ -21,12 +21,12 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/comp/serializer/compression/common" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/serializer/internal/stream" taggertypes "github.com/DataDog/datadog-agent/pkg/tagger/types" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) func TestMarshal(t *testing.T) { @@ -173,8 +173,8 @@ func TestEventsSeveralPayloadsCreateSingleMarshaler(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: 
common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -197,8 +197,8 @@ func TestEventsSeveralPayloadsCreateMarshalersBySourceType(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -259,8 +259,8 @@ func assertEqualEventsToMarshalJSON(t *testing.T, events Events) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -371,7 +371,8 @@ func BenchmarkCreateSingleMarshalerOneEventBySource(b *testing.B) { func benchmarkCreateSingleMarshaler(b *testing.B, createEvents func(numberOfItem int) Events) { runBenchmark(b, func(b *testing.B, numberOfItem int) { cfg := configmock.New(b) - payloadBuilder := stream.NewJSONPayloadBuilder(true, cfg, selector.NewCompressor(cfg)) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: cfg}).Comp + payloadBuilder := stream.NewJSONPayloadBuilder(true, cfg, compressor) events := createEvents(numberOfItem) b.ResetTimer() @@ -385,7 +386,8 @@ func benchmarkCreateSingleMarshaler(b *testing.B, createEvents func(numberOfItem func BenchmarkCreateMarshalersBySourceType(b *testing.B) { runBenchmark(b, func(b *testing.B, numberOfItem int) { cfg := configmock.New(b) - payloadBuilder := stream.NewJSONPayloadBuilder(true, cfg, selector.NewCompressor(cfg)) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: cfg}).Comp + payloadBuilder := stream.NewJSONPayloadBuilder(true, cfg, compressor) events := createBenchmarkEvents(numberOfItem) b.ResetTimer() @@ -401,7 +403,9 @@ func BenchmarkCreateMarshalersBySourceType(b *testing.B) { func BenchmarkCreateMarshalersSeveralSourceTypes(b *testing.B) { runBenchmark(b, func(b *testing.B, numberOfItem int) { cfg := configmock.New(b) - payloadBuilder := stream.NewJSONPayloadBuilder(true, cfg, selector.NewCompressor(cfg)) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: cfg}).Comp + payloadBuilder := stream.NewJSONPayloadBuilder(true, cfg, compressor) events := Events{} // Half of events have the same source type diff --git a/pkg/serializer/internal/metrics/iterable_series.go b/pkg/serializer/internal/metrics/iterable_series.go index 24b8f4e68ea0e..e545789bb3cb9 100644 --- a/pkg/serializer/internal/metrics/iterable_series.go +++ b/pkg/serializer/internal/metrics/iterable_series.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer/internal/stream" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" @@ -289,10 +289,6 @@ func (pb *PayloadsBuilder) writeSerie(serie *metrics.Serie) error { const serieMetadataOriginOriginService = 6 // |----| 'Origin' message // |-----------| 'origin_service' field index - const 
serieMetadataOriginOriginProductAgentType = 10 - // |----| 'Origin' message - // |-----------| 'OriginProduct' enum - // |-------| 'Agent' enum value addToPayload := func() error { err := pb.compressor.AddItem(pb.buf.Bytes()) @@ -409,7 +405,7 @@ func (pb *PayloadsBuilder) writeSerie(serie *metrics.Serie) error { return err } } - err = ps.Int32(serieMetadataOriginOriginProduct, serieMetadataOriginOriginProductAgentType) + err = ps.Int32(serieMetadataOriginOriginProduct, metricSourceToOriginProduct(serie.Source)) if err != nil { return err } diff --git a/pkg/serializer/internal/metrics/origin_mapping.go b/pkg/serializer/internal/metrics/origin_mapping.go index 02c54e1913363..c6b4c037e6eda 100644 --- a/pkg/serializer/internal/metrics/origin_mapping.go +++ b/pkg/serializer/internal/metrics/origin_mapping.go @@ -9,6 +9,15 @@ import ( "github.com/DataDog/datadog-agent/pkg/metrics" ) +func metricSourceToOriginProduct(ms metrics.MetricSource) int32 { + const serieMetadataOriginOriginProductAgentType = 10 + const serieMetadataOriginOriginProductDatadogExporterType = 19 + if ms >= metrics.MetricSourceOpenTelemetryCollectorUnknown && ms <= metrics.MetricSourceOpenTelemetryCollectorCouchdbReceiver { + return serieMetadataOriginOriginProductDatadogExporterType + } + return serieMetadataOriginOriginProductAgentType +} + func metricSourceToOriginCategory(ms metrics.MetricSource) int32 { // These constants map to specific fields in the 'OriginCategory' enum in origin.proto switch ms { @@ -711,6 +720,94 @@ func metricSourceToOriginService(ms metrics.MetricSource) int32 { return 203 case metrics.MetricSourceInternal: return 212 + + case metrics.MetricSourceOpenTelemetryCollectorUnknown: + return 0 + case metrics.MetricSourceOpenTelemetryCollectorDockerstatsReceiver: + return 217 + case metrics.MetricSourceOpenTelemetryCollectorElasticsearchReceiver: + return 218 + case metrics.MetricSourceOpenTelemetryCollectorExpvarReceiver: + return 219 + case metrics.MetricSourceOpenTelemetryCollectorFilestatsReceiver: + return 220 + case metrics.MetricSourceOpenTelemetryCollectorFlinkmetricsReceiver: + return 221 + case metrics.MetricSourceOpenTelemetryCollectorGitproviderReceiver: + return 222 + case metrics.MetricSourceOpenTelemetryCollectorHaproxyReceiver: + return 223 + case metrics.MetricSourceOpenTelemetryCollectorHostmetricsReceiver: + return 224 + case metrics.MetricSourceOpenTelemetryCollectorHttpcheckReceiver: + return 225 + case metrics.MetricSourceOpenTelemetryCollectorIisReceiver: + return 226 + case metrics.MetricSourceOpenTelemetryCollectorK8sclusterReceiver: + return 227 + case metrics.MetricSourceOpenTelemetryCollectorKafkametricsReceiver: + return 228 + case metrics.MetricSourceOpenTelemetryCollectorKubeletstatsReceiver: + return 229 + case metrics.MetricSourceOpenTelemetryCollectorMemcachedReceiver: + return 230 + case metrics.MetricSourceOpenTelemetryCollectorMongodbatlasReceiver: + return 231 + case metrics.MetricSourceOpenTelemetryCollectorMongodbReceiver: + return 232 + case metrics.MetricSourceOpenTelemetryCollectorMysqlReceiver: + return 233 + case metrics.MetricSourceOpenTelemetryCollectorNginxReceiver: + return 234 + case metrics.MetricSourceOpenTelemetryCollectorNsxtReceiver: + return 235 + case metrics.MetricSourceOpenTelemetryCollectorOracledbReceiver: + return 236 + case metrics.MetricSourceOpenTelemetryCollectorPostgresqlReceiver: + return 237 + case metrics.MetricSourceOpenTelemetryCollectorPrometheusReceiver: + return 238 + case 
metrics.MetricSourceOpenTelemetryCollectorRabbitmqReceiver: + return 239 + case metrics.MetricSourceOpenTelemetryCollectorRedisReceiver: + return 240 + case metrics.MetricSourceOpenTelemetryCollectorRiakReceiver: + return 241 + case metrics.MetricSourceOpenTelemetryCollectorSaphanaReceiver: + return 242 + case metrics.MetricSourceOpenTelemetryCollectorSnmpReceiver: + return 243 + case metrics.MetricSourceOpenTelemetryCollectorSnowflakeReceiver: + return 244 + case metrics.MetricSourceOpenTelemetryCollectorSplunkenterpriseReceiver: + return 245 + case metrics.MetricSourceOpenTelemetryCollectorSqlserverReceiver: + return 246 + case metrics.MetricSourceOpenTelemetryCollectorSshcheckReceiver: + return 247 + case metrics.MetricSourceOpenTelemetryCollectorStatsdReceiver: + return 248 + case metrics.MetricSourceOpenTelemetryCollectorVcenterReceiver: + return 249 + case metrics.MetricSourceOpenTelemetryCollectorZookeeperReceiver: + return 250 + case metrics.MetricSourceOpenTelemetryCollectorActiveDirectorydsReceiver: + return 251 + case metrics.MetricSourceOpenTelemetryCollectorAerospikeReceiver: + return 252 + case metrics.MetricSourceOpenTelemetryCollectorApacheReceiver: + return 253 + case metrics.MetricSourceOpenTelemetryCollectorApachesparkReceiver: + return 254 + case metrics.MetricSourceOpenTelemetryCollectorAzuremonitorReceiver: + return 255 + case metrics.MetricSourceOpenTelemetryCollectorBigipReceiver: + return 256 + case metrics.MetricSourceOpenTelemetryCollectorChronyReceiver: + return 257 + case metrics.MetricSourceOpenTelemetryCollectorCouchdbReceiver: + return 258 + case metrics.MetricSourceArgoRollouts: return 314 case metrics.MetricSourceArgoWorkflows: diff --git a/pkg/serializer/internal/metrics/series_test.go b/pkg/serializer/internal/metrics/series_test.go index c584f02875c4b..a6eb7a0f23638 100644 --- a/pkg/serializer/internal/metrics/series_test.go +++ b/pkg/serializer/internal/metrics/series_test.go @@ -20,13 +20,13 @@ import ( "github.com/DataDog/agent-payload/v5/gogen" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - "github.com/DataDog/datadog-agent/comp/serializer/compression/common" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer/internal/stream" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" "github.com/DataDog/datadog-agent/pkg/tagset" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) func TestPopulateDeviceField(t *testing.T) { @@ -376,22 +376,22 @@ func TestMarshalSplitCompress(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { series := makeSeries(10000, 50) mockConfig := mock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - strategy := selector.NewCompressor(mockConfig) - payloads, err := series.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, strategy) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, err := series.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, compressor) require.NoError(t, err) // check 
that we got multiple payloads, so splitting occurred require.Greater(t, len(payloads), 1) for _, compressedPayload := range payloads { - payload, err := strategy.Decompress(compressedPayload.GetContent()) + payload, err := compressor.Decompress(compressedPayload.GetContent()) require.NoError(t, err) pl := new(gogen.MetricPayload) @@ -410,8 +410,8 @@ func TestMarshalSplitCompressPointsLimit(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -422,7 +422,8 @@ func TestMarshalSplitCompressPointsLimit(t *testing.T) { // ten series, each with 50 points, so two should fit in each payload series := makeSeries(10, 50) - payloads, err := series.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, selector.NewCompressor(mockConfig)) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, err := series.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, compressor) require.NoError(t, err) require.Equal(t, 5, len(payloads)) }) @@ -433,8 +434,8 @@ func TestMarshalSplitCompressMultiplePointsLimit(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -463,7 +464,8 @@ func TestMarshalSplitCompressMultiplePointsLimit(t *testing.T) { } series := CreateIterableSeries(CreateSerieSource(rawSeries)) - payloads, filteredPayloads, autoscalingFailoverPayloads, err := series.MarshalSplitCompressMultiple(mockConfig, selector.NewCompressor(mockConfig), + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, filteredPayloads, autoscalingFailoverPayloads, err := series.MarshalSplitCompressMultiple(mockConfig, compressor, func(s *metrics.Serie) bool { return s.Name == "test.metrics42" }, @@ -484,8 +486,8 @@ func TestMarshalSplitCompressPointsLimitTooBig(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -494,7 +496,9 @@ func TestMarshalSplitCompressPointsLimitTooBig(t *testing.T) { mockConfig.SetWithoutSource("serializer_max_series_points_per_payload", 1) series := makeSeries(1, 2) - payloads, err := series.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, selector.NewCompressor(mockConfig)) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, err := series.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, compressor) require.NoError(t, err) require.Len(t, payloads, 0) }) @@ -508,8 +512,8 @@ func TestPayloadsSeries(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -541,15 +545,16 @@ func TestPayloadsSeries(t *testing.T) { mockConfig := mock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", 
tc.kind) originalLength := len(testSeries) - strategy := selector.NewCompressor(mockConfig) - builder := stream.NewJSONPayloadBuilder(true, mockConfig, strategy) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + builder := stream.NewJSONPayloadBuilder(true, mockConfig, compressor) iterableSeries := CreateIterableSeries(CreateSerieSource(testSeries)) payloads, err := builder.BuildWithOnErrItemTooBigPolicy(iterableSeries, stream.DropItemOnErrItemTooBig) require.Nil(t, err) var splitSeries = []Series{} for _, compressedPayload := range payloads { - payload, err := strategy.Decompress(compressedPayload.GetContent()) + payload, err := compressor.Decompress(compressedPayload.GetContent()) require.NoError(t, err) var s = map[string]Series{} @@ -589,7 +594,8 @@ func BenchmarkPayloadsSeries(b *testing.B) { var r transaction.BytesPayloads mockConfig := mock.New(b) - builder := stream.NewJSONPayloadBuilder(true, mockConfig, selector.NewCompressor(mockConfig)) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + builder := stream.NewJSONPayloadBuilder(true, mockConfig, compressor) for n := 0; n < b.N; n++ { // always record the result of Payloads to prevent // the compiler eliminating the function call. diff --git a/pkg/serializer/internal/metrics/service_checks_test.go b/pkg/serializer/internal/metrics/service_checks_test.go index 2559d7c9ed2cc..94dd8e666cde2 100644 --- a/pkg/serializer/internal/metrics/service_checks_test.go +++ b/pkg/serializer/internal/metrics/service_checks_test.go @@ -15,7 +15,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/config/mock" pkgconfigmodel "github.com/DataDog/datadog-agent/pkg/config/model" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" @@ -80,14 +80,14 @@ func createServiceCheck(checkName string) *servicecheck.ServiceCheck { } func buildPayload(t *testing.T, m marshaler.StreamJSONMarshaler, cfg pkgconfigmodel.Config) [][]byte { - strategy := selector.NewCompressor(cfg) - builder := stream.NewJSONPayloadBuilder(true, cfg, strategy) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: cfg}).Comp + builder := stream.NewJSONPayloadBuilder(true, cfg, compressor) payloads, err := stream.BuildJSONPayload(builder, m) assert.NoError(t, err) var uncompressedPayloads [][]byte for _, compressedPayload := range payloads { - payload, err := strategy.Decompress(compressedPayload.GetContent()) + payload, err := compressor.Decompress(compressedPayload.GetContent()) assert.NoError(t, err) uncompressedPayloads = append(uncompressedPayloads, payload) @@ -159,7 +159,8 @@ func createServiceChecks(numberOfItem int) ServiceChecks { func benchmarkJSONPayloadBuilderServiceCheck(b *testing.B, numberOfItem int) { mockConfig := mock.New(b) - payloadBuilder := stream.NewJSONPayloadBuilder(true, mockConfig, selector.NewCompressor(mockConfig)) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloadBuilder := stream.NewJSONPayloadBuilder(true, mockConfig, compressor) serviceChecks := createServiceChecks(numberOfItem) b.ResetTimer() @@ -200,9 +201,9 @@ func benchmarkPayloadsServiceCheck(b *testing.B, numberOfItem int) { b.ResetTimer() mockConfig := 
mock.New(b) - strategy := selector.NewCompressor(mockConfig) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp for n := 0; n < b.N; n++ { - split.Payloads(serviceChecks, true, split.JSONMarshalFct, strategy) + split.Payloads(serviceChecks, true, split.JSONMarshalFct, compressor) } } diff --git a/pkg/serializer/internal/metrics/sketch_benchmark_test.go b/pkg/serializer/internal/metrics/sketch_benchmark_test.go index 0d1b6a0666c11..3cf90c370406b 100644 --- a/pkg/serializer/internal/metrics/sketch_benchmark_test.go +++ b/pkg/serializer/internal/metrics/sketch_benchmark_test.go @@ -12,7 +12,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" @@ -30,9 +30,9 @@ func benchmarkSplitPayloadsSketchesSplit(b *testing.B, numPoints int) { b.ResetTimer() mockConfig := mock.New(b) - strategy := selector.NewCompressor(mockConfig) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp for n := 0; n < b.N; n++ { - split.Payloads(serializer, true, split.ProtoMarshalFct, strategy) + split.Payloads(serializer, true, split.ProtoMarshalFct, compressor) } } @@ -45,10 +45,10 @@ func benchmarkSplitPayloadsSketchesNew(b *testing.B, numPoints int) { b.ReportAllocs() b.ResetTimer() mockConfig := mock.New(b) - strategy := selector.NewCompressor(mockConfig) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp for n := 0; n < b.N; n++ { - payloads, err := serializer.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, strategy) + payloads, err := serializer.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, compressor) require.NoError(b, err) var pb int for _, p := range payloads { diff --git a/pkg/serializer/internal/metrics/sketch_series_list.go b/pkg/serializer/internal/metrics/sketch_series_list.go index 59608575fd81b..d73ffe9ebab53 100644 --- a/pkg/serializer/internal/metrics/sketch_series_list.go +++ b/pkg/serializer/internal/metrics/sketch_series_list.go @@ -14,7 +14,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer/internal/stream" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" @@ -251,10 +251,6 @@ func (pb *payloadsBuilder) marshal(ss *metrics.SketchSeries) error { const sketchMetadataOriginOriginService = 6 // |----| 'Origin' message // |-----------| 'origin_service' field index - const serieMetadataOriginOriginProductAgentType = 10 - // |----| 'Origin' message - // |-----------| 'OriginProduct' enum - // |-------| 'Agent' enum value pb.buf.Reset() err := pb.ps.Embedded(payloadSketches, func(ps *molecule.ProtoStream) error { @@ -336,7 +332,7 @@ func (pb *payloadsBuilder) marshal(ss *metrics.SketchSeries) error { return err } } - err = ps.Int32(sketchMetadataOriginOriginProduct, serieMetadataOriginOriginProductAgentType) + err = 
ps.Int32(sketchMetadataOriginOriginProduct, metricSourceToOriginProduct(ss.Source)) if err != nil { return err } diff --git a/pkg/serializer/internal/metrics/sketch_series_test.go b/pkg/serializer/internal/metrics/sketch_series_test.go index 84ecf06c676ec..c82901fca6085 100644 --- a/pkg/serializer/internal/metrics/sketch_series_test.go +++ b/pkg/serializer/internal/metrics/sketch_series_test.go @@ -12,12 +12,12 @@ import ( "github.com/DataDog/agent-payload/v5/gogen" - "github.com/DataDog/datadog-agent/comp/serializer/compression/common" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" "github.com/DataDog/datadog-agent/pkg/tagset" + "github.com/DataDog/datadog-agent/pkg/util/compression" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -93,8 +93,8 @@ func TestSketchSeriesMarshalSplitCompressEmpty(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -102,15 +102,16 @@ func TestSketchSeriesMarshalSplitCompressEmpty(t *testing.T) { mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) sl := SketchSeriesList{SketchesSource: metrics.NewSketchesSourceTest()} payload, _ := sl.Marshal() - strategy := selector.NewCompressor(mockConfig) - payloads, err := sl.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, strategy) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, err := sl.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, compressor) assert.Nil(t, err) firstPayload := payloads[0] assert.Equal(t, 0, firstPayload.GetPointCount()) - decompressed, _ := strategy.Decompress(firstPayload.GetContent()) + decompressed, _ := compressor.Decompress(firstPayload.GetContent()) // Check that we encoded the protobuf correctly assert.Equal(t, decompressed, payload) }) @@ -122,8 +123,8 @@ func TestSketchSeriesMarshalSplitCompressItemTooBigIsDropped(t *testing.T) { kind string maxUncompressedSize int }{ - "zlib": {kind: common.ZlibKind, maxUncompressedSize: 100}, - "zstd": {kind: common.ZstdKind, maxUncompressedSize: 200}, + "zlib": {kind: compression.ZlibKind, maxUncompressedSize: 100}, + "zstd": {kind: compression.ZstdKind, maxUncompressedSize: 200}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -144,15 +145,16 @@ func TestSketchSeriesMarshalSplitCompressItemTooBigIsDropped(t *testing.T) { }) serializer := SketchSeriesList{SketchesSource: sl} - strategy := selector.NewCompressor(mockConfig) - payloads, err := serializer.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, strategy) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, err := serializer.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, compressor) assert.Nil(t, err) firstPayload := payloads[0] require.Equal(t, 0, firstPayload.GetPointCount()) - decompressed, _ := strategy.Decompress(firstPayload.GetContent()) + decompressed, _ := compressor.Decompress(firstPayload.GetContent()) pl := new(gogen.SketchPayload) if err 
:= pl.Unmarshal(decompressed); err != nil { @@ -170,8 +172,8 @@ func TestSketchSeriesMarshalSplitCompress(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -185,14 +187,15 @@ func TestSketchSeriesMarshalSplitCompress(t *testing.T) { sl.Reset() serializer2 := SketchSeriesList{SketchesSource: sl} - strategy := selector.NewCompressor(mockConfig) - payloads, err := serializer2.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, strategy) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, err := serializer2.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, compressor) require.NoError(t, err) firstPayload := payloads[0] assert.Equal(t, 11, firstPayload.GetPointCount()) - decompressed, _ := strategy.Decompress(firstPayload.GetContent()) + decompressed, _ := compressor.Decompress(firstPayload.GetContent()) pl := new(gogen.SketchPayload) err = pl.Unmarshal(decompressed) @@ -226,8 +229,8 @@ func TestSketchSeriesMarshalSplitCompressSplit(t *testing.T) { kind string maxUncompressedSize int }{ - "zlib": {kind: common.ZlibKind, maxUncompressedSize: 2000}, - "zstd": {kind: common.ZstdKind, maxUncompressedSize: 2000}, + "zlib": {kind: compression.ZlibKind, maxUncompressedSize: 2000}, + "zstd": {kind: compression.ZstdKind, maxUncompressedSize: 2000}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -244,15 +247,16 @@ func TestSketchSeriesMarshalSplitCompressSplit(t *testing.T) { } serializer := SketchSeriesList{SketchesSource: sl} - strategy := selector.NewCompressor(mockConfig) - payloads, err := serializer.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, strategy) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, err := serializer.MarshalSplitCompress(marshaler.NewBufferContext(), mockConfig, compressor) assert.Nil(t, err) recoveredSketches := []gogen.SketchPayload{} recoveredCount := 0 pointCount := 0 for _, pld := range payloads { - decompressed, _ := strategy.Decompress(pld.GetContent()) + decompressed, _ := compressor.Decompress(pld.GetContent()) pl := new(gogen.SketchPayload) if err := pl.Unmarshal(decompressed); err != nil { @@ -293,8 +297,8 @@ func TestSketchSeriesMarshalSplitCompressMultiple(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -308,8 +312,8 @@ func TestSketchSeriesMarshalSplitCompressMultiple(t *testing.T) { sl.Reset() serializer2 := SketchSeriesList{SketchesSource: sl} - strategy := selector.NewCompressor(mockConfig) - payloads, filteredPayloads, err := serializer2.MarshalSplitCompressMultiple(mockConfig, strategy, func(ss *metrics.SketchSeries) bool { + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, filteredPayloads, err := serializer2.MarshalSplitCompressMultiple(mockConfig, compressor, func(ss *metrics.SketchSeries) bool { return ss.Name == "name.0" }) require.NoError(t, err) diff --git a/pkg/serializer/internal/stream/compressor.go b/pkg/serializer/internal/stream/compressor.go 
index dea456eef7dba..6d97f16181d7b 100644 --- a/pkg/serializer/internal/stream/compressor.go +++ b/pkg/serializer/internal/stream/compressor.go @@ -11,9 +11,9 @@ import ( "errors" "expvar" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/telemetry" - - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) var ( @@ -56,7 +56,7 @@ func init() { type Compressor struct { input *bytes.Buffer // temporary buffer for data that has not been compressed yet compressed *bytes.Buffer // output buffer containing the compressed payload - strategy compression.Component + strategy metricscompression.Component zipper compression.StreamCompressor header []byte // json header to print at the beginning of the payload footer []byte // json footer to append at the end of the payload @@ -71,7 +71,7 @@ type Compressor struct { } // NewCompressor returns a new instance of a Compressor -func NewCompressor(input, output *bytes.Buffer, maxPayloadSize, maxUncompressedSize int, header, footer []byte, separator []byte, compressor compression.Component) (*Compressor, error) { +func NewCompressor(input, output *bytes.Buffer, maxPayloadSize, maxUncompressedSize int, header, footer []byte, separator []byte, compressor compression.Compressor) (*Compressor, error) { c := &Compressor{ header: header, footer: footer, diff --git a/pkg/serializer/internal/stream/compressor_test.go b/pkg/serializer/internal/stream/compressor_test.go index f44b8cc2f617d..e074854273a38 100644 --- a/pkg/serializer/internal/stream/compressor_test.go +++ b/pkg/serializer/internal/stream/compressor_test.go @@ -17,15 +17,15 @@ import ( "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/comp/core/config" - "github.com/DataDog/datadog-agent/comp/serializer/compression/common" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) func payloadToString(payload []byte, cfg config.Component) string { - strategy := selector.NewCompressor(cfg) - p, err := strategy.Decompress(payload) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: cfg}).Comp + p, err := compressor.Decompress(payload) if err != nil { return err.Error() } @@ -36,8 +36,8 @@ func TestCompressorSimple(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -45,10 +45,11 @@ func TestCompressorSimple(t *testing.T) { mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) maxPayloadSize := mockConfig.GetInt("serializer_max_payload_size") maxUncompressedSize := mockConfig.GetInt("serializer_max_uncompressed_payload_size") + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp c, err := NewCompressor( &bytes.Buffer{}, &bytes.Buffer{}, maxPayloadSize, maxUncompressedSize, - []byte("{["), []byte("]}"), []byte(","), selector.NewCompressor(mockConfig)) + []byte("{["), []byte("]}"), []byte(","), compressor) require.NoError(t, err) 
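For readers tracking the stream package change above: NewCompressor keeps its buffer, size-limit and JSON framing arguments and now takes the pkg/util/compression Compressor as its last parameter, exactly as the TestCompressorSimple hunk shows. The annotated construction below is a sketch that would sit next to those tests in package stream; it sticks to calls visible in this diff, and the test name is illustrative.

package stream

import (
    "bytes"
    "testing"

    "github.com/stretchr/testify/require"

    metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl"
    "github.com/DataDog/datadog-agent/pkg/config/mock"
    "github.com/DataDog/datadog-agent/pkg/util/compression"
)

func TestNewCompressorSketch(t *testing.T) {
    cfg := mock.New(t)
    cfg.SetWithoutSource("serializer_compressor_kind", compression.ZlibKind)

    maxPayloadSize := cfg.GetInt("serializer_max_payload_size")                   // cap on the compressed payload size
    maxUncompressedSize := cfg.GetInt("serializer_max_uncompressed_payload_size") // cap on buffered, not-yet-compressed data

    compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: cfg}).Comp

    c, err := NewCompressor(
        &bytes.Buffer{}, &bytes.Buffer{}, // uncompressed input buffer, compressed output buffer
        maxPayloadSize, maxUncompressedSize,
        []byte("{["), []byte("]}"), []byte(","), // JSON header, footer and item separator
        compressor)
    require.NoError(t, err)
    _ = c // ready to receive items, as in TestCompressorSimple above
}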
for i := 0; i < 5; i++ { @@ -70,18 +71,20 @@ func TestCompressorAddItemErrCodeWithEmptyCompressor(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { mockConfig := mock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp checkAddItemErrCode := func(maxPayloadSize, maxUncompressedSize, dataLen int) { c, err := NewCompressor( &bytes.Buffer{}, &bytes.Buffer{}, maxPayloadSize, maxUncompressedSize, - []byte("{["), []byte("]}"), []byte(","), selector.NewCompressor(mockConfig)) + []byte("{["), []byte("]}"), []byte(","), compressor) require.NoError(t, err) payload := strings.Repeat("A", dataLen) @@ -115,8 +118,8 @@ func TestOnePayloadSimple(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -128,7 +131,9 @@ func TestOnePayloadSimple(t *testing.T) { mockConfig := mock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - builder := NewJSONPayloadBuilder(true, mockConfig, selector.NewCompressor(mockConfig)) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + builder := NewJSONPayloadBuilder(true, mockConfig, compressor) payloads, err := BuildJSONPayload(builder, m) require.NoError(t, err) require.Len(t, payloads, 1) @@ -143,8 +148,8 @@ func TestMaxCompressedSizePayload(t *testing.T) { kind string maxPayloadSize int }{ - "zlib": {kind: common.ZlibKind, maxPayloadSize: 22}, - "zstd": {kind: common.ZstdKind, maxPayloadSize: 90}, + "zlib": {kind: compression.ZlibKind, maxPayloadSize: 22}, + "zstd": {kind: compression.ZstdKind, maxPayloadSize: 90}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -156,8 +161,8 @@ func TestMaxCompressedSizePayload(t *testing.T) { mockConfig := mock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) mockConfig.SetDefault("serializer_max_payload_size", tc.maxPayloadSize) - - builder := NewJSONPayloadBuilder(true, mockConfig, selector.NewCompressor(mockConfig)) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + builder := NewJSONPayloadBuilder(true, mockConfig, compressor) payloads, err := BuildJSONPayload(builder, m) require.NoError(t, err) require.Len(t, payloads, 1) @@ -181,7 +186,8 @@ func TestZstdCompressionLevel(t *testing.T) { mockConfig.SetWithoutSource("serializer_compressor_kind", "zstd") mockConfig.SetDefault("serializer_zstd_compressor_level", level) - builder := NewJSONPayloadBuilder(true, mockConfig, selector.NewCompressor(mockConfig)) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + builder := NewJSONPayloadBuilder(true, mockConfig, compressor) payloads, err := BuildJSONPayload(builder, m) require.NoError(t, err) require.Len(t, payloads, 1) @@ -196,8 +202,8 @@ func TestTwoPayload(t *testing.T) { kind string maxPayloadSize int }{ - "zlib": {kind: common.ZlibKind, maxPayloadSize: 22}, - "zstd": {kind: common.ZstdKind, maxPayloadSize: 70}, + "zlib": {kind: 
compression.ZlibKind, maxPayloadSize: 22}, + "zstd": {kind: compression.ZstdKind, maxPayloadSize: 70}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -210,7 +216,8 @@ func TestTwoPayload(t *testing.T) { mockConfig.SetDefault("serializer_max_payload_size", tc.maxPayloadSize) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - builder := NewJSONPayloadBuilder(true, mockConfig, selector.NewCompressor(mockConfig)) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + builder := NewJSONPayloadBuilder(true, mockConfig, compressor) payloads, err := BuildJSONPayload(builder, m) require.NoError(t, err) require.Len(t, payloads, 2) @@ -225,8 +232,8 @@ func TestLockedCompressorProducesSamePayloads(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -238,8 +245,9 @@ func TestLockedCompressorProducesSamePayloads(t *testing.T) { mockConfig := mock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - builderLocked := NewJSONPayloadBuilder(true, mockConfig, selector.NewCompressor(mockConfig)) - builderUnLocked := NewJSONPayloadBuilder(false, mockConfig, selector.NewCompressor(mockConfig)) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + builderLocked := NewJSONPayloadBuilder(true, mockConfig, compressor) + builderUnLocked := NewJSONPayloadBuilder(false, mockConfig, compressor) payloads1, err := BuildJSONPayload(builderLocked, m) require.NoError(t, err) payloads2, err := BuildJSONPayload(builderUnLocked, m) @@ -255,16 +263,18 @@ func TestBuildWithOnErrItemTooBigPolicyMetadata(t *testing.T) { kind string maxUncompressedPayloadSize int }{ - "zlib": {kind: common.ZlibKind, maxUncompressedPayloadSize: 40}, - "zstd": {kind: common.ZstdKind, maxUncompressedPayloadSize: 170}, + "zlib": {kind: compression.ZlibKind, maxUncompressedPayloadSize: 40}, + "zstd": {kind: compression.ZstdKind, maxUncompressedPayloadSize: 170}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { mockConfig := mock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) mockConfig.SetWithoutSource("serializer_max_uncompressed_payload_size", tc.maxUncompressedPayloadSize) + + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp marshaler := &IterableStreamJSONMarshalerMock{index: 0, maxIndex: 100} - builder := NewJSONPayloadBuilder(false, mockConfig, selector.NewCompressor(mockConfig)) + builder := NewJSONPayloadBuilder(false, mockConfig, compressor) payloads, err := builder.BuildWithOnErrItemTooBigPolicy( marshaler, DropItemOnErrItemTooBig) diff --git a/pkg/serializer/internal/stream/json_payload_builder.go b/pkg/serializer/internal/stream/json_payload_builder.go index 0c12e7c2fb078..4812077688524 100644 --- a/pkg/serializer/internal/stream/json_payload_builder.go +++ b/pkg/serializer/internal/stream/json_payload_builder.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/comp/core/config" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" 
"github.com/DataDog/datadog-agent/pkg/serializer/marshaler" "github.com/DataDog/datadog-agent/pkg/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index a60e474cebd35..54a6474b87720 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -21,7 +21,6 @@ import ( orchestratorForwarder "github.com/DataDog/datadog-agent/comp/forwarder/orchestrator/orchestratorinterface" "github.com/DataDog/datadog-agent/comp/core/config" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" @@ -31,6 +30,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" "github.com/DataDog/datadog-agent/pkg/serializer/split" "github.com/DataDog/datadog-agent/pkg/serializer/types" + "github.com/DataDog/datadog-agent/pkg/util/compression" "github.com/DataDog/datadog-agent/pkg/util/log" "github.com/DataDog/datadog-agent/pkg/version" @@ -107,7 +107,7 @@ type Serializer struct { orchestratorForwarder orchestratorForwarder.Component config config.Component - Strategy compression.Component + Strategy compression.Compressor seriesJSONPayloadBuilder *stream.JSONPayloadBuilder jsonExtraHeaders http.Header protobufExtraHeaders http.Header @@ -133,7 +133,7 @@ type Serializer struct { } // NewSerializer returns a new Serializer initialized -func NewSerializer(forwarder forwarder.Forwarder, orchestratorForwarder orchestratorForwarder.Component, compressor compression.Component, config config.Component, hostName string) *Serializer { +func NewSerializer(forwarder forwarder.Forwarder, orchestratorForwarder orchestratorForwarder.Component, compressor compression.Compressor, config config.Component, hostName string) *Serializer { streamAvailable := compressor.NewStreamCompressor(&bytes.Buffer{}) != nil diff --git a/pkg/serializer/serializer_benchmark_test.go b/pkg/serializer/serializer_benchmark_test.go index f02bc89a44f72..ec0a0ac9cc9dc 100644 --- a/pkg/serializer/serializer_benchmark_test.go +++ b/pkg/serializer/serializer_benchmark_test.go @@ -12,7 +12,7 @@ import ( "testing" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompressionimpl "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/metrics/event" metricsserializer "github.com/DataDog/datadog-agent/pkg/serializer/internal/metrics" @@ -43,7 +43,8 @@ func benchmarkJSONStream(b *testing.B, passes int, sharedBuffers bool, numberOfE events := buildEvents(numberOfEvents) marshaler := events.CreateSingleMarshaler() mockConfig := mock.New(b) - payloadBuilder := stream.NewJSONPayloadBuilder(sharedBuffers, mockConfig, selector.NewCompressor(mockConfig)) + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + payloadBuilder := stream.NewJSONPayloadBuilder(sharedBuffers, mockConfig, compressor) b.ResetTimer() for n := 0; n < b.N; n++ { @@ -58,9 +59,9 @@ func benchmarkSplit(b *testing.B, numberOfEvents int) { b.ResetTimer() mockConfig := mock.New(b) - strategy := selector.NewCompressor(mockConfig) + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp 
for n := 0; n < b.N; n++ { - results, _ = split.Payloads(events, true, split.JSONMarshalFct, strategy) + results, _ = split.Payloads(events, true, split.JSONMarshalFct, compressor) } } diff --git a/pkg/serializer/serializer_test.go b/pkg/serializer/serializer_test.go index 7675390cc3233..84e579bd9b029 100644 --- a/pkg/serializer/serializer_test.go +++ b/pkg/serializer/serializer_test.go @@ -22,21 +22,23 @@ import ( forwarder "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - "github.com/DataDog/datadog-agent/comp/serializer/compression/common" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" + metricscompressionimpl "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" metricsserializer "github.com/DataDog/datadog-agent/pkg/serializer/internal/metrics" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) func TestInitExtraHeadersNoopCompression(t *testing.T) { mockConfig := configmock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", "blah") - s := NewSerializer(nil, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(nil, nil, compressor, mockConfig, "testhost") initExtraHeaders(s) expected := make(http.Header) @@ -51,10 +53,12 @@ func TestInitExtraHeadersNoopCompression(t *testing.T) { // No "Content-Encoding" header expected = make(http.Header) expected.Set("Content-Type", jsonContentType) + expected.Set("Content-Encoding", "identity") assert.Equal(t, expected, s.jsonExtraHeadersWithCompression) expected = make(http.Header) expected.Set("Content-Type", protobufContentType) + expected.Set("Content-Encoding", "identity") expected.Set(payloadVersionHTTPHeader, AgentPayloadVersion) assert.Equal(t, expected, s.protobufExtraHeadersWithCompression) } @@ -64,14 +68,15 @@ func TestInitExtraHeadersWithCompression(t *testing.T) { kind string expectedEncoding string }{ - "zlib": {kind: common.ZlibKind, expectedEncoding: compression.ZlibEncoding}, - "zstd": {kind: common.ZstdKind, expectedEncoding: compression.ZstdEncoding}, + "zlib": {kind: compression.ZlibKind, expectedEncoding: compression.ZlibEncoding}, + "zstd": {kind: compression.ZstdKind, expectedEncoding: compression.ZstdEncoding}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { mockConfig := configmock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - s := NewSerializer(nil, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(nil, nil, compressor, mockConfig, "testhost") initExtraHeaders(s) expected := make(http.Header) @@ -112,7 +117,7 @@ var ( ) type testPayload struct { - compressor compression.Component + compressor metricscompression.Component } //nolint:revive // TODO(AML) Fix revive linter @@ 
-250,8 +255,8 @@ func TestSendV1Events(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -260,7 +265,8 @@ func TestSendV1Events(t *testing.T) { mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) f := &forwarder.MockedForwarder{} - s := NewSerializer(f, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(f, nil, compressor, mockConfig, "testhost") matcher := createJSONPayloadMatcher(`{"apiKey":"","events":{},"internalHostname"`, s) f.On("SubmitV1Intake", matcher, s.jsonExtraHeadersWithCompression).Return(nil).Times(1) @@ -276,8 +282,8 @@ func TestSendV1EventsCreateMarshalersBySourceType(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { @@ -287,7 +293,8 @@ func TestSendV1EventsCreateMarshalersBySourceType(t *testing.T) { mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) f := &forwarder.MockedForwarder{} - s := NewSerializer(f, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(f, nil, compressor, mockConfig, "testhost") events := event.Events{&event.Event{SourceTypeName: "source1"}, &event.Event{SourceTypeName: "source2"}, &event.Event{SourceTypeName: "source3"}} payloadsCountMatcher := func(payloadCount int) interface{} { @@ -315,8 +322,8 @@ func TestSendV1ServiceChecks(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { @@ -325,7 +332,9 @@ func TestSendV1ServiceChecks(t *testing.T) { mockConfig := configmock.New(t) mockConfig.SetWithoutSource("enable_service_checks_stream_payload_serialization", false) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - s := NewSerializer(f, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(f, nil, compressor, mockConfig, "testhost") matcher := createJSONPayloadMatcher(`[{"check":"","host_name":"","timestamp":0,"status":0,"message":"","tags":null}]`, s) f.On("SubmitV1CheckRuns", matcher, s.jsonExtraHeadersWithCompression).Return(nil).Times(1) @@ -340,8 +349,8 @@ func TestSendV1Series(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { @@ -351,7 +360,9 @@ func TestSendV1Series(t *testing.T) { mockConfig.SetWithoutSource("enable_stream_payload_serialization", false) mockConfig.SetWithoutSource("use_v2_api.series", false) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - s := NewSerializer(f, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + + compressor := 
metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(f, nil, compressor, mockConfig, "testhost") matcher := createJSONPayloadMatcher(`{"series":[]}`, s) f.On("SubmitV1Series", matcher, s.jsonExtraHeadersWithCompression).Return(nil).Times(1) @@ -367,8 +378,8 @@ func TestSendSeries(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { @@ -377,7 +388,9 @@ func TestSendSeries(t *testing.T) { mockConfig := configmock.New(t) mockConfig.SetWithoutSource("use_v2_api.series", true) // default value, but just to be sure mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - s := NewSerializer(f, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(f, nil, compressor, mockConfig, "testhost") matcher := createProtoscopeMatcher(`1: { 1: { 1: {"host"} } 5: 3 @@ -396,8 +409,8 @@ func TestSendSketch(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { @@ -406,7 +419,9 @@ func TestSendSketch(t *testing.T) { mockConfig := configmock.New(t) mockConfig.SetWithoutSource("use_v2_api.series", true) // default value, but just to be sure mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - s := NewSerializer(f, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(f, nil, compressor, mockConfig, "testhost") matcher := createProtoscopeMatcher(` 1: { 1: {"fakename"} 2: {"fakehost"} 8: { 1: { 4: 10 }}} 2: {} @@ -426,8 +441,8 @@ func TestSendMetadata(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { @@ -435,7 +450,8 @@ func TestSendMetadata(t *testing.T) { f := &forwarder.MockedForwarder{} mockConfig := configmock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - compressor := selector.NewCompressor(mockConfig) + + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp s := NewSerializer(f, nil, compressor, mockConfig, "testhost") jsonPayloads, _ := mkPayloads(jsonString, true, s) f.On("SubmitMetadata", jsonPayloads, s.jsonExtraHeadersWithCompression).Return(nil).Times(1) @@ -461,8 +477,8 @@ func TestSendProcessesMetadata(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { @@ -471,7 +487,9 @@ func TestSendProcessesMetadata(t *testing.T) { payload := []byte("\"test\"") mockConfig := configmock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - s := NewSerializer(f, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + + compressor := 
metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(f, nil, compressor, mockConfig, "testhost") payloads, _ := mkPayloads(payload, true, s) f.On("SubmitV1Intake", payloads, s.jsonExtraHeadersWithCompression).Return(nil).Times(1) @@ -495,8 +513,8 @@ func TestSendWithDisabledKind(t *testing.T) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { @@ -511,7 +529,9 @@ func TestSendWithDisabledKind(t *testing.T) { mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) f := &forwarder.MockedForwarder{} - s := NewSerializer(f, nil, selector.NewCompressor(mockConfig), mockConfig, "testhost") + + compressor := metricscompressionimpl.NewCompressorReq(metricscompressionimpl.Requires{Cfg: mockConfig}).Comp + s := NewSerializer(f, nil, compressor, mockConfig, "testhost") jsonPayloads, _ := mkPayloads(jsonString, true, s) payload := &testPayload{} diff --git a/pkg/serializer/series_benchmark_test.go b/pkg/serializer/series_benchmark_test.go index 0ec06d0833991..8b96a7d4f88c8 100644 --- a/pkg/serializer/series_benchmark_test.go +++ b/pkg/serializer/series_benchmark_test.go @@ -14,7 +14,7 @@ import ( "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/config/mock" "github.com/DataDog/datadog-agent/pkg/metrics" metricsserializer "github.com/DataDog/datadog-agent/pkg/serializer/internal/metrics" @@ -77,12 +77,13 @@ func BenchmarkSeries(b *testing.B) { } bufferContext := marshaler.NewBufferContext() mockConfig := mock.New(b) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp pb := func(series metrics.Series) (transaction.BytesPayloads, error) { iterableSeries := metricsserializer.CreateIterableSeries(metricsserializer.CreateSerieSource(series)) - return iterableSeries.MarshalSplitCompress(bufferContext, mockConfig, selector.NewCompressor(mockConfig)) + return iterableSeries.MarshalSplitCompress(bufferContext, mockConfig, compressor) } - payloadBuilder := stream.NewJSONPayloadBuilder(true, mockConfig, selector.NewCompressor(mockConfig)) + payloadBuilder := stream.NewJSONPayloadBuilder(true, mockConfig, compressor) json := func(series metrics.Series) (transaction.BytesPayloads, error) { iterableSeries := metricsserializer.CreateIterableSeries(metricsserializer.CreateSerieSource(series)) return payloadBuilder.BuildWithOnErrItemTooBigPolicy(iterableSeries, stream.DropItemOnErrItemTooBig) diff --git a/pkg/serializer/split/split.go b/pkg/serializer/split/split.go index 0d73e9b6ff0e6..00f65c90494cf 100644 --- a/pkg/serializer/split/split.go +++ b/pkg/serializer/split/split.go @@ -10,7 +10,7 @@ import ( "expvar" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + compression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/def" "github.com/DataDog/datadog-agent/pkg/serializer/marshaler" "github.com/DataDog/datadog-agent/pkg/telemetry" diff --git a/pkg/serializer/split/split_test.go b/pkg/serializer/split/split_test.go index 
22fa78e0ff9bd..7c83aea822f6b 100644 --- a/pkg/serializer/split/split_test.go +++ b/pkg/serializer/split/split_test.go @@ -15,13 +15,14 @@ import ( "github.com/stretchr/testify/require" "github.com/DataDog/datadog-agent/comp/forwarder/defaultforwarder/transaction" - "github.com/DataDog/datadog-agent/comp/serializer/compression/common" - "github.com/DataDog/datadog-agent/comp/serializer/compression/selector" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/impl" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/metrics/event" "github.com/DataDog/datadog-agent/pkg/metrics/servicecheck" metricsserializer "github.com/DataDog/datadog-agent/pkg/serializer/internal/metrics" "github.com/DataDog/datadog-agent/pkg/tagset" + "github.com/DataDog/datadog-agent/pkg/util/compression" + "github.com/DataDog/datadog-agent/pkg/util/compression/selector" "github.com/DataDog/datadog-agent/pkg/config/mock" ) @@ -55,8 +56,8 @@ func testSplitPayloadsSeries(t *testing.T, numPoints int, compress bool) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -85,9 +86,7 @@ func testSplitPayloadsSeries(t *testing.T, numPoints int, compress bool) { testSeries = append(testSeries, &point) } - mockConfig := mock.New(t) - mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - strategy := selector.NewCompressor(mockConfig) + strategy := selector.NewCompressor(tc.kind, 1) payloads, err := Payloads(testSeries, compress, JSONMarshalFct, strategy) require.Nil(t, err) @@ -137,12 +136,12 @@ func BenchmarkSplitPayloadsSeries(b *testing.B) { } mockConfig := mock.New(b) - strategy := selector.NewCompressor(mockConfig) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp var r transaction.BytesPayloads for n := 0; n < b.N; n++ { // always record the result of Payloads to prevent // the compiler eliminating the function call. 
- r, _ = Payloads(testSeries, true, JSONMarshalFct, strategy) + r, _ = Payloads(testSeries, true, JSONMarshalFct, compressor) } // ensure we actually had to split @@ -194,8 +193,8 @@ func testSplitPayloadsEvents(t *testing.T, numPoints int, compress bool) { tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -216,8 +215,8 @@ func testSplitPayloadsEvents(t *testing.T, numPoints int, compress bool) { mockConfig := mock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - strategy := selector.NewCompressor(mockConfig) - payloads, err := Payloads(testEvent, compress, JSONMarshalFct, strategy) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, err := Payloads(testEvent, compress, JSONMarshalFct, compressor) require.Nil(t, err) originalLength := len(testEvent.EventsArr) @@ -226,7 +225,7 @@ func testSplitPayloadsEvents(t *testing.T, numPoints int, compress bool) { var s map[string]interface{} localPayload := payload.GetContent() if compress { - localPayload, err = strategy.Decompress(localPayload) + localPayload, err = compressor.Decompress(localPayload) require.Nil(t, err) } @@ -275,8 +274,8 @@ func testSplitPayloadsServiceChecks(t *testing.T, numPoints int, compress bool) tests := map[string]struct { kind string }{ - "zlib": {kind: common.ZlibKind}, - "zstd": {kind: common.ZstdKind}, + "zlib": {kind: compression.ZlibKind}, + "zstd": {kind: compression.ZstdKind}, } for name, tc := range tests { t.Run(name, func(t *testing.T) { @@ -295,8 +294,8 @@ func testSplitPayloadsServiceChecks(t *testing.T, numPoints int, compress bool) mockConfig := mock.New(t) mockConfig.SetWithoutSource("serializer_compressor_kind", tc.kind) - strategy := selector.NewCompressor(mockConfig) - payloads, err := Payloads(testServiceChecks, compress, JSONMarshalFct, strategy) + compressor := metricscompression.NewCompressorReq(metricscompression.Requires{Cfg: mockConfig}).Comp + payloads, err := Payloads(testServiceChecks, compress, JSONMarshalFct, compressor) require.Nil(t, err) originalLength := len(testServiceChecks) @@ -305,7 +304,7 @@ func testSplitPayloadsServiceChecks(t *testing.T, numPoints int, compress bool) var s []interface{} localPayload := payload.GetContent() if compress { - localPayload, err = strategy.Decompress(localPayload) + localPayload, err = compressor.Decompress(localPayload) require.Nil(t, err) } diff --git a/pkg/serverless/invocationlifecycle/lifecycle_test.go b/pkg/serverless/invocationlifecycle/lifecycle_test.go index 3eefd8dc51bbd..2f2f36366475d 100644 --- a/pkg/serverless/invocationlifecycle/lifecycle_test.go +++ b/pkg/serverless/invocationlifecycle/lifecycle_test.go @@ -18,7 +18,8 @@ import ( "github.com/DataDog/datadog-agent/comp/core/hostname/hostnameimpl" log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/metrics" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" 
"github.com/DataDog/datadog-agent/pkg/serverless/logs" @@ -1383,5 +1384,5 @@ func getEventFromFile(filename string) []byte { } func createDemultiplexer(t *testing.T) demultiplexer.FakeSamplerMock { - return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), compressionmock.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) + return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), logscompression.MockModule(), metricscompression.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) } diff --git a/pkg/serverless/logs/logs_test.go b/pkg/serverless/logs/logs_test.go index 132efb75bbcaa..9fd50a27f7128 100644 --- a/pkg/serverless/logs/logs_test.go +++ b/pkg/serverless/logs/logs_test.go @@ -26,7 +26,8 @@ import ( log "github.com/DataDog/datadog-agent/comp/core/log/def" logmock "github.com/DataDog/datadog-agent/comp/core/log/mock" "github.com/DataDog/datadog-agent/comp/logs/agent/config" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompressionmock "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/metrics" "github.com/DataDog/datadog-agent/pkg/serverless/executioncontext" serverlessMetrics "github.com/DataDog/datadog-agent/pkg/serverless/metrics" @@ -1475,5 +1476,5 @@ func TestMultipleStartLogCollection(t *testing.T) { } func createDemultiplexer(t *testing.T) demultiplexer.FakeSamplerMock { - return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), compressionmock.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) + return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), logscompressionmock.MockModule(), metricscompressionmock.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) } diff --git a/pkg/serverless/logs/scheduler.go b/pkg/serverless/logs/scheduler.go index b5be31485d14f..962652952aac6 100644 --- a/pkg/serverless/logs/scheduler.go +++ b/pkg/serverless/logs/scheduler.go @@ -10,6 +10,7 @@ import ( logsAgent "github.com/DataDog/datadog-agent/comp/logs/agent" "github.com/DataDog/datadog-agent/comp/logs/agent/agentimpl" "github.com/DataDog/datadog-agent/comp/logs/agent/config" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/def" "github.com/DataDog/datadog-agent/pkg/logs/schedulers/channel" "github.com/DataDog/datadog-agent/pkg/util/log" ) @@ -19,8 +20,8 @@ import ( var logsScheduler *channel.Scheduler // SetupLogAgent sets up the logs agent to handle messages on the given channel. 
-func SetupLogAgent(logChannel chan *config.ChannelMessage, sourceName string, source string, tagger tagger.Component) (logsAgent.ServerlessLogsAgent, error) { - agent := agentimpl.NewServerlessLogsAgent(tagger) +func SetupLogAgent(logChannel chan *config.ChannelMessage, sourceName string, source string, tagger tagger.Component, compression logscompression.Component) (logsAgent.ServerlessLogsAgent, error) { + agent := agentimpl.NewServerlessLogsAgent(tagger, compression) err := agent.Start() if err != nil { log.Error("Could not start an instance of the Logs Agent:", err) diff --git a/pkg/serverless/metrics/enhanced_metrics_test.go b/pkg/serverless/metrics/enhanced_metrics_test.go index 9c6f9472ee756..106a00af5896f 100644 --- a/pkg/serverless/metrics/enhanced_metrics_test.go +++ b/pkg/serverless/metrics/enhanced_metrics_test.go @@ -12,7 +12,8 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/fx" - compressionmock "github.com/DataDog/datadog-agent/comp/serializer/compression/fx-mock" + logscompression "github.com/DataDog/datadog-agent/comp/serializer/logscompression/fx-mock" + metricscompression "github.com/DataDog/datadog-agent/comp/serializer/metricscompression/fx-mock" "github.com/DataDog/datadog-agent/pkg/serverless/proc" "github.com/DataDog/datadog-agent/comp/aggregator/demultiplexer" @@ -834,5 +835,5 @@ func TestSendFailoverReasonMetric(t *testing.T) { } func createDemultiplexer(t *testing.T) demultiplexer.FakeSamplerMock { - return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), compressionmock.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) + return fxutil.Test[demultiplexer.FakeSamplerMock](t, fx.Provide(func() log.Component { return logmock.New(t) }), metricscompression.MockModule(), logscompression.MockModule(), demultiplexerimpl.FakeSamplerMockModule(), hostnameimpl.MockModule()) } diff --git a/pkg/tagger/types/go.mod b/pkg/tagger/types/go.mod index 7cbd3c8ba886c..d03c89b1c9512 100644 --- a/pkg/tagger/types/go.mod +++ b/pkg/tagger/types/go.mod @@ -1,3 +1,7 @@ module github.com/DataDog/datadog-agent/pkg/tagger/types go 1.22.0 + +replace github.com/DataDog/datadog-agent/comp/core/tagger/origindetection => ../../../comp/core/tagger/origindetection + +require github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.62.0-rc.1 diff --git a/pkg/tagger/types/go.sum b/pkg/tagger/types/go.sum index e69de29bb2d1d..93a77bf51f037 100644 --- a/pkg/tagger/types/go.sum +++ b/pkg/tagger/types/go.sum @@ -0,0 +1,8 @@ +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/pkg/tagger/types/types.go b/pkg/tagger/types/types.go index 07e9f0f1d1698..b3b3777a2521f 100644 --- a/pkg/tagger/types/types.go +++ b/pkg/tagger/types/types.go @@ -6,25 
+6,13 @@ // Package types implements the types used by the Tagger for Origin Detection. package types -// ProductOrigin is the origin of the product that sent the entity. -type ProductOrigin int - -const ( - // ProductOriginDogStatsDLegacy is the ProductOrigin for DogStatsD in Legacy mode. - // TODO: remove this when dogstatsd_origin_detection_unified is enabled by default - ProductOriginDogStatsDLegacy ProductOrigin = iota - // ProductOriginDogStatsD is the ProductOrigin for DogStatsD. - ProductOriginDogStatsD ProductOrigin = iota - // ProductOriginAPM is the ProductOrigin for APM. - ProductOriginAPM ProductOrigin = iota -) +import "github.com/DataDog/datadog-agent/comp/core/tagger/origindetection" // OriginInfo contains the Origin Detection information. type OriginInfo struct { - ContainerIDFromSocket string // ContainerIDFromSocket is the origin resolved using Unix Domain Socket. - PodUID string // PodUID is the origin resolved from the Kubernetes Pod UID. - ContainerID string // ContainerID is the origin resolved from the container ID. - ExternalData string // ExternalData is the external data list. - Cardinality string // Cardinality is the cardinality of the resolved origin. - ProductOrigin ProductOrigin // ProductOrigin is the product that sent the origin information. + ContainerIDFromSocket string // ContainerIDFromSocket is the origin resolved using Unix Domain Socket. + LocalData origindetection.LocalData // LocalData is the local data list. + ExternalData origindetection.ExternalData // ExternalData is the external data list. + Cardinality string // Cardinality is the cardinality of the resolved origin. + ProductOrigin origindetection.ProductOrigin // ProductOrigin is the product that sent the origin information. } diff --git a/pkg/telemetry/go.mod b/pkg/telemetry/go.mod index 6b727ea0cfd97..3bc99f45c3fa5 100644 --- a/pkg/telemetry/go.mod +++ b/pkg/telemetry/go.mod @@ -6,7 +6,7 @@ replace ( github.com/DataDog/datadog-agent/comp/core/telemetry => ../../comp/core/telemetry github.com/DataDog/datadog-agent/comp/def => ../../comp/def github.com/DataDog/datadog-agent/pkg/util/fxutil => ../util/fxutil - github.com/DataDog/datadog-agent/pkg/util/optional => ../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../util/option ) require ( @@ -17,7 +17,7 @@ require ( require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/fxutil v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.55.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.55.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -27,7 +27,7 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.20.5 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.60.1 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/cobra v1.8.1 // indirect github.com/spf13/pflag v1.0.5 // indirect @@ -36,7 +36,7 @@ require ( go.uber.org/fx v1.23.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/sys v0.28.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + golang.org/x/sys v0.29.0 // indirect + google.golang.org/protobuf v1.36.3 // 
indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/telemetry/go.sum b/pkg/telemetry/go.sum index 6ba871b73d5aa..4a9ee53da467e 100644 --- a/pkg/telemetry/go.sum +++ b/pkg/telemetry/go.sum @@ -25,8 +25,8 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+ github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -50,10 +50,10 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/trace/agent/normalizer.go b/pkg/trace/agent/normalizer.go index 25bf18fca89f7..16fe2c125e7c0 100644 --- a/pkg/trace/agent/normalizer.go +++ b/pkg/trace/agent/normalizer.go @@ -30,6 +30,8 @@ const ( tagSamplingPriority = "_sampling_priority_v1" // peerServiceKey is the key for the peer.service meta field. peerServiceKey = "peer.service" + // baseServiceKey is the key for the _dd.base_service meta field. + baseServiceKey = "_dd.base_service" ) var ( @@ -80,6 +82,24 @@ func (a *Agent) normalize(ts *info.TagStats, s *pb.Span) error { s.Meta[peerServiceKey] = ps } + bSvc, ok := s.Meta[baseServiceKey] + if ok { + bs, err := traceutil.NormalizePeerService(bSvc) + switch err { + case traceutil.ErrTooLong: + ts.SpansMalformed.BaseServiceTruncate.Inc() + log.Debugf("Fixing malformed trace. 
_dd.base_service is too long (reason:base_service_truncate), truncating _dd.base_service to length=%d: %s", traceutil.MaxServiceLen, bs) + case traceutil.ErrInvalid: + ts.SpansMalformed.BaseServiceInvalid.Inc() + log.Debugf("Fixing malformed trace. _dd.base_service is invalid (reason:base_service_invalid), replacing invalid _dd.base_service=%s with empty string", bSvc) + default: + if err != nil { + log.Debugf("Unexpected error in _dd.base_service normalization from original value (%s) to new value (%s): %s", bSvc, bs, err) + } + } + s.Meta[baseServiceKey] = bs + } + if a.conf.HasFeature("component2name") { // This feature flag determines the component tag to become the span name. // diff --git a/pkg/trace/agent/normalizer_test.go b/pkg/trace/agent/normalizer_test.go index 8ef3941f6c9bf..73d2d84565043 100644 --- a/pkg/trace/agent/normalizer_test.go +++ b/pkg/trace/agent/normalizer_test.go @@ -6,6 +6,7 @@ package agent import ( + "context" "fmt" "math" "math/rand" @@ -16,12 +17,16 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/atomic" + gzip "github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip" + "github.com/DataDog/datadog-agent/pkg/obfuscate" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/info" "github.com/DataDog/datadog-agent/pkg/trace/sampler" + "github.com/DataDog/datadog-agent/pkg/trace/telemetry" "github.com/DataDog/datadog-agent/pkg/trace/testutil" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" + "github.com/DataDog/datadog-go/v5/statsd" ) func newTestSpan() *pb.Span { @@ -75,9 +80,13 @@ func TestNormalizeServicePassThru(t *testing.T) { a := &Agent{conf: config.New()} ts := newTagStats() s := newTestSpan() + s.Meta[peerServiceKey] = "foo" + s.Meta[baseServiceKey] = "bar" before := s.Service assert.NoError(t, a.normalize(ts, s)) assert.Equal(t, before, s.Service) + assert.Equal(t, "foo", s.Meta[peerServiceKey]) + assert.Equal(t, "bar", s.Meta[baseServiceKey]) assert.Equal(t, newTagStats(), ts) } @@ -86,8 +95,12 @@ func TestNormalizeEmptyServiceNoLang(t *testing.T) { ts := newTagStats() s := newTestSpan() s.Service = "" + s.Meta[peerServiceKey] = "" + s.Meta[baseServiceKey] = "" assert.NoError(t, a.normalize(ts, s)) assert.Equal(t, traceutil.DefaultServiceName, s.Service) + assert.Equal(t, "", s.Meta[peerServiceKey]) // no fallback on peer service tag + assert.Equal(t, "", s.Meta[baseServiceKey]) // no fallback on base service tag assert.Equal(t, tsMalformed(&info.SpansMalformed{ServiceEmpty: *atomic.NewInt64(1)}), ts) } @@ -97,8 +110,12 @@ func TestNormalizeEmptyServiceWithLang(t *testing.T) { s := newTestSpan() s.Service = "" ts.Lang = "java" + s.Meta[peerServiceKey] = "" + s.Meta[baseServiceKey] = "" assert.NoError(t, a.normalize(ts, s)) assert.Equal(t, s.Service, fmt.Sprintf("unnamed-%s-service", ts.Lang)) + assert.Equal(t, "", s.Meta[peerServiceKey]) // no fallback on peer service tag + assert.Equal(t, "", s.Meta[baseServiceKey]) // no fallback on base service tag tsExpected := tsMalformed(&info.SpansMalformed{ServiceEmpty: *atomic.NewInt64(1)}) tsExpected.Lang = ts.Lang assert.Equal(t, tsExpected, ts) @@ -109,9 +126,17 @@ func TestNormalizeLongService(t *testing.T) { ts := newTagStats() s := newTestSpan() s.Service = strings.Repeat("CAMEMBERT", 100) + s.Meta[peerServiceKey] = strings.Repeat("BRIE", 100) + s.Meta[baseServiceKey] = strings.Repeat("ROQUEFORT", 100) assert.NoError(t, a.normalize(ts, s)) assert.Equal(t, s.Service, 
s.Service[:traceutil.MaxServiceLen]) - assert.Equal(t, tsMalformed(&info.SpansMalformed{ServiceTruncate: *atomic.NewInt64(1)}), ts) + assert.Equal(t, s.Meta[peerServiceKey], s.Meta[peerServiceKey][:traceutil.MaxServiceLen]) + assert.Equal(t, s.Meta[baseServiceKey], s.Meta[baseServiceKey][:traceutil.MaxServiceLen]) + assert.Equal(t, tsMalformed(&info.SpansMalformed{ + ServiceTruncate: *atomic.NewInt64(1), + PeerServiceTruncate: *atomic.NewInt64(1), + BaseServiceTruncate: *atomic.NewInt64(1), + }), ts) } func TestNormalizeNamePassThru(t *testing.T) { @@ -423,8 +448,12 @@ func TestNormalizeServiceTag(t *testing.T) { ts := newTagStats() s := newTestSpan() s.Service = "retargeting(api-Staging " + s.Meta[peerServiceKey] = "retargeting(api-Peer " + s.Meta[baseServiceKey] = "retargeting(api-Base " assert.NoError(t, a.normalize(ts, s)) assert.Equal(t, "retargeting_api-staging", s.Service) + assert.Equal(t, "retargeting_api-peer", s.Meta[peerServiceKey]) + assert.Equal(t, "retargeting_api-base", s.Meta[baseServiceKey]) assert.Equal(t, newTagStats(), ts) } @@ -599,3 +628,19 @@ func BenchmarkNormalization(b *testing.B) { a.normalize(ts, span) } } + +func TestLexerNormalization(t *testing.T) { + ctx, cancelFunc := context.WithCancel(context.Background()) + cfg := config.New() + cfg.Endpoints[0].APIKey = "test" + cfg.SQLObfuscationMode = string(obfuscate.ObfuscateAndNormalize) + agnt := NewAgent(ctx, cfg, telemetry.NewNoopCollector(), &statsd.NoOpClient{}, gzip.NewComponent()) + defer cancelFunc() + span := &pb.Span{ + Resource: "SELECT * FROM [u].[users]", + Type: "sql", + Meta: map[string]string{"db.type": "sqlserver"}, + } + agnt.obfuscateSpan(span) + assert.Equal(t, "SELECT * FROM u.users", span.Resource) +} diff --git a/pkg/trace/agent/obfuscate.go b/pkg/trace/agent/obfuscate.go index 25042e3fd6edc..dc185b9ee7fea 100644 --- a/pkg/trace/agent/obfuscate.go +++ b/pkg/trace/agent/obfuscate.go @@ -6,31 +6,37 @@ package agent import ( + "strconv" "sync" "github.com/DataDog/datadog-agent/pkg/obfuscate" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/log" - "github.com/DataDog/datadog-agent/pkg/trace/traceutil" + "github.com/DataDog/datadog-agent/pkg/trace/transform" ) const ( - tagRedisRawCommand = "redis.raw_command" - tagMemcachedCommand = "memcached.command" - tagMongoDBQuery = "mongodb.query" - tagElasticBody = "elasticsearch.body" - tagOpenSearchBody = "opensearch.body" - tagSQLQuery = "sql.query" - tagHTTPURL = "http.url" + tagRedisRawCommand = transform.TagRedisRawCommand + tagMemcachedCommand = transform.TagMemcachedCommand + tagMongoDBQuery = transform.TagMongoDBQuery + tagElasticBody = transform.TagElasticBody + tagOpenSearchBody = transform.TagOpenSearchBody + tagSQLQuery = transform.TagSQLQuery + tagHTTPURL = transform.TagHTTPURL + tagDBMS = transform.TagDBMS ) const ( - textNonParsable = "Non-parsable SQL query" + textNonParsable = transform.TextNonParsable ) func (a *Agent) obfuscateSpan(span *pb.Span) { o := a.lazyInitObfuscator() + for _, spanEvent := range span.SpanEvents { + a.obfuscateSpanEvent(spanEvent) + } + if a.conf.Obfuscation != nil && a.conf.Obfuscation.CreditCards.Enabled { for k, v := range span.Meta { newV := o.ObfuscateCreditCardNumber(k, v) @@ -46,31 +52,21 @@ func (a *Agent) obfuscateSpan(span *pb.Span) { if span.Resource == "" { return } - oq, err := o.ObfuscateSQLString(span.Resource) + oq, err := transform.ObfuscateSQLSpan(o, span) if err != nil { // we have an error, discard the SQL to avoid polluting user 
resources. log.Debugf("Error parsing SQL query: %v. Resource: %q", err, span.Resource) - span.Resource = textNonParsable - traceutil.SetMeta(span, tagSQLQuery, textNonParsable) return } - - span.Resource = oq.Query - if len(oq.Metadata.TablesCSV) > 0 { - traceutil.SetMeta(span, "sql.tables", oq.Metadata.TablesCSV) + if oq == nil { + // no error was thrown but no query was found/sanitized either + return } - traceutil.SetMeta(span, tagSQLQuery, oq.Query) case "redis": + // if a span is redis type, it should be quantized regardless of obfuscation setting span.Resource = o.QuantizeRedisString(span.Resource) if a.conf.Obfuscation.Redis.Enabled { - if span.Meta == nil || span.Meta[tagRedisRawCommand] == "" { - return - } - if a.conf.Obfuscation.Redis.RemoveAllArgs { - span.Meta[tagRedisRawCommand] = o.RemoveAllRedisArgs(span.Meta[tagRedisRawCommand]) - return - } - span.Meta[tagRedisRawCommand] = o.ObfuscateRedisString(span.Meta[tagRedisRawCommand]) + transform.ObfuscateRedisSpan(o, span, a.conf.Obfuscation.Redis.RemoveAllArgs) } case "memcached": if !a.conf.Obfuscation.Memcached.Enabled { @@ -110,12 +106,58 @@ func (a *Agent) obfuscateSpan(span *pb.Span) { } } +// obfuscateSpanEvent uses the pre-configured agent obfuscator to do limited obfuscation of span events +// For now, we only obfuscate any credit-card like when enabled. +func (a *Agent) obfuscateSpanEvent(spanEvent *pb.SpanEvent) { + if a.conf.Obfuscation != nil && a.conf.Obfuscation.CreditCards.Enabled && spanEvent != nil { + for k, v := range spanEvent.Attributes { + var strValue string + switch v.Type { + case pb.AttributeAnyValue_STRING_VALUE: + strValue = v.StringValue + case pb.AttributeAnyValue_DOUBLE_VALUE: + strValue = strconv.FormatFloat(v.DoubleValue, 'f', -1, 64) + case pb.AttributeAnyValue_INT_VALUE: + strValue = strconv.FormatInt(v.IntValue, 10) + case pb.AttributeAnyValue_BOOL_VALUE: + continue // Booleans can't be credit cards + case pb.AttributeAnyValue_ARRAY_VALUE: + a.ccObfuscateAttributeArray(v, k, strValue) + } + newVal := a.obfuscator.ObfuscateCreditCardNumber(k, strValue) + if newVal != strValue { + *v = pb.AttributeAnyValue{Type: pb.AttributeAnyValue_STRING_VALUE, StringValue: newVal} + } + } + } +} + +func (a *Agent) ccObfuscateAttributeArray(v *pb.AttributeAnyValue, k string, strValue string) { + var arrStrValue string + for _, vElement := range v.ArrayValue.Values { + switch vElement.Type { + case pb.AttributeArrayValue_STRING_VALUE: + arrStrValue = vElement.StringValue + case pb.AttributeArrayValue_DOUBLE_VALUE: + arrStrValue = strconv.FormatFloat(vElement.DoubleValue, 'f', -1, 64) + case pb.AttributeArrayValue_INT_VALUE: + arrStrValue = strconv.FormatInt(vElement.IntValue, 10) + case pb.AttributeArrayValue_BOOL_VALUE: + continue // Booleans can't be credit cards + } + newVal := a.obfuscator.ObfuscateCreditCardNumber(k, arrStrValue) + if newVal != strValue { + *vElement = pb.AttributeArrayValue{Type: pb.AttributeArrayValue_STRING_VALUE, StringValue: newVal} + } + } +} + func (a *Agent) obfuscateStatsGroup(b *pb.ClientGroupedStats) { o := a.lazyInitObfuscator() switch b.Type { case "sql", "cassandra": - oq, err := o.ObfuscateSQLString(b.Resource) + oq, err := o.ObfuscateSQLStringForDBMS(b.Resource, b.DBType) if err != nil { log.Errorf("Error obfuscating stats group resource %q: %v", b.Resource, err) b.Resource = textNonParsable diff --git a/pkg/trace/agent/obfuscate_test.go b/pkg/trace/agent/obfuscate_test.go index 268f55630f4d9..59daaad39bd35 100644 --- a/pkg/trace/agent/obfuscate_test.go +++ 
b/pkg/trace/agent/obfuscate_test.go @@ -418,3 +418,92 @@ func BenchmarkCCObfuscation(b *testing.B) { agnt.obfuscateSpan(span) } } + +func TestObfuscateSpanEvent(t *testing.T) { + assert := assert.New(t) + ctx, cancelFunc := context.WithCancel(context.Background()) + cfg := config.New() + cfg.Endpoints[0].APIKey = "test" + cfg.Obfuscation = &config.ObfuscationConfig{ + CreditCards: obfuscate.CreditCardsConfig{Enabled: true}, + } + agnt := NewAgent(ctx, cfg, telemetry.NewNoopCollector(), &statsd.NoOpClient{}, gzip.NewComponent()) + defer cancelFunc() + testCases := []*struct { + span *pb.Span + }{ + { + &pb.Span{ + Resource: "rrr", + Type: "aaa", + Meta: map[string]string{}, + SpanEvents: []*pb.SpanEvent{ + { + Name: "evt", + Attributes: map[string]*pb.AttributeAnyValue{ + "str": { + Type: pb.AttributeAnyValue_STRING_VALUE, + StringValue: "5105-1051-0510-5100", + }, + "int": { + Type: pb.AttributeAnyValue_INT_VALUE, + IntValue: 5105105105105100, + }, + "dbl": { + Type: pb.AttributeAnyValue_DOUBLE_VALUE, + DoubleValue: 5105105105105100, + }, + "arr": { + Type: pb.AttributeAnyValue_ARRAY_VALUE, + ArrayValue: &pb.AttributeArray{ + Values: []*pb.AttributeArrayValue{ + { + Type: pb.AttributeArrayValue_STRING_VALUE, + StringValue: "5105-1051-0510-5100", + }, + { + Type: pb.AttributeArrayValue_INT_VALUE, + IntValue: 5105105105105100, + }, + { + Type: pb.AttributeArrayValue_DOUBLE_VALUE, + DoubleValue: 5105105105105100, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + agnt.obfuscateSpan(tc.span) + for _, v := range tc.span.SpanEvents[0].Attributes { + if v.Type == pb.AttributeAnyValue_ARRAY_VALUE { + for _, arrayValue := range v.ArrayValue.Values { + assert.Equal("?", arrayValue.StringValue) + } + } else { + assert.Equal("?", v.StringValue) + } + } + } +} + +func TestLexerObfuscation(t *testing.T) { + ctx, cancelFunc := context.WithCancel(context.Background()) + cfg := config.New() + cfg.Endpoints[0].APIKey = "test" + cfg.Features["sqllexer"] = struct{}{} + agnt := NewAgent(ctx, cfg, telemetry.NewNoopCollector(), &statsd.NoOpClient{}, gzip.NewComponent()) + defer cancelFunc() + span := &pb.Span{ + Resource: "SELECT * FROM [u].[users]", + Type: "sql", + Meta: map[string]string{"db.type": "sqlserver"}, + } + agnt.obfuscateSpan(span) + assert.Equal(t, "SELECT * FROM [u].[users]", span.Resource) +} diff --git a/pkg/trace/api/api.go b/pkg/trace/api/api.go index 5cb57d2478a27..057969b114317 100644 --- a/pkg/trace/api/api.go +++ b/pkg/trace/api/api.go @@ -576,7 +576,7 @@ func (r *HTTPReceiver) handleTraces(v Version, w http.ResponseWriter, req *http. case <-time.After(time.Duration(r.conf.DecoderTimeout) * time.Millisecond): // this payload can not be accepted io.Copy(io.Discard, req.Body) //nolint:errcheck - if h := req.Header.Get(header.SendRealHTTPStatus); h != "" { + if isHeaderTrue(header.SendRealHTTPStatus, req.Header.Get(header.SendRealHTTPStatus)) { w.WriteHeader(http.StatusTooManyRequests) } else { w.WriteHeader(r.rateLimiterResponse) @@ -643,13 +643,27 @@ func (r *HTTPReceiver) handleTraces(v Version, w http.ResponseWriter, req *http. 
payload := &Payload{ Source: ts, TracerPayload: tp, - ClientComputedTopLevel: req.Header.Get(header.ComputedTopLevel) != "", - ClientComputedStats: req.Header.Get(header.ComputedStats) != "", + ClientComputedTopLevel: isHeaderTrue(header.ComputedTopLevel, req.Header.Get(header.ComputedTopLevel)), + ClientComputedStats: isHeaderTrue(header.ComputedStats, req.Header.Get(header.ComputedStats)), ClientDroppedP0s: droppedTracesFromHeader(req.Header, ts), } r.out <- payload } +// isHeaderTrue returns true if value is non-empty and not a "false"-like value as defined by strconv.ParseBool +// e.g. (0, f, F, FALSE, False, false) will be considered false while all other values will be true. +func isHeaderTrue(key, value string) bool { + if len(value) == 0 { + return false + } + bval, err := strconv.ParseBool(value) + if err != nil { + log.Debug("Non-boolean value %s found in header %s, defaulting to true", value, key) + return true + } + return bval +} + func droppedTracesFromHeader(h http.Header, ts *info.TagStats) int64 { var dropped int64 if v := h.Get(header.DroppedP0Traces); v != "" { diff --git a/pkg/trace/api/api_test.go b/pkg/trace/api/api_test.go index 3d43d0dbe3c6e..e9632f7c573c4 100644 --- a/pkg/trace/api/api_test.go +++ b/pkg/trace/api/api_test.go @@ -719,6 +719,8 @@ func TestClientComputedStatsHeader(t *testing.T) { req.Header.Set(header.Lang, "lang1") if on { req.Header.Set(header.ComputedStats, "yes") + } else { + req.Header.Set(header.ComputedStats, "false") } var wg sync.WaitGroup wg.Add(1) @@ -1042,26 +1044,14 @@ func TestExpvar(t *testing.T) { } c := newTestReceiverConfig() - c.DebugServerPort = 6789 + c.DebugServerPort = 5012 info.InitInfo(c) - - // Starting a TLS httptest server to retrieve tlsCert - ts := httptest.NewTLSServer(http.HandlerFunc(func(_ http.ResponseWriter, _ *http.Request) {})) - tlsConfig := ts.TLS.Clone() - // Setting a client with the proper TLS configuration - client := ts.Client() - ts.Close() - - // Starting Debug Server s := NewDebugServer(c) - s.SetTLSConfig(tlsConfig) - - // Starting the Debug server s.Start() defer s.Stop() - resp, err := client.Get(fmt.Sprintf("https://127.0.0.1:%d/debug/vars", c.DebugServerPort)) - require.NoError(t, err) + resp, err := http.Get("http://127.0.0.1:5012/debug/vars") + assert.NoError(t, err) defer resp.Body.Close() t.Run("read-expvars", func(t *testing.T) { diff --git a/pkg/trace/api/debug_server.go b/pkg/trace/api/debug_server.go index 6fd2f39cc011a..828d5357330eb 100644 --- a/pkg/trace/api/debug_server.go +++ b/pkg/trace/api/debug_server.go @@ -9,7 +9,6 @@ package api import ( "context" - "crypto/tls" "expvar" "fmt" "net" @@ -30,10 +29,9 @@ const ( // DebugServer serves /debug/* endpoints type DebugServer struct { - conf *config.AgentConfig - server *http.Server - mux *http.ServeMux - tlsConfig *tls.Config + conf *config.AgentConfig + server *http.Server + mux *http.ServeMux } // NewDebugServer returns a debug server @@ -55,14 +53,13 @@ func (ds *DebugServer) Start() { WriteTimeout: defaultTimeout, Handler: ds.setupMux(), } - listener, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(ds.conf.DebugServerPort))) + listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", ds.conf.DebugServerPort)) if err != nil { log.Errorf("Error creating debug server listener: %s", err) return } - tlsListener := tls.NewListener(listener, ds.tlsConfig) go func() { - if err := ds.server.Serve(tlsListener); err != nil && err != http.ErrServerClosed { + if err := ds.server.Serve(listener); err != nil && err != 
http.ErrServerClosed { log.Errorf("Could not start debug server: %s. Debug server disabled.", err) } }() @@ -85,11 +82,6 @@ func (ds *DebugServer) AddRoute(route string, handler http.Handler) { ds.mux.Handle(route, handler) } -// SetTLSConfig adds the provided tls.Config to the internal http.Server -func (ds *DebugServer) SetTLSConfig(config *tls.Config) { - ds.tlsConfig = config -} - func (ds *DebugServer) setupMux() *http.ServeMux { ds.mux.HandleFunc("/debug/pprof/", pprof.Index) ds.mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline) diff --git a/pkg/trace/api/info.go b/pkg/trace/api/info.go index 401ddbd4e64a9..35a975fccb313 100644 --- a/pkg/trace/api/info.go +++ b/pkg/trace/api/info.go @@ -84,6 +84,7 @@ func (r *HTTPReceiver) makeInfoHandler() (hash string, handler http.HandlerFunc) ClientDropP0s bool `json:"client_drop_p0s"` SpanMetaStructs bool `json:"span_meta_structs"` LongRunningSpans bool `json:"long_running_spans"` + SpanEvents bool `json:"span_events"` EvpProxyAllowedHeaders []string `json:"evp_proxy_allowed_headers"` Config reducedConfig `json:"config"` PeerTags []string `json:"peer_tags"` @@ -96,6 +97,7 @@ func (r *HTTPReceiver) makeInfoHandler() (hash string, handler http.HandlerFunc) ClientDropP0s: canDropP0, SpanMetaStructs: true, LongRunningSpans: true, + SpanEvents: true, EvpProxyAllowedHeaders: EvpProxyAllowedHeaders, SpanKindsStatsComputed: spanKindsStatsComputed, Config: reducedConfig{ diff --git a/pkg/trace/api/info_test.go b/pkg/trace/api/info_test.go index d8fc856b6a6be..a279d5a4d115b 100644 --- a/pkg/trace/api/info_test.go +++ b/pkg/trace/api/info_test.go @@ -300,6 +300,7 @@ func TestInfoHandler(t *testing.T) { "client_drop_p0s": nil, "span_meta_structs": nil, "long_running_spans": nil, + "span_events": nil, "evp_proxy_allowed_headers": nil, "peer_tags": nil, "span_kinds_stats_computed": nil, diff --git a/pkg/trace/api/internal/header/headers.go b/pkg/trace/api/internal/header/headers.go index 88cb3549b2836..9570ddbffdb45 100644 --- a/pkg/trace/api/internal/header/headers.go +++ b/pkg/trace/api/internal/header/headers.go @@ -56,11 +56,12 @@ const ( TracerVersion = "Datadog-Meta-Tracer-Version" // ComputedTopLevel specifies that the client has marked top-level spans, when set. - // Any non-empty value will mean 'yes'. + // Any value other than 0, f, F, FALSE, False, false will mean 'yes'. ComputedTopLevel = "Datadog-Client-Computed-Top-Level" // ComputedStats specifies whether the client has computed stats so that the agent // doesn't have to. + // Any value other than 0, f, F, FALSE, False, false will mean 'yes'. ComputedStats = "Datadog-Client-Computed-Stats" // DroppedP0Traces contains the number of P0 trace chunks dropped by the client. @@ -79,7 +80,7 @@ const ( // it wants to receive the "real" status in the response. By default, the agent // will send a 200 OK response for every payload, even those dropped due to // intake limits. - // Any value set in this header will cause the agent to send a 429 code to a client + // Any value other than 0, f, F, FALSE, False, false set in this header will cause the agent to send a 429 code to a client // when the payload cannot be submitted. 
SendRealHTTPStatus = "Datadog-Send-Real-Http-Status" ) diff --git a/pkg/trace/api/otlp.go b/pkg/trace/api/otlp.go index eb136d267e090..a8a37f4530165 100644 --- a/pkg/trace/api/otlp.go +++ b/pkg/trace/api/otlp.go @@ -17,6 +17,8 @@ import ( "sync" "time" + "github.com/DataDog/datadog-go/v5/statsd" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/api/internal/header" "github.com/DataDog/datadog-agent/pkg/trace/config" @@ -26,10 +28,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/timing" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" "github.com/DataDog/datadog-agent/pkg/trace/transform" - "github.com/DataDog/datadog-go/v5/statsd" - "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" - "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" @@ -37,6 +36,9 @@ import ( semconv "go.opentelemetry.io/collector/semconv/v1.6.1" "google.golang.org/grpc" "google.golang.org/grpc/metadata" + + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source" ) // keyStatsComputed specifies the resource attribute key which indicates if stats have been @@ -97,9 +99,9 @@ func NewOTLPReceiver(out chan<- *Payload, cfg *config.AgentConfig, statsd statsd ignoreResNames[resName] = struct{}{} } _ = statsd.Gauge("datadog.trace_agent.otlp.compute_top_level_by_span_kind", computeTopLevelBySpanKindVal, nil, 1) - enableReceiveResourceSpansV2Val := 0.0 - if cfg.HasFeature("enable_receive_resource_spans_v2") { - enableReceiveResourceSpansV2Val = 1.0 + enableReceiveResourceSpansV2Val := 1.0 + if cfg.HasFeature("disable_receive_resource_spans_v2") { + enableReceiveResourceSpansV2Val = 0.0 } _ = statsd.Gauge("datadog.trace_agent.otlp.enable_receive_resource_spans_v2", enableReceiveResourceSpansV2Val, nil, 1) return &OTLPReceiver{out: out, conf: cfg, cidProvider: NewIDProvider(cfg.ContainerProcRoot, cfg.ContainerIDFromOriginInfo), statsd: statsd, timing: timing, ignoreResNames: ignoreResNames} @@ -225,10 +227,10 @@ func (o *OTLPReceiver) SetOTelAttributeTranslator(attrstrans *attributes.Transla // ReceiveResourceSpans processes the given rspans and returns the source that it identified from processing them. 
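Both NewOTLPReceiver above and ReceiveResourceSpans below now treat the V2 resource-span path as the default: the old opt-in flag enable_receive_resource_spans_v2 is gone and only an explicit opt-out remains. A hypothetical sketch of the opt-out, assuming the api package's usual test imports (config, statsd, timing) and mirroring how the updated tests set the flag; this is illustrative, not code from the diff:

// newLegacyV1Receiver builds a receiver that falls back to receiveResourceSpansV1.
// Without the "disable_receive_resource_spans_v2" feature, V2 is now always used.
func newLegacyV1Receiver(out chan *Payload) *OTLPReceiver {
	cfg := config.New()
	cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} // explicit opt-out of the V2 path
	return NewOTLPReceiver(out, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{})
}

The test updates further below follow the same inversion: every enable_receive_resource_spans_v2 opt-in becomes a disable_receive_resource_spans_v2 opt-out guarded by `if !enableReceiveResourceSpansV2`.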
func (o *OTLPReceiver) ReceiveResourceSpans(ctx context.Context, rspans ptrace.ResourceSpans, httpHeader http.Header) source.Source { - if o.conf.HasFeature("enable_receive_resource_spans_v2") { - return o.receiveResourceSpansV2(ctx, rspans, httpHeader.Get(header.ComputedStats) != "") + if o.conf.HasFeature("disable_receive_resource_spans_v2") { + return o.receiveResourceSpansV1(ctx, rspans, httpHeader) } - return o.receiveResourceSpansV1(ctx, rspans, httpHeader) + return o.receiveResourceSpansV2(ctx, rspans, isHeaderTrue(header.ComputedStats, httpHeader.Get(header.ComputedStats))) } func (o *OTLPReceiver) receiveResourceSpansV2(ctx context.Context, rspans ptrace.ResourceSpans, clientComputedStats bool) source.Source { @@ -417,8 +419,8 @@ func (o *OTLPReceiver) receiveResourceSpansV1(ctx context.Context, rspans ptrace _ = o.statsd.Count("datadog.trace_agent.otlp.traces", int64(len(tracesByID)), tags, 1) p := Payload{ Source: tagstats, - ClientComputedStats: rattr[keyStatsComputed] != "" || httpHeader.Get(header.ComputedStats) != "", - ClientComputedTopLevel: o.conf.HasFeature("enable_otlp_compute_top_level_by_span_kind") || httpHeader.Get(header.ComputedTopLevel) != "", + ClientComputedStats: rattr[keyStatsComputed] != "" || isHeaderTrue(header.ComputedStats, httpHeader.Get(header.ComputedStats)), + ClientComputedTopLevel: o.conf.HasFeature("enable_otlp_compute_top_level_by_span_kind") || isHeaderTrue(header.ComputedTopLevel, httpHeader.Get(header.ComputedTopLevel)), } if env == "" { env = o.conf.DefaultEnv @@ -639,12 +641,12 @@ func (o *OTLPReceiver) convertSpan(rattr map[string]string, lib pcommon.Instrume if span.Service == "" { span.Service = "OTLPResourceNoServiceName" } + res := pcommon.NewResource() + for k, v := range rattr { + res.Attributes().PutStr(k, v) + } if span.Resource == "" { if transform.OperationAndResourceNameV2Enabled(o.conf) { - res := pcommon.NewResource() - for k, v := range rattr { - res.Attributes().PutStr(k, v) - } span.Resource = traceutil.GetOTelResourceV2(in, res) } else { if r := resourceFromTags(span.Meta); r != "" { @@ -655,7 +657,7 @@ func (o *OTLPReceiver) convertSpan(rattr map[string]string, lib pcommon.Instrume } } if span.Type == "" { - span.Type = spanKind2Type(in.Kind(), span) + span.Type = traceutil.SpanKind2Type(in, res) } return span } @@ -695,30 +697,6 @@ func resourceFromTags(meta map[string]string) string { return "" } -// spanKind2Type returns a span's type based on the given kind and other present properties. -func spanKind2Type(kind ptrace.SpanKind, span *pb.Span) string { - var typ string - switch kind { - case ptrace.SpanKindServer: - typ = "web" - case ptrace.SpanKindClient: - typ = "http" - db, ok := span.Meta[string(semconv.AttributeDBSystem)] - if !ok { - break - } - switch db { - case "redis", "memcached": - typ = "cache" - default: - typ = "db" - } - default: - typ = "custom" - } - return typ -} - // computeTopLevelAndMeasured updates the span's top-level and measured attributes. // // An OTLP span is considered top-level if it is a root span or has a span kind of server or consumer. 
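On the header handling above: Datadog-Client-Computed-Stats, Datadog-Client-Computed-Top-Level and Datadog-Send-Real-Http-Status move from "any non-empty value means yes" to boolean parsing via isHeaderTrue, in both the HTTP receiver and the OTLP receiver. A minimal standalone sketch of that behaviour, assuming only what the diff states (the helper below mirrors, but is not, the agent's isHeaderTrue, which also takes the header key and logs non-boolean values):

package main

import (
	"fmt"
	"strconv"
)

// headerTrue mirrors the documented semantics: empty means false, a
// strconv.ParseBool "false"-like value (0, f, F, FALSE, False, false) means
// false, and any other value -- including legacy markers such as "yes" --
// still means true.
func headerTrue(value string) bool {
	if len(value) == 0 {
		return false
	}
	if b, err := strconv.ParseBool(value); err == nil {
		return b
	}
	return true // non-boolean values keep the old "non-empty means yes" behaviour
}

func main() {
	for _, v := range []string{"", "false", "0", "yes", "true"} {
		fmt.Printf("%q -> %v\n", v, headerTrue(v))
	}
	// "" -> false, "false" -> false, "0" -> false, "yes" -> true, "true" -> true
}

This is also why the api_test.go change above can set Datadog-Client-Computed-Stats to "false" for the off case instead of omitting the header entirely.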
diff --git a/pkg/trace/api/otlp_test.go b/pkg/trace/api/otlp_test.go index 0966ee75ff1e9..eabd47be21aa0 100644 --- a/pkg/trace/api/otlp_test.go +++ b/pkg/trace/api/otlp_test.go @@ -39,6 +39,7 @@ import ( "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" "go.opentelemetry.io/collector/pdata/ptrace/ptraceotlp" + semconv127 "go.opentelemetry.io/collector/semconv/v1.27.0" semconv "go.opentelemetry.io/collector/semconv/v1.6.1" ) @@ -164,8 +165,8 @@ func TestOTLPMetrics(t *testing.T) { func testOTLPMetrics(enableReceiveResourceSpansV2 bool, t *testing.T) { assert := assert.New(t) cfg := NewTestConfig(t) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } stats := &teststatsd.Client{} @@ -230,8 +231,8 @@ func testOTLPNameRemapping(enableReceiveResourceSpansV2 bool, t *testing.T) { // Verify that while EnableOperationAndResourceNamesV2 is in alpha, SpanNameRemappings overrides it cfg := NewTestConfig(t) cfg.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } cfg.OTLPReceiver.SpanNameRemappings = map[string]string{"libname.unspecified": "new"} out := make(chan *Payload, 1) @@ -268,8 +269,8 @@ func TestOTLPSpanNameV2(t *testing.T) { func testOTLPSpanNameV2(enableReceiveResourceSpansV2 bool, t *testing.T) { cfg := NewTestConfig(t) cfg.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } out := make(chan *Payload, 1) rcv := NewOTLPReceiver(out, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) @@ -707,8 +708,8 @@ func TestCreateChunks(t *testing.T) { func testCreateChunk(enableReceiveResourceSpansV2 bool, t *testing.T) { cfg := NewTestConfig(t) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } cfg.OTLPReceiver.ProbabilisticSampling = 50 o := NewOTLPReceiver(nil, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) @@ -762,8 +763,8 @@ func TestOTLPReceiveResourceSpans(t *testing.T) { func testOTLPReceiveResourceSpans(enableReceiveResourceSpansV2 bool, t *testing.T) { cfg := NewTestConfig(t) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } out := make(chan *Payload, 1) rcv := NewOTLPReceiver(out, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) @@ -1259,8 +1260,8 @@ func testOTLPHostname(enableReceiveResourceSpansV2 bool, t *testing.T) { } for _, tt := range testcases { cfg := NewTestConfig(t) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } cfg.Hostname = tt.config out := make(chan *Payload, 1) @@ -1295,7 +1296,6 @@ func testOTLPHostname(enableReceiveResourceSpansV2 bool, t *testing.T) { func 
TestResourceRelatedSpanAttributesAreIgnored_ReceiveResourceSpansV2(t *testing.T) { cfg := NewTestConfig(t) - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} out := make(chan *Payload, 1) rcv := NewOTLPReceiver(out, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) rattr := map[string]interface{}{} @@ -1342,16 +1342,16 @@ func TestOTLPReceiver(t *testing.T) { func testOTLPReceiver(enableReceiveResourceSpansV2 bool, t *testing.T) { t.Run("New", func(t *testing.T) { cfg := NewTestConfig(t) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } assert.NotNil(t, NewOTLPReceiver(nil, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}).conf) }) t.Run("Start/nil", func(t *testing.T) { cfg := NewTestConfig(t) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } o := NewOTLPReceiver(nil, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) o.Start() @@ -1362,8 +1362,8 @@ func testOTLPReceiver(enableReceiveResourceSpansV2 bool, t *testing.T) { t.Run("Start/grpc", func(t *testing.T) { port := testutil.FreeTCPPort(t) cfg := NewTestConfig(t) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } cfg.OTLPReceiver = &config.OTLP{ BindHost: "localhost", @@ -1383,8 +1383,8 @@ func testOTLPReceiver(enableReceiveResourceSpansV2 bool, t *testing.T) { t.Run("processRequest", func(t *testing.T) { out := make(chan *Payload, 5) cfg := NewTestConfig(t) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } o := NewOTLPReceiver(out, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) o.processRequest(context.Background(), http.Header(map[string][]string{ @@ -1590,55 +1590,7 @@ func TestOTLPHelpers(t *testing.T) { } }) - t.Run("spanKind2Type", func(t *testing.T) { - for _, tt := range []struct { - kind ptrace.SpanKind - meta map[string]string - out string - }{ - { - kind: ptrace.SpanKindServer, - out: "web", - }, - { - kind: ptrace.SpanKindClient, - out: "http", - }, - { - kind: ptrace.SpanKindClient, - meta: map[string]string{"db.system": "redis"}, - out: "cache", - }, - { - kind: ptrace.SpanKindClient, - meta: map[string]string{"db.system": "memcached"}, - out: "cache", - }, - { - kind: ptrace.SpanKindClient, - meta: map[string]string{"db.system": "other"}, - out: "db", - }, - { - kind: ptrace.SpanKindProducer, - out: "custom", - }, - { - kind: ptrace.SpanKindConsumer, - out: "custom", - }, - { - kind: ptrace.SpanKindInternal, - out: "custom", - }, - { - kind: ptrace.SpanKindUnspecified, - out: "custom", - }, - } { - assert.Equal(t, tt.out, spanKind2Type(tt.kind, &pb.Span{Meta: tt.meta})) - } - }) + // test spanKind2Type moved to pkg/trace/traceutil/otel_util_test.go t.Run("tagsFromHeaders", func(t *testing.T) { out := tagsFromHeaders(http.Header(map[string][]string{ @@ -1674,7 +1626,6 @@ func TestOTelSpanToDDSpan(t *testing.T) { func testOTelSpanToDDSpan(enableOperationAndResourceNameV2 bool, t *testing.T) { cfg := NewTestConfig(t) now := uint64(otlpTestSpan.StartTimestamp()) - 
cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} if enableOperationAndResourceNameV2 { cfg.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} } @@ -1843,7 +1794,7 @@ func testOTelSpanToDDSpan(enableOperationAndResourceNameV2 bool, t *testing.T) { Error: 1, Meta: map[string]string{ "name": "john", - "env": "", + "deployment.environment": "prod", "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", "otel.status_code": "Error", "otel.status_description": "Error", @@ -1876,9 +1827,19 @@ func testOTelSpanToDDSpan(enableOperationAndResourceNameV2 bool, t *testing.T) { }, }, { rattr: map[string]string{ - "service.name": "myservice", - "service.version": "v1.2.3", - "env": "staging", + "service.name": "myservice", + "service.version": "v1.2.3", + "env": "staging", + semconv127.AttributeClientAddress: "sample_client_address", + semconv127.AttributeHTTPResponseBodySize: "sample_content_length", + semconv127.AttributeHTTPResponseStatusCode: "sample_status_code", + semconv127.AttributeHTTPRequestBodySize: "sample_content_length", + "http.request.header.referrer": "sample_referrer", + semconv127.AttributeNetworkProtocolVersion: "sample_version", + semconv127.AttributeServerAddress: "sample_server_name", + semconv127.AttributeURLFull: "sample_url", + semconv127.AttributeUserAgentOriginal: "sample_useragent", + "http.request.header.example": "test", }, libname: "ddtracer", libver: "v2", @@ -1991,6 +1952,16 @@ func testOTelSpanToDDSpan(enableOperationAndResourceNameV2 bool, t *testing.T) { "http.route": "/path", "span.kind": "server", "_dd.span_events.has_exception": "true", + "http.client_ip": "sample_client_address", + "http.response.content_length": "sample_content_length", + "http.status_code": "sample_status_code", + "http.request.content_length": "sample_content_length", + "http.referrer": "sample_referrer", + "http.version": "sample_version", + "http.server_name": "sample_server_name", + "http.url": "sample_url", + "http.useragent": "sample_useragent", + "http.request.headers.example": "test", }, Metrics: map[string]float64{ "approx": 1.2, @@ -2105,21 +2076,19 @@ func testOTelSpanToDDSpan(enableOperationAndResourceNameV2 bool, t *testing.T) { Duration: 200000000, Error: 1, Meta: map[string]string{ - "env": "staging", - "otel.library.name": "ddtracer", - "otel.library.version": "v2", - "otel.status_code": "Error", - "error.msg": "201", - "http.request.method": "POST", - "http.method": "POST", - "url.path": "/uploads/4", - "url.scheme": "https", - "http.route": "/uploads/:document_id", - "http.response.status_code": "201", - "http.status_code": "201", - "error.type": "WebSocketDisconnect", - "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", - "span.kind": "unspecified", + "env": "staging", + "otel.library.name": "ddtracer", + "otel.library.version": "v2", + "otel.status_code": "Error", + "error.msg": "201", + "http.method": "POST", + "url.path": "/uploads/4", + "url.scheme": "https", + "http.route": "/uploads/:document_id", + "http.status_code": "201", + "error.type": "WebSocketDisconnect", + "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", + "span.kind": "unspecified", }, Type: "custom", }, @@ -2167,21 +2136,19 @@ func testOTelSpanToDDSpan(enableOperationAndResourceNameV2 bool, t *testing.T) { Duration: 200000000, Error: 1, Meta: map[string]string{ - "env": "staging", - "otel.library.name": "ddtracer", - "otel.library.version": "v2", - "otel.status_code": "Error", - "error.msg": "201", - "http.request.method": "POST", - "http.method": "POST", - "url.path": 
"/uploads/4", - "url.scheme": "https", - "http.route": "/uploads/:document_id", - "http.response.status_code": "201", - "http.status_code": "201", - "error.type": "WebSocketDisconnect", - "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", - "span.kind": "unspecified", + "env": "staging", + "otel.library.name": "ddtracer", + "otel.library.version": "v2", + "otel.status_code": "Error", + "error.msg": "201", + "http.method": "POST", + "url.path": "/uploads/4", + "url.scheme": "https", + "http.route": "/uploads/:document_id", + "http.status_code": "201", + "error.type": "WebSocketDisconnect", + "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", + "span.kind": "unspecified", }, Type: "custom", }, @@ -2277,8 +2244,8 @@ func testOTelSpanToDDSpan(enableOperationAndResourceNameV2 bool, t *testing.T) { Duration: 200000000, Error: 1, Meta: map[string]string{ - "container.id": "do-not-use", "deployment.environment.name": "do-not-use", + "container.id": "do-not-use", "k8s.pod.uid": "do-not-use", "datadog.host.name": "do-not-use", "otel.library.name": "ddtracer", @@ -2336,6 +2303,11 @@ func testOTelSpanToDDSpan(enableOperationAndResourceNameV2 bool, t *testing.T) { assert.Equal(v, got.Meta[k], fmt.Sprintf("(%d) Meta %v:%v", i, k, v)) } } + for k, v := range got.Meta { + if k != "events" && k != "_dd.span_links" { + assert.Equal(want.Meta[k], v, fmt.Sprintf("(%d) Meta %v:%v", i, k, v)) + } + } if len(want.Metrics) != len(got.Metrics) { t.Fatalf("(%d) Metrics count mismatch:\n\n%v\n\n%v", i, want.Metrics, got.Metrics) } @@ -3436,7 +3408,6 @@ func testOTLPConvertSpanSetPeerService(enableOperationAndResourceNameV2 bool, t func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t *testing.T) { now := uint64(otlpTestSpan.StartTimestamp()) cfg := NewTestConfig(t) - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} if enableOperationAndResourceNameV2 { cfg.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} } @@ -3454,8 +3425,9 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t }{ { rattr: map[string]string{ - "service.version": "v1.2.3", - "service.name": "myservice", + "service.version": "v1.2.3", + "service.name": "myservice", + "deployment.environment": "prod", }, libname: "ddtracer", libver: "v2", @@ -3467,8 +3439,7 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: now, End: now + 200000000, Attributes: map[string]interface{}{ - "peer.service": "userbase", - "deployment.environment": "prod", + "peer.service": "userbase", }, }), operationNameV1: "ddtracer.server", @@ -3483,6 +3454,7 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: int64(now), Duration: 200000000, Meta: map[string]string{ + "env": "prod", "deployment.environment": "prod", "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", "otel.status_code": "Unset", @@ -3499,8 +3471,9 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t }, { rattr: map[string]string{ - "service.version": "v1.2.3", - "service.name": "myservice", + "service.version": "v1.2.3", + "service.name": "myservice", + "deployment.environment": "prod", }, libname: "ddtracer", libver: "v2", @@ -3512,9 +3485,8 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: now, End: now + 200000000, Attributes: map[string]interface{}{ - "db.instance": "postgres", - "peer.service": "userbase", - "deployment.environment": "prod", + "db.instance": "postgres", + 
"peer.service": "userbase", }, }), operationNameV1: "ddtracer.server", @@ -3530,6 +3502,7 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Duration: 200000000, Meta: map[string]string{ "db.instance": "postgres", + "env": "prod", "deployment.environment": "prod", "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", "otel.status_code": "Unset", @@ -3546,8 +3519,9 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t }, { rattr: map[string]string{ - "service.version": "v1.2.3", - "service.name": "myservice", + "service.version": "v1.2.3", + "service.name": "myservice", + "deployment.environment": "prod", }, libname: "ddtracer", libver: "v2", @@ -3559,9 +3533,8 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: now, End: now + 200000000, Attributes: map[string]interface{}{ - "db.system": "postgres", - "net.peer.name": "remotehost", - "deployment.environment": "prod", + "db.system": "postgres", + "net.peer.name": "remotehost", }, }), operationNameV1: "ddtracer.client", @@ -3576,6 +3549,7 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: int64(now), Duration: 200000000, Meta: map[string]string{ + "env": "prod", "deployment.environment": "prod", "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", "otel.status_code": "Unset", @@ -3593,8 +3567,9 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t }, { rattr: map[string]string{ - "service.version": "v1.2.3", - "service.name": "myservice", + "service.version": "v1.2.3", + "service.name": "myservice", + "deployment.environment": "prod", }, libname: "ddtracer", libver: "v2", @@ -3606,9 +3581,8 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: now, End: now + 200000000, Attributes: map[string]interface{}{ - "rpc.service": "GetInstance", - "net.peer.name": "remotehost", - "deployment.environment": "prod", + "rpc.service": "GetInstance", + "net.peer.name": "remotehost", }, }), operationNameV1: "ddtracer.client", @@ -3623,6 +3597,7 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: int64(now), Duration: 200000000, Meta: map[string]string{ + "env": "prod", "deployment.environment": "prod", "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", "otel.status_code": "Unset", @@ -3640,8 +3615,9 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t }, { rattr: map[string]string{ - "service.version": "v1.2.3", - "service.name": "myservice", + "service.version": "v1.2.3", + "service.name": "myservice", + "deployment.environment": "prod", }, libname: "ddtracer", libver: "v2", @@ -3653,8 +3629,7 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: now, End: now + 200000000, Attributes: map[string]interface{}{ - "net.peer.name": "remotehost", - "deployment.environment": "prod", + "net.peer.name": "remotehost", }, }), operationNameV1: "ddtracer.server", @@ -3669,6 +3644,7 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: int64(now), Duration: 200000000, Meta: map[string]string{ + "env": "prod", "deployment.environment": "prod", "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", "otel.status_code": "Unset", @@ -3685,8 +3661,9 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t }, { rattr: map[string]string{ - "service.version": "v1.2.3", - "service.name": "myservice", + 
"service.version": "v1.2.3", + "service.name": "myservice", + "deployment.environment": "prod", }, libname: "ddtracer", libver: "v2", @@ -3699,7 +3676,6 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t End: now + 200000000, Attributes: map[string]interface{}{ "aws.dynamodb.table_names": "my-table", - "deployment.environment": "prod", }, }), operationNameV1: "ddtracer.server", @@ -3714,6 +3690,7 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: int64(now), Duration: 200000000, Meta: map[string]string{ + "env": "prod", "deployment.environment": "prod", "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", "otel.status_code": "Unset", @@ -3730,8 +3707,9 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t }, { rattr: map[string]string{ - "service.version": "v1.2.3", - "service.name": "myservice", + "service.version": "v1.2.3", + "service.name": "myservice", + "deployment.environment": "prod", }, libname: "ddtracer", libver: "v2", @@ -3744,7 +3722,6 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t End: now + 200000000, Attributes: map[string]interface{}{ "faas.document.collection": "my-s3-bucket", - "deployment.environment": "prod", }, }), operationNameV1: "ddtracer.server", @@ -3759,6 +3736,7 @@ func testOTelSpanToDDSpanSetPeerService(enableOperationAndResourceNameV2 bool, t Start: int64(now), Duration: 200000000, Meta: map[string]string{ + "env": "prod", "deployment.environment": "prod", "otel.trace_id": "72df520af2bde7a5240031ead750e5f3", "otel.status_code": "Unset", @@ -3814,8 +3792,8 @@ func testResourceAttributesMap(enableReceiveResourceSpansV2 bool, t *testing.T) lib := pcommon.NewInstrumentationScope() span := testutil.NewOTLPSpan(&testutil.OTLPSpan{}) cfg := NewTestConfig(t) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } NewOTLPReceiver(nil, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}).convertSpan(rattr, lib, span) assert.Len(t, rattr, 1) // ensure "rattr" has no new entries @@ -4292,8 +4270,8 @@ func benchmarkProcessRequest(enableReceiveResourceSpansV2 bool, b *testing.B) { }() cfg := NewBenchmarkTestConfig(b) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } r := NewOTLPReceiver(out, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) b.ReportAllocs() @@ -4335,8 +4313,8 @@ func benchmarkProcessRequestTopLevel(enableReceiveResourceSpansV2 bool, b *testi }() cfg := NewBenchmarkTestConfig(b) - if enableReceiveResourceSpansV2 { - cfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + cfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } cfg.Features["enable_otlp_compute_top_level_by_span_kind"] = struct{}{} r := NewOTLPReceiver(out, cfg, &statsd.NoOpClient{}, &timing.NoopReporter{}) diff --git a/pkg/trace/api/symdb.go b/pkg/trace/api/symdb.go index b238b478eb65c..0b888e050ad15 100644 --- a/pkg/trace/api/symdb.go +++ b/pkg/trace/api/symdb.go @@ -48,7 +48,7 @@ func (r *HTTPReceiver) symDBProxyHandler() http.Handler { apiKey = strings.TrimSpace(k) } transport := newMeasuringForwardingTransport( - config.New().NewHTTPTransport(), target, apiKey, r.conf.SymDBProxy.AdditionalEndpoints, 
"datadog.trace_agent.debugger.", []string{}, r.statsd) + r.conf.NewHTTPTransport(), target, apiKey, r.conf.SymDBProxy.AdditionalEndpoints, "datadog.trace_agent.debugger.", []string{}, r.statsd) return newSymDBProxy(r.conf, transport, hostTags) } diff --git a/pkg/trace/config/config.go b/pkg/trace/config/config.go index cbf0b0ea5c3a3..ea4451f10740d 100644 --- a/pkg/trace/config/config.go +++ b/pkg/trace/config/config.go @@ -121,8 +121,15 @@ type ObfuscationConfig struct { Cache obfuscate.CacheConfig `mapstructure:"cache"` } -func obfuscationMode(enabled bool) obfuscate.ObfuscationMode { - if enabled { +func obfuscationMode(conf *AgentConfig, sqllexerEnabled bool) obfuscate.ObfuscationMode { + if conf.SQLObfuscationMode != "" { + if conf.SQLObfuscationMode == string(obfuscate.ObfuscateOnly) || conf.SQLObfuscationMode == string(obfuscate.ObfuscateAndNormalize) { + return obfuscate.ObfuscationMode(conf.SQLObfuscationMode) + } + log.Warnf("Invalid SQL obfuscator mode %s, falling back to default", conf.SQLObfuscationMode) + return "" + } + if sqllexerEnabled { return obfuscate.ObfuscateOnly } return "" @@ -136,7 +143,7 @@ func (o *ObfuscationConfig) Export(conf *AgentConfig) obfuscate.Config { ReplaceDigits: conf.HasFeature("quantize_sql_tables") || conf.HasFeature("replace_sql_digits"), KeepSQLAlias: conf.HasFeature("keep_sql_alias"), DollarQuotedFunc: conf.HasFeature("dollar_quoted_func"), - ObfuscationMode: obfuscationMode(conf.HasFeature("sqllexer")), + ObfuscationMode: obfuscationMode(conf, conf.HasFeature("sqllexer")), }, ES: o.ES, OpenSearch: o.OpenSearch, @@ -356,6 +363,8 @@ type AgentConfig struct { MaxSenderRetries int // HTTP client used in writer connections. If nil, default client values will be used. HTTPClientFunc func() *http.Client `json:"-"` + // HTTP Transport used in writer connections. If nil, default transport values will be used. + HTTPTransportFunc func() *http.Transport `json:"-"` // internal telemetry StatsdEnabled bool @@ -396,6 +405,9 @@ type AgentConfig struct { // Obfuscation holds sensitive data obufscator's configuration. Obfuscation *ObfuscationConfig + // SQLObfuscationMode holds obfuscator mode. + SQLObfuscationMode string + // MaxResourceLen the maximum length the resource can have MaxResourceLen int @@ -542,6 +554,7 @@ func New() *AgentConfig { AnalyzedRateByServiceLegacy: make(map[string]float64), AnalyzedSpansByService: make(map[string]map[string]float64), Obfuscation: &ObfuscationConfig{}, + SQLObfuscationMode: "", MaxResourceLen: 5000, GlobalTags: computeGlobalTags(), @@ -611,6 +624,9 @@ func (c *AgentConfig) NewHTTPClient() *ResetClient { // NewHTTPTransport returns a new http.Transport to be used for outgoing connections to // the Datadog API. 
func (c *AgentConfig) NewHTTPTransport() *http.Transport { + if c.HTTPTransportFunc != nil { + return c.HTTPTransportFunc() + } transport := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: c.SkipSSLValidation}, // below field values are from http.DefaultTransport (go1.12) diff --git a/pkg/trace/filters/replacer.go b/pkg/trace/filters/replacer.go index 7d243b21b4133..dda6c624e536c 100644 --- a/pkg/trace/filters/replacer.go +++ b/pkg/trace/filters/replacer.go @@ -12,6 +12,7 @@ import ( pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" + "github.com/DataDog/datadog-agent/pkg/trace/log" ) // Replacer is a filter which replaces tag values based on its @@ -45,6 +46,15 @@ func (f Replacer) Replace(trace pb.Trace) { } } s.Resource = re.ReplaceAllString(s.Resource, str) + for _, spanEvent := range s.SpanEvents { + if spanEvent != nil { + for keyAttr, val := range spanEvent.Attributes { + if !strings.HasPrefix(keyAttr, hiddenTagPrefix) { + spanEvent.Attributes[keyAttr] = f.replaceAttributeAnyValue(re, val, str) + } + } + } + } case "resource.name": s.Resource = re.ReplaceAllString(s.Resource, str) default: @@ -58,6 +68,13 @@ func (f Replacer) Replace(trace pb.Trace) { f.replaceNumericTag(re, s, key, str) } } + for _, spanEvent := range s.SpanEvents { + if spanEvent != nil { + if val, ok := spanEvent.Attributes[key]; ok { + spanEvent.Attributes[key] = f.replaceAttributeAnyValue(re, val, str) + } + } + } } } } @@ -75,6 +92,109 @@ func (f Replacer) replaceNumericTag(re *regexp.Regexp, s *pb.Span, key string, s } } +func (f Replacer) replaceAttributeAnyValue(re *regexp.Regexp, val *pb.AttributeAnyValue, str string) *pb.AttributeAnyValue { + switch val.Type { + case pb.AttributeAnyValue_STRING_VALUE: + return &pb.AttributeAnyValue{ + Type: pb.AttributeAnyValue_STRING_VALUE, + StringValue: re.ReplaceAllString(val.StringValue, str), + } + case pb.AttributeAnyValue_INT_VALUE: + replacedValue := re.ReplaceAllString(strconv.FormatInt(val.IntValue, 10), str) + return attributeAnyValFromString(replacedValue) + case pb.AttributeAnyValue_DOUBLE_VALUE: + replacedValue := re.ReplaceAllString(strconv.FormatFloat(val.DoubleValue, 'f', -1, 64), str) + return attributeAnyValFromString(replacedValue) + case pb.AttributeAnyValue_BOOL_VALUE: + replacedValue := re.ReplaceAllString(strconv.FormatBool(val.BoolValue), str) + return attributeAnyValFromString(replacedValue) + case pb.AttributeAnyValue_ARRAY_VALUE: + for _, value := range val.ArrayValue.Values { + *value = *f.replaceAttributeArrayValue(re, value, str) //todo test me + } + return val + default: + log.Error("Unknown OTEL AttributeAnyValue type %v, replacer code must be updated, replacing unknown type with `?`") + return &pb.AttributeAnyValue{ + Type: pb.AttributeAnyValue_STRING_VALUE, + StringValue: "?", + } + } +} + +func (f Replacer) replaceAttributeArrayValue(re *regexp.Regexp, val *pb.AttributeArrayValue, str string) *pb.AttributeArrayValue { + switch val.Type { + case pb.AttributeArrayValue_STRING_VALUE: + return &pb.AttributeArrayValue{ + Type: pb.AttributeArrayValue_STRING_VALUE, + StringValue: re.ReplaceAllString(val.StringValue, str), + } + case pb.AttributeArrayValue_INT_VALUE: + replacedValue := re.ReplaceAllString(strconv.FormatInt(val.IntValue, 10), str) + return attributeArrayValFromString(replacedValue) + case pb.AttributeArrayValue_DOUBLE_VALUE: + replacedValue := re.ReplaceAllString(strconv.FormatFloat(val.DoubleValue, 'f', -1, 64), str) + return 
attributeArrayValFromString(replacedValue) + case pb.AttributeArrayValue_BOOL_VALUE: + replacedValue := re.ReplaceAllString(strconv.FormatBool(val.BoolValue), str) + return attributeArrayValFromString(replacedValue) + default: + log.Error("Unknown OTEL AttributeArrayValue type %v, replacer code must be updated, replacing unknown type with `?`") + return &pb.AttributeArrayValue{ + Type: pb.AttributeArrayValue_STRING_VALUE, + StringValue: "?", + } + } +} + +func attributeAnyValFromString(s string) *pb.AttributeAnyValue { + if rf, err := strconv.ParseInt(s, 10, 64); err == nil { + return &pb.AttributeAnyValue{ + Type: pb.AttributeAnyValue_INT_VALUE, + IntValue: rf, + } + } else if rfFloat, err := strconv.ParseFloat(s, 64); err == nil { + return &pb.AttributeAnyValue{ + Type: pb.AttributeAnyValue_DOUBLE_VALUE, + DoubleValue: rfFloat, + } + // Restrict bool types to "true" "false" to avoid unexpected type changes + } else if s == "true" || s == "false" { + return &pb.AttributeAnyValue{ + Type: pb.AttributeAnyValue_BOOL_VALUE, + BoolValue: s == "true", + } + } + return &pb.AttributeAnyValue{ + Type: pb.AttributeAnyValue_STRING_VALUE, + StringValue: s, + } +} + +func attributeArrayValFromString(s string) *pb.AttributeArrayValue { + if rf, err := strconv.ParseInt(s, 10, 64); err == nil { + return &pb.AttributeArrayValue{ + Type: pb.AttributeArrayValue_INT_VALUE, + IntValue: rf, + } + } else if rfFloat, err := strconv.ParseFloat(s, 64); err == nil { + return &pb.AttributeArrayValue{ + Type: pb.AttributeArrayValue_DOUBLE_VALUE, + DoubleValue: rfFloat, + } + // Restrict bool types to "true" "false" to avoid unexpected type changes + } else if s == "true" || s == "false" { + return &pb.AttributeArrayValue{ + Type: pb.AttributeArrayValue_BOOL_VALUE, + BoolValue: s == "true", + } + } + return &pb.AttributeArrayValue{ + Type: pb.AttributeArrayValue_STRING_VALUE, + StringValue: s, + } +} + // ReplaceStatsGroup applies the replacer rules to the given stats bucket group. 
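The span-event branch of the replacer above preserves attribute types where it can: after a rule rewrites an int, double or bool attribute, attributeAnyValFromString and attributeArrayValFromString re-parse the replaced string as int64, then float64, then bool (restricted to the literals "true"/"false"), and only fall back to a string value. A small sketch of that re-typing order, illustrative only (it returns a plain interface{} rather than the pb.AttributeAnyValue protobuf type):

package main

import (
	"fmt"
	"strconv"
)

// retype applies the same parse order as the helpers above: int, then float,
// then a strict "true"/"false" bool, otherwise string.
func retype(s string) interface{} {
	if i, err := strconv.ParseInt(s, 10, 64); err == nil {
		return i
	}
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		return f
	}
	if s == "true" || s == "false" {
		return s == "true"
	}
	return s
}

func main() {
	for _, s := range []string{"one!", "42.5", "false", "100"} {
		v := retype(s)
		fmt.Printf("%-7q -> %T(%v)\n", s, v, v)
	}
	// "one!" -> string, "42.5" -> float64, "false" -> bool, "100" -> int64
}

That matches the new "span events" test cases below: the rule {"some.num", "1", "one!"} turns an int attribute into the string "one!", "42.5" stays a double, "false" stays a bool, and the array element replaced with "100" stays an int.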
func (f Replacer) ReplaceStatsGroup(b *pb.ClientGroupedStats) { for _, rule := range f.rules { diff --git a/pkg/trace/filters/replacer_test.go b/pkg/trace/filters/replacer_test.go index d8d402251d7c1..2a44d94103ee0 100644 --- a/pkg/trace/filters/replacer_test.go +++ b/pkg/trace/filters/replacer_test.go @@ -16,10 +16,8 @@ import ( ) func TestReplacer(t *testing.T) { - assert := assert.New(t) - - t.Run("traces", func(_ *testing.T) { - for _, tt := range []struct { + t.Run("traces", func(tt *testing.T) { + for _, testCase := range []struct { rules [][3]string got, want map[string]string }{ @@ -63,28 +61,28 @@ func TestReplacer(t *testing.T) { }, }, } { - rules := parseRulesFromString(tt.rules) + rules := parseRulesFromString(testCase.rules) tr := NewReplacer(rules) - root := replaceFilterTestSpan(tt.got) - childSpan := replaceFilterTestSpan(tt.got) + root := replaceFilterTestSpan(testCase.got) + childSpan := replaceFilterTestSpan(testCase.got) trace := pb.Trace{root, childSpan} tr.Replace(trace) - for k, v := range tt.want { + for k, v := range testCase.want { switch k { case "resource.name": // test that the filter applies to all spans, not only the root - assert.Equal(v, root.Resource) - assert.Equal(v, childSpan.Resource) + assert.Equal(tt, v, root.Resource) + assert.Equal(tt, v, childSpan.Resource) default: - assert.Equal(v, root.Meta[k]) - assert.Equal(v, childSpan.Meta[k]) + assert.Equal(tt, v, root.Meta[k]) + assert.Equal(tt, v, childSpan.Meta[k]) } } } }) - t.Run("stats", func(_ *testing.T) { - for _, tt := range []struct { + t.Run("stats", func(tt *testing.T) { + for _, testCase := range []struct { rules [][3]string got, want *pb.ClientGroupedStats }{ @@ -117,9 +115,111 @@ func TestReplacer(t *testing.T) { }, }, } { - tr := NewReplacer(parseRulesFromString(tt.rules)) - tr.ReplaceStatsGroup(tt.got) - assert.Equal(tt.got, tt.want) + tr := NewReplacer(parseRulesFromString(testCase.rules)) + tr.ReplaceStatsGroup(testCase.got) + assert.Equal(tt, testCase.got, testCase.want) + } + }) + t.Run("span events", func(tt *testing.T) { + for _, testCase := range []struct { + rules [][3]string + got, want map[string]*pb.AttributeAnyValue + }{ + { + rules: [][3]string{ + {"http.url", "(token/)([^/]*)", "${1}?"}, + {"http.url", "guid", "[REDACTED]"}, + {"custom.tag", "(/foo/bar/).*", "${1}extra"}, + {"a", "b", "c"}, + {"some.num", "1", "one!"}, + {"some.dbl", "42.1", "42.5"}, + {"is.potato", "true", "false"}, + {"my.nums", "42", "100"}, + }, + got: map[string]*pb.AttributeAnyValue{ + "http.url": { + Type: pb.AttributeAnyValue_STRING_VALUE, + StringValue: "some/guid/token/abcdef/abc", + }, + "custom.tag": { + Type: pb.AttributeAnyValue_STRING_VALUE, + StringValue: "/foo/bar/foo", + }, + "some.num": { + Type: pb.AttributeAnyValue_INT_VALUE, + IntValue: 1, + }, + "some.dbl": { + Type: pb.AttributeAnyValue_DOUBLE_VALUE, + DoubleValue: 42.1, + }, + "is.potato": { + Type: pb.AttributeAnyValue_BOOL_VALUE, + BoolValue: true, + }, + "my.nums": { + Type: pb.AttributeAnyValue_ARRAY_VALUE, + ArrayValue: &pb.AttributeArray{ + Values: []*pb.AttributeArrayValue{ + { + Type: pb.AttributeArrayValue_INT_VALUE, + IntValue: 123, + }, + { + Type: pb.AttributeArrayValue_INT_VALUE, + IntValue: 42, + }, + }, + }, + }, + }, + want: map[string]*pb.AttributeAnyValue{ + "http.url": { + Type: pb.AttributeAnyValue_STRING_VALUE, + StringValue: "some/[REDACTED]/token/?/abc", + }, + "custom.tag": { + Type: pb.AttributeAnyValue_STRING_VALUE, + StringValue: "/foo/bar/extra", + }, + "some.num": { + Type: 
pb.AttributeAnyValue_STRING_VALUE, + StringValue: "one!", + }, + "some.dbl": { + Type: pb.AttributeAnyValue_DOUBLE_VALUE, + DoubleValue: 42.5, + }, + "is.potato": { + Type: pb.AttributeAnyValue_BOOL_VALUE, + BoolValue: false, + }, + "my.nums": { + Type: pb.AttributeAnyValue_ARRAY_VALUE, + ArrayValue: &pb.AttributeArray{ + Values: []*pb.AttributeArrayValue{ + { + Type: pb.AttributeArrayValue_INT_VALUE, + IntValue: 123, + }, + { + Type: pb.AttributeArrayValue_INT_VALUE, + IntValue: 100, + }, + }, + }, + }, + }, + }, + } { + rules := parseRulesFromString(testCase.rules) + tr := NewReplacer(rules) + root := replaceFilterTestSpanEvent(testCase.got) + trace := pb.Trace{root} + tr.Replace(trace) + for k, v := range testCase.want { + assert.Equal(tt, v, root.SpanEvents[0].Attributes[k]) + } } }) } @@ -153,6 +253,18 @@ func replaceFilterTestSpan(tags map[string]string) *pb.Span { return span } +// replaceFilterTestSpan creates a span with a span event with the provided attributes +func replaceFilterTestSpanEvent(attributes map[string]*pb.AttributeAnyValue) *pb.Span { + span := &pb.Span{SpanEvents: []*pb.SpanEvent{ + { + TimeUnixNano: 0, + Name: "foo", + Attributes: attributes, + }, + }} + return span +} + // TestReplaceFilterTestSpan tests the replaceFilterTestSpan test // helper function. func TestReplaceFilterTestSpan(t *testing.T) { diff --git a/pkg/trace/go.mod b/pkg/trace/go.mod index e0e3ee4906d99..a88049357c995 100644 --- a/pkg/trace/go.mod +++ b/pkg/trace/go.mod @@ -21,51 +21,56 @@ require ( github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 github.com/DataDog/datadog-go/v5 v5.6.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 github.com/DataDog/sketches-go v1.4.6 github.com/Microsoft/go-winio v0.6.2 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc - github.com/golang/mock v1.6.0 + github.com/golang/mock v1.7.0-rc.1 github.com/golang/protobuf v1.5.4 github.com/google/go-cmp v0.6.0 github.com/google/gofuzz v1.2.0 github.com/google/uuid v1.6.0 - github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 + github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 github.com/stretchr/testify v1.10.0 - github.com/tinylib/msgp v1.2.4 + github.com/tinylib/msgp v1.2.5 github.com/vmihailenco/msgpack/v4 v4.3.13 - go.opentelemetry.io/collector/component v0.115.0 // indirect - go.opentelemetry.io/collector/consumer v1.21.0 - go.opentelemetry.io/collector/pdata v1.21.0 - go.opentelemetry.io/collector/processor/processortest v0.115.0 - go.opentelemetry.io/collector/semconv v0.115.0 - go.opentelemetry.io/otel v1.32.0 - go.opentelemetry.io/otel/metric v1.32.0 + go.opentelemetry.io/collector/component v0.118.0 // indirect + go.opentelemetry.io/collector/consumer v1.24.0 + go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/collector/processor/processortest v0.118.0 + go.opentelemetry.io/collector/semconv v0.118.0 + go.opentelemetry.io/otel v1.33.0 + go.opentelemetry.io/otel/metric v1.33.0 go.uber.org/atomic v1.11.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 golang.org/x/time v0.8.0 - google.golang.org/grpc v1.67.1 - google.golang.org/protobuf v1.35.2 + google.golang.org/grpc v1.69.4 + google.golang.org/protobuf v1.36.3 gopkg.in/ini.v1 
v1.67.0 k8s.io/apimachinery v0.31.2 ) require ( github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.0.0-20241217122454-175edb6c74f2 - github.com/shirou/gopsutil/v4 v4.24.11 - go.opentelemetry.io/collector/component/componenttest v0.115.0 + github.com/shirou/gopsutil/v4 v4.24.12 + go.opentelemetry.io/collector/component/componenttest v0.118.0 ) -require go.opentelemetry.io/collector/processor v0.115.0 // indirect +require go.opentelemetry.io/collector/processor v0.118.0 // indirect + +require ( + go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 // indirect + go.opentelemetry.io/collector/processor/xprocessor v0.118.0 // indirect +) require ( github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect - github.com/DataDog/go-sqllexer v0.0.17 // indirect + github.com/DataDog/go-sqllexer v0.0.20 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/zstd v1.5.6 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/containerd/cgroups/v3 v3.0.4 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect @@ -81,7 +86,7 @@ require ( github.com/moby/sys/userns v0.1.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 // indirect + github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/outcaste-io/ristretto v0.2.3 // indirect github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect @@ -94,23 +99,22 @@ require ( github.com/tklauser/numcpus v0.8.0 // indirect github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/collector/component/componentstatus v0.115.0 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 // indirect - go.opentelemetry.io/collector/consumer/consumertest v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/pprofile v0.115.0 // indirect - go.opentelemetry.io/collector/pdata/testdata v0.115.0 // indirect - go.opentelemetry.io/collector/pipeline v0.115.0 // indirect - go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/component/componentstatus v0.118.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/collector/consumer/consumertest v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/pprofile v0.118.0 // indirect + go.opentelemetry.io/collector/pdata/testdata v0.118.0 // indirect + go.opentelemetry.io/collector/pipeline v0.118.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/net v0.33.0 // indirect + golang.org/x/net v0.34.0 // indirect 
golang.org/x/text v0.21.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -130,3 +134,6 @@ replace ( ) replace github.com/DataDog/datadog-agent/pkg/version => ../version + +// github.com/golang/mock is unmaintained and archived, v1.6.0 is the last released version +replace github.com/golang/mock => github.com/golang/mock v1.6.0 diff --git a/pkg/trace/go.sum b/pkg/trace/go.sum index 44ab376fcf70f..0601d1cd38db4 100644 --- a/pkg/trace/go.sum +++ b/pkg/trace/go.sum @@ -1,11 +1,11 @@ github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.17 h1:u47fJAVg/+5DA74ZW3w0Qu+3qXHd3GtnA8ZBYixdPrM= -github.com/DataDog/go-sqllexer v0.0.17/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.20 h1:0fBknHo42yuhawZS3GtuQSdqcwaiojWjYNT6OdsZRfI= +github.com/DataDog/go-sqllexer v0.0.20/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= @@ -22,8 +22,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= -github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4= -github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -72,9 +72,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid 
v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0 h1:ad0vkEBuk23VJzZR9nkLVG0YAoN9coASF1GusYX6AlU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.23.0/go.mod h1:igFoXX2ELCW06bol23DWPB5BEWfZISOzSP5K2sbLea0= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -115,12 +114,12 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0 h1:a36EJz/mb83f6ieX0v4fNDJ1jXqpeaM6DVQXeFDvdhw= -github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.115.0/go.mod h1:r5/40YO1eSP5ZreOmRzVOUtDr7YG39ZIUcVjHd+9Izc= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 h1:R9MRrO+dSkAHBQLZjuwjv2RHXHQqF2Wtm1Ki0VKD5cs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0/go.mod h1:rKXLXmwdUVcUHwTilroKSejbg3KSwLeYzNPSpkIEnv4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 h1:vwZQ7k8oqlK0bdZYTsjP/59zjQQfjSD4fNsWIWsTu2w= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0/go.mod h1:5ObSa9amrbzbYTdAK1Qhv3D/YqCxxnQhP0sk2eWB7Oo= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0 h1:Xnwd0QEyBg6iNPUbc3CnHIb0hVjfTc+jHdFbA9VSa7k= +github.com/open-telemetry/opentelemetry-collector-contrib/internal/coreinternal v0.118.0/go.mod h1:rmqCuNFMNBUxgyufeU8rpVYOWau8ubr0gmSO1u+R5Hk= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 h1:zEdd1JoVEBX7Lmf/wjs+45p4rR5+HvT2iF5VcoOgK1g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0/go.mod h1:WE5ientZ87x3cySOh4D/uVUwxK82DMyCkLBJ43+ehDU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 h1:pC1e5BvBf8rjwGb56MiTUFEDHU2LSclaqRNUs3z9Snw= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0/go.mod h1:wZTrQ0XWb1A9XBhl1WmUKLPfqNjERKFYWT5WER70gLg= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= @@ -140,16 +139,16 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+ github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 
h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= @@ -168,8 +167,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -185,92 +184,94 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= -go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= -go.opentelemetry.io/collector/component/componenttest v0.115.0 
h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/confmap v1.21.0 h1:1tIcx2/Suwg8VhuPmQw87ba0ludPmumpFCFRZZa6RXA= -go.opentelemetry.io/collector/confmap v1.21.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0 h1:YLf++Z8CMp86AanfOCWUiE7vKbb1kSjgC3a9VJoxbD4= -go.opentelemetry.io/collector/confmap/provider/envprovider v1.21.0/go.mod h1:aSWLYcmgZZJDNtWN1M8JKQuehoGgOxibl1KuvKTar4M= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0 h1:+zukkM+3l426iGoJkXTpLB2Z8QnZFu26TkGPjh5Rn/4= -go.opentelemetry.io/collector/confmap/provider/fileprovider v1.21.0/go.mod h1:BXBpQhF3n4CNLYO2n/mWZPd2U9ekpbLXLRGZrun1VfI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0 h1:NYYGM+SgIlTuNGjd8eGzDr8DkvOe4q7cXon8djF9yyI= -go.opentelemetry.io/collector/confmap/provider/httpprovider v1.21.0/go.mod h1:XRYbuwqq1awFuNhLDUv4aSvn6MzqX+abcevx1O+APJI= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0 h1:P3Q9RytCMY76ORPCnkkjOa4fkuFqmZiQRor+F/nPlYE= -go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.21.0/go.mod h1:xhYhHK3yLQ78tsoaKPIGUfFulgy961ImOe2gATH3RQc= -go.opentelemetry.io/collector/connector v0.115.0 h1:4Kkm3HQFzNT1eliMOB8FbIn+PLMRJ2qQku5Vmy3V8Ko= -go.opentelemetry.io/collector/connector v0.115.0/go.mod h1:+ByuAmYLrYHoKh9B+LGqUc0N2kXcN2l8Dea8Mp6brZ8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0 h1:aW1f4Az0I+QJyImFccNWAXqik80bnNu27aQqi2hFfD8= -go.opentelemetry.io/collector/connector/connectorprofiles v0.115.0/go.mod h1:lmynB1CucydOsHa8RSSBh5roUZPfuiv65imXhtNzClM= -go.opentelemetry.io/collector/connector/connectortest v0.115.0 h1:GjtourFr0MJmlbtEPAZ/1BZCxkNAeJ0aMTlrxwftJ0k= -go.opentelemetry.io/collector/connector/connectortest v0.115.0/go.mod h1:f3KQXXNlh/XuV8elmnuVVyfY92dJCAovz10gD72OH0k= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0 h1:yli//xBCQMPZKXNgNlXemo4dvqhnFrAmCZ11DvQgmcY= -go.opentelemetry.io/collector/consumer/consumererror v0.115.0/go.mod h1:LwVzAvQ6ZVNG7mbOvurbAo+W/rKws0IcjOwriuZXqPE= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/exporter v0.115.0 h1:JnxfpOnsuqhTPKJXVKJLS1Cv3BiVrVLzpHOjJEQw+xw= -go.opentelemetry.io/collector/exporter v0.115.0/go.mod h1:xof3fHQK8wADhaKLIJcQ7ChZaFLNC+haRdPN0wgl6kY= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0 h1:lSQEleCn/q9eFufcuK61NdFKU70ZlgI9dBjPCO/4CrE= -go.opentelemetry.io/collector/exporter/exporterprofiles v0.115.0/go.mod h1:7l5K2AecimX2kx+nZC1gKG3QkP247CO1+SodmJ4fFkQ= 
-go.opentelemetry.io/collector/exporter/exportertest v0.115.0 h1:P9SMTUXQOtcaq40bGtnnAe14zRmR4/yUgj/Tb2BEf/k= -go.opentelemetry.io/collector/exporter/exportertest v0.115.0/go.mod h1:1jMZ9gFGXglb8wfNrBZIgd+RvpZhSyFwdfE+Jtf9w4U= -go.opentelemetry.io/collector/extension v0.115.0 h1:/cBb8AUdD0KMWC6V3lvCC16eP9Fg0wd1Upcp5rgvuGI= -go.opentelemetry.io/collector/extension v0.115.0/go.mod h1:HI7Ak6loyi6ZrZPsQJW1OO1wbaAW8OqXLFNQlTZnreQ= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0 h1:/g25Hp5aoCNKdDjIb3Fc7XRglO8yaBRFLO/IUNPnqNI= -go.opentelemetry.io/collector/extension/extensioncapabilities v0.115.0/go.mod h1:EQx7ETiy330O6q05S2KRZsRNDg0aQEeJmVl7Ipx+Fcw= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0 h1:GBVFxFEskR8jSdu9uaQh2qpXnN5VNXhXjpJ2UjxtE8I= -go.opentelemetry.io/collector/extension/extensiontest v0.115.0/go.mod h1:eu1ecbz5mT+cHoH2H3GmD/rOO0WsicSJD2RLrYuOmRA= -go.opentelemetry.io/collector/featuregate v1.21.0 h1:+EULHPJDLMipcwAGZVp9Nm8NriRvoBBMxp7MSiIZVMI= -go.opentelemetry.io/collector/featuregate v1.21.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0 h1:6DRiSECeApFq6Jj5ug77rG53R6FzJEZBfygkyMEXdpg= -go.opentelemetry.io/collector/internal/fanoutconsumer v0.115.0/go.mod h1:vgQf5HQdmLQqpDHpDq2S3nTRoUuKtRcZpRTsy+UiwYw= -go.opentelemetry.io/collector/otelcol v0.115.0 h1:wZhFGrSCZcTQ4qw4ePjI2PaSrOCejoQKAjprKD/xavs= -go.opentelemetry.io/collector/otelcol v0.115.0/go.mod h1:iK8DPvaizirIYKDl1zZG7DDYUj6GkkH4KHifVVM88vk= -go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0 h1:HNlFpQujlnvawBk8nvMGxzjDHWDCfSprxem/EpQn4u8= -go.opentelemetry.io/collector/otelcol/otelcoltest v0.115.0/go.mod h1:WsMbqYl2rm3nPFbdxQqyLXf4iu97nYLeuQ1seZIpV3Y= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0 h1:3l9ruCAOrssTUDnyChKNzHWOdTtfThnYaoPZ1/+5sD0= -go.opentelemetry.io/collector/pipeline/pipelineprofiles v0.115.0/go.mod h1:2Myg+law/5lcezo9PhhZ0wjCaLYdGK24s1jDWbSW9VY= -go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= -go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= -go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= -go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= -go.opentelemetry.io/collector/receiver v0.115.0 
h1:55Q3Jvj6zHCIA1psKqi/3kEMJO4OqUF5tNAEYNdB1U8= -go.opentelemetry.io/collector/receiver v0.115.0/go.mod h1:nBSCh2O/WUcfgpJ+Jpz+B0z0Hn5jHeRvF2WmLij5EIY= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0 h1:R9JLaj2Al93smIPUkbJshAkb/cY0H5JBOxIx+Zu0NG4= -go.opentelemetry.io/collector/receiver/receiverprofiles v0.115.0/go.mod h1:05E5hGujWeeXJmzKZwTdHyZ/+rRyrQlQB5p5Q2XY39M= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0 h1:OiB684SbHQi6/Pd3ZH0cXjYvCpBS9ilQBfTQx0wVXHg= -go.opentelemetry.io/collector/receiver/receivertest v0.115.0/go.mod h1:Y8Z9U/bz9Xpyt8GI8DxZZgryw3mnnIw+AeKVLTD2cP8= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= -go.opentelemetry.io/collector/service v0.115.0 h1:k4GAOiI5tZgB2QKgwA6c3TeAVr7QL/ft5cOQbzUr8Iw= -go.opentelemetry.io/collector/service v0.115.0/go.mod h1:DKde9LMhNebdREecDSsqiTFLI2wRc+IoV4/wGxU6goY= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/confmap v1.24.0 h1:UUHVhkDCsVw14jPOarug9PDQE2vaB2ELPWMr7ARFBCA= +go.opentelemetry.io/collector/confmap v1.24.0/go.mod h1:Rrhs+MWoaP6AswZp+ReQ2VO9dfOfcUjdjiSHBsG+nec= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0 h1:jAtaNR4b5gnddNzyfcpIhURSDq4rai667yV1Ngmku2Y= +go.opentelemetry.io/collector/confmap/provider/envprovider v1.24.0/go.mod h1:X0BuIYyscilkwApnmxlrdz0kTVWgKXq2ih8sTWm8Zio= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0 h1:QoQulv9L20MhD1TFWH1scbRoo0bxbZqF2quh1VRNMh4= +go.opentelemetry.io/collector/confmap/provider/fileprovider v1.24.0/go.mod h1:ljIH/rWIUHJeWIDEKMRU/ufol/bcgC7ufamchtuTAwM= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0 h1:1mbj6HlVZ4LNVBYrxM5jQEJKxinpe0LtNZwI7i8pQNY= +go.opentelemetry.io/collector/confmap/provider/httpprovider v1.24.0/go.mod h1:xM2qJmW6mB1lzFpLWIoxX/h4tUnoYTICZoqPND9YWi0= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0 h1:Ncr7a3HbVpmjAvPHd0yQM/MV2p7HqJe+zvDPmHdjSCI= +go.opentelemetry.io/collector/confmap/provider/yamlprovider v1.24.0/go.mod h1:i7omVh3uK8efpr7/fSAcOh8Xiv3FLYL26wUuON9i1WI= +go.opentelemetry.io/collector/connector v0.118.0 h1:amay4UriWrtydaAxjQ8/MTTaVYERlZcYLCAGcjoBejw= +go.opentelemetry.io/collector/connector v0.118.0/go.mod h1:R6jbMrHZYg21pZ0nsoo4cSHIn7Lrdpi5R3OWcDEQwhE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0 
h1:hLMSTqtFWveXa3b1qJMEaWuaX3PHx7dfl8G/bsac2fE= +go.opentelemetry.io/collector/connector/connectortest v0.118.0/go.mod h1:hm6TNLiQLe65NpENCFsFoiO8fOf3BbN4UF1heUsT73Q= +go.opentelemetry.io/collector/connector/xconnector v0.118.0 h1:0s6rwZmt8va6xd3BEZs7s2QBNFNjLv0kzYi6l44dKqc= +go.opentelemetry.io/collector/connector/xconnector v0.118.0/go.mod h1:12mJPGWo90iZrrpgOkmSd5TkejweL34V/R6AqwqJnMA= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0 h1:Cx//ZFDa6wUEoRDRYRZ/Rkb52dWNoHj2e9FdlcM9jCA= +go.opentelemetry.io/collector/consumer/consumererror v0.118.0/go.mod h1:2mhnzzLYR5zS2Zz4h9ZnRM8Uogu9qatcfQwGNenhing= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/exporter v0.118.0 h1:PE0vF2U+znOB8OVLPWNw40bGCoT/5QquQ8Xbz4i9Rb0= +go.opentelemetry.io/collector/exporter v0.118.0/go.mod h1:5ST3gxT/RzE/vg2bcGDtWJxlQF1ypwk50UpmdK1kUqY= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0 h1:8gWky42BcJsxoaqWbnqCDUjP3Y84hjC6RD/UWHwR7sI= +go.opentelemetry.io/collector/exporter/exportertest v0.118.0/go.mod h1:UbpQBZvznA8YPqqcKlafVIhB6Qa4fPf2+I67MUGyNqo= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0 h1:PZAo1CFhZHfQwtzUNj+Fwcv/21pWHJHTsrIddD096fw= +go.opentelemetry.io/collector/exporter/xexporter v0.118.0/go.mod h1:x4J+qyrRcp4DfWKqK3DLZomFTIUhedsqCQWqq6Gqps4= +go.opentelemetry.io/collector/extension v0.118.0 h1:9o5jLCTRvs0+rtFDx04zTBuB4WFrE0RvtVCPovYV0sA= +go.opentelemetry.io/collector/extension v0.118.0/go.mod h1:BFwB0WOlse6JnrStO44+k9kwUVjjtseFEHhJLHD7lBg= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0 h1:I/SjuacUXdBOxa6ZnVMuMKkZX+m40tUm+5YKqWnNv/c= +go.opentelemetry.io/collector/extension/extensioncapabilities v0.118.0/go.mod h1:IxDALY0rMvsENrVui7Y5tvvL/xHNgMKuhfiQiSHMiTQ= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0 h1:rKBUaFS9elGfENG45wANmrwx7mHsmt1+YWCzxjftElg= +go.opentelemetry.io/collector/extension/extensiontest v0.118.0/go.mod h1:CqNXzkIOR32D8EUpptpOXhpFkibs3kFlRyNMEgIW8l4= +go.opentelemetry.io/collector/featuregate v1.24.0 h1:DEqDsuJgxjZ3E5JNC9hXCd4sWGFiF7h9kaziODuqwFY= +go.opentelemetry.io/collector/featuregate v1.24.0/go.mod h1:3GaXqflNDVwWndNGBJ1+XJFy3Fv/XrFgjMN60N3z7yg= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0 h1:affTj1Qxjbg9dZ1x2tbV9Rs9/otZQ1lHA++L8qB5KiQ= +go.opentelemetry.io/collector/internal/fanoutconsumer v0.118.0/go.mod h1:9mbE68mYdtTyozr3jTtNMB1RA5F8/dt2aWVYSu6bsQ4= +go.opentelemetry.io/collector/otelcol v0.118.0 h1:uSD3wU0sO4vsw5VvWI2yUFLggLdq1BWN/nC1LJXIhMg= +go.opentelemetry.io/collector/otelcol v0.118.0/go.mod h1:OdKz/AXj+ewCwXp/acZCBIoMIYiIxeNRNkbqUXvWi+o= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.118.0 h1:s4yLzDUPzzPElvcOqth7iOuKe+eBo8iXy6bzAy57sXA= +go.opentelemetry.io/collector/otelcol/otelcoltest v0.118.0/go.mod h1:nNDwBOLXNHVnALpcBzkWQ/770WB3IFvEVgLjgujt3Eo= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= 
+go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0 h1:ZUVF1MYNQYZvmuL30KfP+QbVGSbFZvldBM9hgCe4J4k= +go.opentelemetry.io/collector/pipeline/xpipeline v0.118.0/go.mod h1:XgG1ktGO9J1f6fasMYPWSXL9Raan/VYB9vddKKWp5hQ= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= +go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/receiver v0.118.0 h1:X4mspHmbbtwdCQZ7o370kNmdWfxRnK1FrsvEShCCKEc= +go.opentelemetry.io/collector/receiver v0.118.0/go.mod h1:wFyfu6sgrkDPLQoGOGMuChGZzkZnYcI/tPJWV4CRTzs= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0 h1:XlMr2mPsyXJsMUOqCpEoY3uCPsLZQbNA5fmVNDGB7Bw= +go.opentelemetry.io/collector/receiver/receivertest v0.118.0/go.mod h1:dtu/H1RNjhy11hTVf/XUfc02uGufMhYYdhhYBbglcUg= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0 h1:dzECve9e0H3ot0JWnWPuQr9Y84RhOYSd0+CjvJskx7Y= +go.opentelemetry.io/collector/receiver/xreceiver v0.118.0/go.mod h1:Lv1nD/mSYSP64iV8k+C+mWWZZOMLRubv9d1SUory3/E= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/collector/service v0.118.0 h1:acZ9LzUbEF5M3G7o5FgenPJVuuM2y8c4HW5JVm648L4= +go.opentelemetry.io/collector/service v0.118.0/go.mod h1:uw3cl3UtkAOrEr8UQV2lXKjyTIbhWxURaQec8kE+Pic= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0 h1:j8icMXyyqNf6HGuwlYhniPnVsbJIq7n+WirDu3VAJdQ= go.opentelemetry.io/contrib/bridges/otelzap v0.6.0/go.mod h1:evIOZpl+kAlU5IsaYX2Siw+IbpacAZvXemVsgt70uvw= go.opentelemetry.io/contrib/config v0.10.0 h1:2JknAzMaYjxrHkTnZh3eOme/Y2P5eHE2SWfhfV6Xd6c= go.opentelemetry.io/contrib/config v0.10.0/go.mod h1:aND2M6/KfNkntI5cyvHriR/zvZgPf8j9yETdSmvpfmc= go.opentelemetry.io/contrib/propagators/b3 v1.31.0 h1:PQPXYscmwbCp76QDvO4hMngF2j8Bx/OTV86laEl8uqo= go.opentelemetry.io/contrib/propagators/b3 v1.31.0/go.mod h1:jbqfV8wDdqSDrAYxVpXQnpM0XFMq2FtDesblJ7blOwQ= -go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod 
h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0 h1:mMOmtYie9Fx6TSVzw4W+NTpvoaS1JWWga37oI1a/4qQ= go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.7.0/go.mod h1:yy7nDsMMBUkD+jeekJ36ur5f3jJIrmCwUrY67VFhNpA= go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.32.0 h1:j7ZSD+5yn+lo3sGV69nW04rRR0jhYnBwjuX3r0HvnK0= @@ -293,16 +294,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6Bm go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64= go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk= go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= go.opentelemetry.io/otel/sdk/log v0.7.0 h1:dXkeI2S0MLc5g0/AwxTZv6EUEjctiH8aG14Am56NTmQ= go.opentelemetry.io/otel/sdk/log v0.7.0/go.mod h1:oIRXpW+WD6M8BuGj5rtS0aRu/86cbDV/dAfNaZBIjYM= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -318,8 +319,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f 
h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -335,8 +336,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -357,8 +358,8 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -385,17 +386,16 @@ gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f 
h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/pkg/trace/info/info.go b/pkg/trace/info/info.go index 4198100ee6a5b..df86c40587fdc 100644 --- a/pkg/trace/info/info.go +++ b/pkg/trace/info/info.go @@ -8,7 +8,6 @@ package info import ( "bytes" - "crypto/tls" "encoding/json" "expvar" // automatically publish `/debug/vars` on HTTP port "fmt" @@ -237,9 +236,8 @@ func getProgramBanner(version string) (string, string) { // If error is nil, means the program is running. 
// If not, it displays a pretty-printed message anyway (for support) func Info(w io.Writer, conf *config.AgentConfig) error { - url := fmt.Sprintf("https://127.0.0.1:%d/debug/vars", conf.DebugServerPort) - tr := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} - client := http.Client{Timeout: 3 * time.Second, Transport: tr} + url := fmt.Sprintf("http://127.0.0.1:%d/debug/vars", conf.DebugServerPort) + client := http.Client{Timeout: 3 * time.Second} resp, err := client.Get(url) if err != nil { // OK, here, we can't even make an http call on the agent port, diff --git a/pkg/trace/info/info_test.go b/pkg/trace/info/info_test.go index 25b7cb42b13ff..596d536601e4c 100644 --- a/pkg/trace/info/info_test.go +++ b/pkg/trace/info/info_test.go @@ -63,7 +63,7 @@ func (h *testServerHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { func testServer(t *testing.T, testFile string) *httptest.Server { t.Helper() - server := httptest.NewTLSServer(&testServerHandler{t: t, testFile: testFile}) + server := httptest.NewServer(&testServerHandler{t: t, testFile: testFile}) t.Logf("test server (serving fake yet valid data) listening on %s", server.URL) return server } @@ -94,7 +94,7 @@ func (h *testServerWarningHandler) ServeHTTP(w http.ResponseWriter, r *http.Requ } func testServerWarning(t *testing.T) *httptest.Server { - server := httptest.NewTLSServer(&testServerWarningHandler{t: t}) + server := httptest.NewServer(&testServerWarningHandler{t: t}) t.Logf("test server (serving data containing worrying values) listening on %s", server.URL) return server } @@ -119,7 +119,7 @@ func (h *testServerErrorHandler) ServeHTTP(w http.ResponseWriter, r *http.Reques } func testServerError(t *testing.T) *httptest.Server { - server := httptest.NewTLSServer(&testServerErrorHandler{t: t}) + server := httptest.NewServer(&testServerErrorHandler{t: t}) t.Logf("test server (serving bad data to trigger errors) listening on %s", server.URL) return server } @@ -331,7 +331,7 @@ func TestError(t *testing.T) { assert.Equal(len(lines[1]), len(lines[2])) assert.Equal("", lines[3]) assert.Regexp(regexp.MustCompile(`^ Error: .*$`), lines[4]) - assert.Equal(fmt.Sprintf(" URL: https://127.0.0.1:%d/debug/vars", port), lines[5]) + assert.Equal(fmt.Sprintf(" URL: http://127.0.0.1:%d/debug/vars", port), lines[5]) assert.Equal("", lines[6]) assert.Equal("", lines[7]) } @@ -483,6 +483,8 @@ func TestPublishReceiverStats(t *testing.T) { atom(12), atom(13), atom(14), + atom(15), + atom(16), }, TracesFiltered: atom(4), TracesPriorityNone: atom(5), @@ -531,14 +533,16 @@ func TestPublishReceiverStats(t *testing.T) { "ServiceInvalid": 4.0, "PeerServiceTruncate": 5.0, "PeerServiceInvalid": 6.0, - "SpanNameEmpty": 7.0, - "SpanNameTruncate": 8.0, - "SpanNameInvalid": 9.0, - "ResourceEmpty": 10.0, - "TypeTruncate": 11.0, - "InvalidStartDate": 12.0, - "InvalidDuration": 13.0, - "InvalidHTTPStatusCode": 14.0, + "BaseServiceTruncate": 7.0, + "BaseServiceInvalid": 8.0, + "SpanNameEmpty": 9.0, + "SpanNameTruncate": 10.0, + "SpanNameInvalid": 11.0, + "ResourceEmpty": 12.0, + "TypeTruncate": 13.0, + "InvalidStartDate": 14.0, + "InvalidDuration": 15.0, + "InvalidHTTPStatusCode": 16.0, }, "SpansReceived": 10.0, "TracerVersion": "", diff --git a/pkg/trace/info/stats.go b/pkg/trace/info/stats.go index 289390aa3f507..885e11a04162e 100644 --- a/pkg/trace/info/stats.go +++ b/pkg/trace/info/stats.go @@ -263,6 +263,10 @@ type SpansMalformed struct { PeerServiceTruncate atomic.Int64 // PeerServiceInvalid is when a span's peer.service doesn't 
conform to Datadog tag naming standards PeerServiceInvalid atomic.Int64 + // BaseServiceTruncate is when a span's _dd.base_service is truncated for exceeding the max length + BaseServiceTruncate atomic.Int64 + // BaseServiceInvalid is when a span's _dd.base_service doesn't conform to Datadog tag naming standards + BaseServiceInvalid atomic.Int64 // SpanNameEmpty is when a span's Name is empty SpanNameEmpty atomic.Int64 // SpanNameTruncate is when a span's Name is truncated for exceeding the max length @@ -289,6 +293,8 @@ func (s *SpansMalformed) tagCounters() map[string]*atomic.Int64 { "service_invalid": &s.ServiceInvalid, "peer_service_truncate": &s.PeerServiceTruncate, "peer_service_invalid": &s.PeerServiceInvalid, + "base_service_truncate": &s.BaseServiceTruncate, + "base_service_invalid": &s.BaseServiceInvalid, "span_name_empty": &s.SpanNameEmpty, "span_name_truncate": &s.SpanNameTruncate, "span_name_invalid": &s.SpanNameInvalid, @@ -432,6 +438,8 @@ func (s *Stats) update(recent *Stats) { s.SpansMalformed.ServiceInvalid.Add(recent.SpansMalformed.ServiceInvalid.Load()) s.SpansMalformed.PeerServiceTruncate.Add(recent.SpansMalformed.PeerServiceTruncate.Load()) s.SpansMalformed.PeerServiceInvalid.Add(recent.SpansMalformed.PeerServiceInvalid.Load()) + s.SpansMalformed.BaseServiceTruncate.Add(recent.SpansMalformed.BaseServiceTruncate.Load()) + s.SpansMalformed.BaseServiceInvalid.Add(recent.SpansMalformed.BaseServiceInvalid.Load()) s.SpansMalformed.SpanNameEmpty.Add(recent.SpansMalformed.SpanNameEmpty.Load()) s.SpansMalformed.SpanNameTruncate.Add(recent.SpansMalformed.SpanNameTruncate.Load()) s.SpansMalformed.SpanNameInvalid.Add(recent.SpansMalformed.SpanNameInvalid.Load()) diff --git a/pkg/trace/info/stats_test.go b/pkg/trace/info/stats_test.go index 13f257a031f11..cecc9f1194e07 100644 --- a/pkg/trace/info/stats_test.go +++ b/pkg/trace/info/stats_test.go @@ -61,6 +61,8 @@ func TestSpansMalformed(t *testing.T) { "service_truncate": 0, "peer_service_truncate": 0, "peer_service_invalid": 0, + "base_service_truncate": 0, + "base_service_invalid": 0, "invalid_start_date": 0, "invalid_http_status_code": 0, "invalid_duration": 0, @@ -217,12 +219,14 @@ func TestReceiverStats(t *testing.T) { stats.SpansMalformed.SpanNameTruncate.Store(6) stats.SpansMalformed.PeerServiceTruncate.Store(7) stats.SpansMalformed.PeerServiceInvalid.Store(8) - stats.SpansMalformed.SpanNameInvalid.Store(9) - stats.SpansMalformed.ResourceEmpty.Store(10) - stats.SpansMalformed.TypeTruncate.Store(11) - stats.SpansMalformed.InvalidStartDate.Store(12) - stats.SpansMalformed.InvalidDuration.Store(13) - stats.SpansMalformed.InvalidHTTPStatusCode.Store(14) + stats.SpansMalformed.BaseServiceTruncate.Store(9) + stats.SpansMalformed.BaseServiceInvalid.Store(10) + stats.SpansMalformed.SpanNameInvalid.Store(11) + stats.SpansMalformed.ResourceEmpty.Store(12) + stats.SpansMalformed.TypeTruncate.Store(13) + stats.SpansMalformed.InvalidStartDate.Store(14) + stats.SpansMalformed.InvalidDuration.Store(15) + stats.SpansMalformed.InvalidHTTPStatusCode.Store(16) return &ReceiverStats{ Stats: map[Tags]*TagStats{ tags: { @@ -236,7 +240,7 @@ func TestReceiverStats(t *testing.T) { t.Run("PublishAndReset", func(t *testing.T) { rs := testStats() rs.PublishAndReset(statsclient) - assert.EqualValues(t, 42, len(statsclient.CountCalls)) + assert.EqualValues(t, 44, len(statsclient.CountCalls)) assertStatsAreReset(t, rs) }) @@ -258,7 +262,7 @@ func TestReceiverStats(t *testing.T) { logs := strings.Split(b.String(), "\n") assert.Equal(t, "[INFO] [lang:go 
lang_version:1.12 lang_vendor:gov interpreter:gcc tracer_version:1.33 endpoint_version:v0.4 service:service] -> traces received: 1, traces filtered: 4, traces amount: 9 bytes, events extracted: 13, events sampled: 14", logs[0]) - assert.Equal(t, "[WARN] [lang:go lang_version:1.12 lang_vendor:gov interpreter:gcc tracer_version:1.33 endpoint_version:v0.4 service:service] -> traces_dropped(decoding_error:1, empty_trace:3, foreign_span:6, payload_too_large:2, span_id_zero:5, timeout:7, trace_id_zero:4, unexpected_eof:8), spans_malformed(duplicate_span_id:1, invalid_duration:13, invalid_http_status_code:14, invalid_start_date:12, peer_service_invalid:8, peer_service_truncate:7, resource_empty:10, service_empty:2, service_invalid:4, service_truncate:3, span_name_empty:5, span_name_invalid:9, span_name_truncate:6, type_truncate:11). Enable debug logging for more details.", + assert.Equal(t, "[WARN] [lang:go lang_version:1.12 lang_vendor:gov interpreter:gcc tracer_version:1.33 endpoint_version:v0.4 service:service] -> traces_dropped(decoding_error:1, empty_trace:3, foreign_span:6, payload_too_large:2, span_id_zero:5, timeout:7, trace_id_zero:4, unexpected_eof:8), spans_malformed(base_service_invalid:10, base_service_truncate:9, duplicate_span_id:1, invalid_duration:15, invalid_http_status_code:16, invalid_start_date:14, peer_service_invalid:8, peer_service_truncate:7, resource_empty:12, service_empty:2, service_invalid:4, service_truncate:3, span_name_empty:5, span_name_invalid:11, span_name_truncate:6, type_truncate:13). Enable debug logging for more details.", logs[1]) assertStatsAreReset(t, rs) diff --git a/pkg/trace/sampler/catalog_test.go b/pkg/trace/sampler/catalog_test.go index 44a5d6c3136ac..193fc4ea06277 100644 --- a/pkg/trace/sampler/catalog_test.go +++ b/pkg/trace/sampler/catalog_test.go @@ -11,6 +11,7 @@ import ( "sync" "testing" + "github.com/DataDog/datadog-go/v5/statsd" "github.com/stretchr/testify/assert" ) @@ -60,7 +61,7 @@ func TestNewServiceLookup(t *testing.T) { func TestServiceKeyCatalogRegister(t *testing.T) { cat := newServiceLookup(0) - s := getTestPrioritySampler() + s := getTestPrioritySampler(&statsd.NoOpClient{}) _, root1 := getTestTraceWithService("service1", s) sig1 := cat.register(ServiceSignature{root1.Service, defaultEnv}) @@ -168,7 +169,7 @@ func TestServiceKeyCatalogRatesByService(t *testing.T) { assert := assert.New(t) cat := newServiceLookup(0) - s := getTestPrioritySampler() + s := getTestPrioritySampler(&statsd.NoOpClient{}) _, root1 := getTestTraceWithService("service1", s) sig1 := cat.register(ServiceSignature{root1.Service, defaultEnv}) diff --git a/pkg/trace/sampler/coresampler.go b/pkg/trace/sampler/coresampler.go index 9d1b334742da0..8f4b7e23e2ba2 100644 --- a/pkg/trace/sampler/coresampler.go +++ b/pkg/trace/sampler/coresampler.go @@ -52,9 +52,7 @@ type Sampler struct { // extraRate is an extra raw sampling rate to apply on top of the sampler rate extraRate float64 - totalSeen float32 - totalKept *atomic.Int64 - + metrics metrics tags []string exit chan struct{} stopped chan struct{} @@ -64,14 +62,15 @@ type Sampler struct { // newSampler returns an initialized Sampler func newSampler(extraRate float64, targetTPS float64, tags []string, statsd statsd.ClientInterface) *Sampler { s := &Sampler{ - seen: make(map[Signature][numBuckets]float32), - + seen: make(map[Signature][numBuckets]float32), extraRate: extraRate, targetTPS: atomic.NewFloat64(targetTPS), tags: tags, - - totalKept: atomic.NewInt64(0), - + metrics: metrics{ + tags: tags, + statsd: statsd, + 
value: make(map[metricsKey]metricsValue), + }, exit: make(chan struct{}), stopped: make(chan struct{}), statsd: statsd, @@ -140,7 +139,6 @@ func (s *Sampler) countWeightedSig(now time.Time, signature Signature, n float32 buckets[bucketID%numBuckets] += n s.seen[signature] = buckets - s.totalSeen += n s.muSeen.Unlock() return updateRates } @@ -248,11 +246,6 @@ func zeroAndGetMax(buckets [numBuckets]float32, previousBucket, newBucket int64) return maxBucket, buckets } -// countSample counts a trace sampled by the sampler. -func (s *Sampler) countSample() { - s.totalKept.Inc() -} - // getSignatureSampleRate returns the sampling rate to apply to a signature func (s *Sampler) getSignatureSampleRate(sig Signature) float64 { s.muRates.RLock() @@ -311,14 +304,8 @@ func (s *Sampler) size() int64 { } func (s *Sampler) report() { - s.muSeen.Lock() - seen := int64(s.totalSeen) - s.totalSeen = 0 - s.muSeen.Unlock() - kept := s.totalKept.Swap(0) - _ = s.statsd.Count("datadog.trace_agent.sampler.kept", kept, s.tags, 1) - _ = s.statsd.Count("datadog.trace_agent.sampler.seen", seen, s.tags, 1) - _ = s.statsd.Gauge("datadog.trace_agent.sampler.size", float64(s.size()), s.tags, 1) + s.metrics.report() + _ = s.statsd.Gauge(metricSamplerSize, float64(s.size()), s.tags, 1) } // Stop stops the main Run loop diff --git a/pkg/trace/sampler/coresampler_test.go b/pkg/trace/sampler/coresampler_test.go index 27a36cc7bc614..7a3fdd1af2318 100644 --- a/pkg/trace/sampler/coresampler_test.go +++ b/pkg/trace/sampler/coresampler_test.go @@ -10,23 +10,38 @@ import ( "testing" "time" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/DataDog/datadog-go/v5/statsd" + mockStatsd "github.com/DataDog/datadog-go/v5/statsd/mocks" ) -func TestSamplerAccessRace(_ *testing.T) { - s := newSampler(1, 2, nil, &statsd.NoOpClient{}) +func TestSamplerAccessRace(t *testing.T) { + goroutineN := 5 + loopCount := 10000 + totalCount := goroutineN * loopCount + serviceSignature := ServiceSignature{ + Name: "test-service", + Env: "test-env", + } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + statsdClient := mockStatsd.NewMockClientInterface(ctrl) + statsdClient.EXPECT().Count(metricSamplerSeen, gomock.Any(), []string{"target_service:test-service", "target_env:test-env"}, float64(1)).MinTimes(loopCount).MaxTimes(totalCount) + statsdClient.EXPECT().Count(metricSamplerKept, gomock.Any(), []string{"target_service:test-service", "target_env:test-env"}, float64(1)).MinTimes(loopCount).MaxTimes(totalCount) + statsdClient.EXPECT().Gauge(metricSamplerSize, gomock.Any(), nil, float64(1)).Times(totalCount) + s := newSampler(1, 2, nil, statsdClient) var wg sync.WaitGroup - wg.Add(5) - for j := 0; j < 5; j++ { + wg.Add(goroutineN) + for j := 0; j < goroutineN; j++ { go func(j int) { defer wg.Done() - for i := 0; i < 10000; i++ { - s.countWeightedSig(time.Now().Add(time.Duration(5*(j+i))*time.Second), Signature(i%3), 5) + for i := 0; i < loopCount; i++ { + s.countWeightedSig(time.Now().Add(time.Duration(5*(j+i))*time.Second), serviceSignature.Hash(), 5) + s.metrics.record(true, newMetricsKey(serviceSignature.Name, serviceSignature.Env, nil)) s.report() - s.countSample() s.getSignatureSampleRate(Signature(i % 3)) s.getAllSignatureSampleRates() } diff --git a/pkg/trace/sampler/metrics.go b/pkg/trace/sampler/metrics.go new file mode 100644 index 0000000000000..76425ab80e0d1 --- /dev/null +++ b/pkg/trace/sampler/metrics.go @@ -0,0 +1,90 @@ +// Unless explicitly stated otherwise all files 
in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package sampler + +import ( + "sync" + + "github.com/DataDog/datadog-go/v5/statsd" +) + +const ( + metricSamplerSeen = "datadog.trace_agent.sampler.seen" + metricSamplerKept = "datadog.trace_agent.sampler.kept" + metricSamplerSize = "datadog.trace_agent.sampler.size" +) + +type metrics struct { + statsd statsd.ClientInterface + tags []string + valueMutex sync.Mutex + value map[metricsKey]metricsValue +} + +type metricsKey [3]string + +func newMetricsKey(service, env string, samplingPriority *SamplingPriority) metricsKey { + var key metricsKey + if service != "" { + key[0] = "target_service:" + service + } + if env != "" { + key[1] = "target_env:" + env + } + if samplingPriority != nil { + key[2] = samplingPriority.tag() + } + return key +} + +func (k metricsKey) tags() []string { + tags := make([]string, 0, len(k)) + for _, v := range k { + if v != "" { + tags = append(tags, v) + } + } + return tags +} + +type metricsValue struct { + seen int64 + kept int64 +} + +func (m *metrics) record(sampled bool, metricsKey metricsKey) { + m.valueMutex.Lock() + defer m.valueMutex.Unlock() + v, ok := m.value[metricsKey] + if !ok { + mv := metricsValue{seen: 1} + if sampled { + mv.kept = 1 + } + m.value[metricsKey] = mv + return + } + v.seen++ + if sampled { + v.kept++ + } + m.value[metricsKey] = v +} + +func (m *metrics) report() { + m.valueMutex.Lock() + defer m.valueMutex.Unlock() + for key, value := range m.value { + tags := append(m.tags, key.tags()...) + if value.seen > 0 { + _ = m.statsd.Count(metricSamplerSeen, value.seen, tags, 1) + } + if value.kept > 0 { + _ = m.statsd.Count(metricSamplerKept, value.kept, tags, 1) + } + } + m.value = make(map[metricsKey]metricsValue) // reset counters +} diff --git a/pkg/trace/sampler/prioritysampler.go b/pkg/trace/sampler/prioritysampler.go index 9753de6bf318e..217284e81d49d 100644 --- a/pkg/trace/sampler/prioritysampler.go +++ b/pkg/trace/sampler/prioritysampler.go @@ -109,6 +109,9 @@ func (s *PrioritySampler) Sample(now time.Time, trace *pb.TraceChunk, root *pb.S // but the rule of thumb is: respect client choice. sampled := samplingPriority > 0 + serviceSignature := ServiceSignature{Name: root.Service, Env: toSamplerEnv(tracerEnv, s.agentEnv)} + s.sampler.metrics.record(sampled, newMetricsKey(serviceSignature.Name, serviceSignature.Env, &samplingPriority)) + // Short-circuit and return without counting the trace in the sampling rate logic // if its value has not been set automatically by the client lib. // The feedback loop should be scoped to the values it can act upon. 
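The hunk above wires PrioritySampler.Sample into the aggregated counters introduced in pkg/trace/sampler/metrics.go, replacing the old per-sampler totalSeen/totalKept fields. As a rough, illustrative sketch of how that unexported type is driven (not part of this patch; it would have to live inside the sampler package, e.g. in a test file — the service and env names below are made up, and statsd.NoOpClient is only a stand-in for the agent's real client):

package sampler

import "github.com/DataDog/datadog-go/v5/statsd"

func exampleAggregatedSamplerMetrics() {
	m := metrics{
		statsd: &statsd.NoOpClient{},              // stand-in for the agent's statsd client
		tags:   []string{"sampler:priority"},      // static per-sampler tags (the priority sampler uses "sampler:priority")
		value:  make(map[metricsKey]metricsValue), // per-(service, env, priority) counters
	}

	prio := PriorityAutoKeep
	key := newMetricsKey("billing-api", "prod", &prio) // hypothetical service and env

	m.record(true, key)  // seen=1, kept=1
	m.record(false, key) // seen=2, kept=1

	// One flush: emits datadog.trace_agent.sampler.seen (2) and .kept (1) exactly once each,
	// tagged with sampler:priority, target_service:billing-api, target_env:prod and a
	// sampling_priority tag derived from the priority, then clears the map for the next interval.
	m.report()
}

The point of the indirection is that the number of statsd calls per flush is bounded by the number of active (service, env, priority) combinations rather than by trace throughput, while still exposing per-service seen/kept counts that the old global counters could not.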
@@ -119,14 +122,13 @@ func (s *PrioritySampler) Sample(now time.Time, trace *pb.TraceChunk, root *pb.S return sampled } - signature := s.catalog.register(ServiceSignature{Name: root.Service, Env: toSamplerEnv(tracerEnv, s.agentEnv)}) + signature := s.catalog.register(serviceSignature) // Update sampler state by counting this trace s.countSignature(now, root, signature, clientDroppedP0sWeight) if sampled { s.applyRate(root, signature) - s.sampler.countSample() } return sampled } diff --git a/pkg/trace/sampler/prioritysampler_test.go b/pkg/trace/sampler/prioritysampler_test.go index bf8bd97697b67..f80d0a854ff97 100644 --- a/pkg/trace/sampler/prioritysampler_test.go +++ b/pkg/trace/sampler/prioritysampler_test.go @@ -13,6 +13,8 @@ import ( pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-go/v5/statsd" + mockStatsd "github.com/DataDog/datadog-go/v5/statsd/mocks" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" "go.uber.org/atomic" ) @@ -21,14 +23,14 @@ func randomTraceID() uint64 { return uint64(rand.Int63()) } -func getTestPrioritySampler() *PrioritySampler { +func getTestPrioritySampler(statsd statsd.ClientInterface) *PrioritySampler { // No extra fixed sampling, no maximum TPS conf := &config.AgentConfig{ ExtraSampleRate: 1.0, TargetTPS: 0.0, } - return NewPrioritySampler(conf, &DynamicConfig{}, &statsd.NoOpClient{}) + return NewPrioritySampler(conf, &DynamicConfig{}, statsd) } func getTestTraceWithService(service string, s *PrioritySampler) (*pb.TraceChunk, *pb.Span) { @@ -61,64 +63,103 @@ func getTestTraceWithService(service string, s *PrioritySampler) (*pb.TraceChunk } func TestPrioritySample(t *testing.T) { - // Simple sample unit test - assert := assert.New(t) - - env := defaultEnv - - s := getTestPrioritySampler() - - assert.Equal(float32(0), s.sampler.totalSeen, "checking fresh backend total score is 0") - assert.Equal(int64(0), s.sampler.totalKept.Load(), "checking fresh backend sampled score is 0") - - s = getTestPrioritySampler() - chunk, root := getTestTraceWithService("my-service", s) - - chunk.Priority = -1 - sampled := s.Sample(time.Now(), chunk, root, env, 0) - assert.False(sampled, "trace with negative priority is dropped") - assert.Equal(float32(0), s.sampler.totalSeen, "sampling a priority -1 trace should *NOT* impact sampler backend") - assert.Equal(int64(0), s.sampler.totalKept.Load(), "sampling a priority -1 trace should *NOT* impact sampler backend") - - s = getTestPrioritySampler() - chunk, root = getTestTraceWithService("my-service", s) - - chunk.Priority = 0 - sampled = s.Sample(time.Now(), chunk, root, env, 0) - assert.False(sampled, "trace with priority 0 is dropped") - assert.True(float32(0) < s.sampler.totalSeen, "sampling a priority 0 trace should increase total score") - assert.Equal(int64(0), s.sampler.totalKept.Load(), "sampling a priority 0 trace should *NOT* increase sampled score") - - s = getTestPrioritySampler() - chunk, root = getTestTraceWithService("my-service", s) - - chunk.Priority = 1 - sampled = s.Sample(time.Now(), chunk, root, env, 0) - assert.True(sampled, "trace with priority 1 is kept") - assert.True(float32(0) < s.sampler.totalSeen, "sampling a priority 0 trace should increase total score") - assert.True(int64(0) < s.sampler.totalKept.Load(), "sampling a priority 0 trace should increase sampled score") - - s = getTestPrioritySampler() - chunk, root = getTestTraceWithService("my-service", s) - - chunk.Priority = 2 - sampled = 
s.Sample(time.Now(), chunk, root, env, 0) - assert.True(sampled, "trace with priority 2 is kept") - assert.Equal(float32(0), s.sampler.totalSeen, "sampling a priority 2 trace should *NOT* increase total score") - assert.Equal(int64(0), s.sampler.totalKept.Load(), "sampling a priority 2 trace should *NOT* increase sampled score") - - s = getTestPrioritySampler() - chunk, root = getTestTraceWithService("my-service", s) - - chunk.Priority = int32(PriorityUserKeep) - sampled = s.Sample(time.Now(), chunk, root, env, 0) - assert.True(sampled, "trace with high priority is kept") - assert.Equal(float32(0), s.sampler.totalSeen, "sampling a high priority trace should *NOT* increase total score") - assert.Equal(int64(0), s.sampler.totalKept.Load(), "sampling a high priority trace should *NOT* increase sampled score") - - chunk.Priority = int32(PriorityNone) - sampled = s.Sample(time.Now(), chunk, root, env, 0) - assert.False(sampled, "this should not happen but a trace without priority sampling set should be dropped") + tests := []struct { + priority SamplingPriority + expectedSampled bool + }{ + { + priority: PriorityNone, + expectedSampled: false, + }, + { + priority: PriorityUserDrop, + expectedSampled: false, + }, + { + priority: PriorityAutoDrop, + expectedSampled: false, + }, + { + priority: PriorityAutoKeep, + expectedSampled: true, + }, + { + priority: PriorityUserKeep, + expectedSampled: true, + }, + } + for _, tt := range tests { + t.Run(tt.priority.tag(), func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + statsdClient := mockStatsd.NewMockClientInterface(ctrl) + + s := getTestPrioritySampler(statsdClient) + + // make sure + // - we report the right metrics per service and env. + // - we aggregate metric values before calling statsd. 
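+ // Each service below is sampled twice with the same priority; the aggregated counters should fold both calls into a single statsd Count per metric (seen, plus kept when sampled), tagged by sampler, target_service, target_env and sampling_priority.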
+ expectedTagsA := []string{ + "sampler:priority", + "target_service:service-a", + "target_env:testEnv", + } + if tt.priority == PriorityNone { + expectedTagsA = append(expectedTagsA, "sampling_priority:auto_drop") + } else { + expectedTagsA = append(expectedTagsA, tt.priority.tag()) + } + chunkA, rootA := getTestTraceWithService("service-a", s) + chunkA.Priority = int32(tt.priority) + assert.Equal(t, tt.expectedSampled, s.Sample(time.Now(), chunkA, rootA, defaultEnv, 0)) + assert.Equal(t, tt.expectedSampled, s.Sample(time.Now(), chunkA, rootA, defaultEnv, 0)) + expectedTagsB := []string{ + "sampler:priority", + "target_service:service-b", + "target_env:testEnv", + } + if tt.priority == PriorityNone { + expectedTagsB = append(expectedTagsB, "sampling_priority:auto_drop") + } else { + expectedTagsB = append(expectedTagsB, tt.priority.tag()) + } + chunkB, rootB := getTestTraceWithService("service-b", s) + chunkB.Priority = int32(tt.priority) + assert.Equal(t, tt.expectedSampled, s.Sample(time.Now(), chunkB, rootB, defaultEnv, 0)) + assert.Equal(t, tt.expectedSampled, s.Sample(time.Now(), chunkB, rootB, defaultEnv, 0)) + if tt.expectedSampled { + statsdClient.EXPECT().Count(metricSamplerSeen, int64(2), expectedTagsA, float64(1)).Times(1) + statsdClient.EXPECT().Count(metricSamplerKept, int64(2), expectedTagsA, float64(1)).Times(1) + statsdClient.EXPECT().Count(metricSamplerSeen, int64(2), expectedTagsB, float64(1)).Times(1) + statsdClient.EXPECT().Count(metricSamplerKept, int64(2), expectedTagsB, float64(1)).Times(1) + } else { + statsdClient.EXPECT().Count(metricSamplerSeen, int64(2), expectedTagsA, float64(1)).Times(1) + statsdClient.EXPECT().Count(metricSamplerSeen, int64(2), expectedTagsB, float64(1)).Times(1) + statsdClient.EXPECT().Count(metricSamplerKept, gomock.Any(), gomock.Any(), float64(1)).Times(0) + } + statsdClient.EXPECT().Gauge(metricSamplerSize, gomock.Any(), []string{"sampler:priority"}, float64(1)).Times(1) + s.sampler.report() + + // make sure we reset the counters + statsdClient.EXPECT().Count(metricSamplerKept, gomock.Any(), gomock.Any(), float64(1)).Times(0) + statsdClient.EXPECT().Count(metricSamplerSeen, gomock.Any(), gomock.Any(), float64(1)).Times(0) + statsdClient.EXPECT().Gauge(metricSamplerSize, gomock.Any(), []string{"sampler:priority"}, float64(1)).Times(1) + s.sampler.report() + + // make sure we report the metrics only for service-b.
+ assert.Equal(t, tt.expectedSampled, s.Sample(time.Now(), chunkB, rootB, defaultEnv, 0)) + statsdClient.EXPECT().Count(metricSamplerKept, gomock.Any(), expectedTagsA, float64(1)).Times(0) + statsdClient.EXPECT().Count(metricSamplerSeen, gomock.Any(), expectedTagsA, float64(1)).Times(0) + if tt.expectedSampled { + statsdClient.EXPECT().Count(metricSamplerKept, int64(1), expectedTagsB, float64(1)).Times(1) + } else { + statsdClient.EXPECT().Count(metricSamplerKept, gomock.Any(), expectedTagsB, float64(1)).Times(0) + } + statsdClient.EXPECT().Count(metricSamplerSeen, int64(1), expectedTagsB, float64(1)).Times(1) + statsdClient.EXPECT().Gauge(metricSamplerSize, gomock.Any(), []string{"sampler:priority"}, float64(1)).Times(1) + s.sampler.report() + }) + } } func TestPrioritySamplerTPSFeedbackLoop(t *testing.T) { @@ -153,7 +194,7 @@ func TestPrioritySamplerTPSFeedbackLoop(t *testing.T) { for _, tc := range testCases { rand.Seed(3) - s := getTestPrioritySampler() + s := getTestPrioritySampler(&statsd.NoOpClient{}) t.Logf("testing targetTPS=%0.1f generatedTPS=%0.1f clientDrop=%v", tc.targetTPS, tc.generatedTPS, tc.clientDrop) s.sampler.targetTPS = atomic.NewFloat64(tc.targetTPS) diff --git a/pkg/trace/sampler/probabilistic.go b/pkg/trace/sampler/probabilistic.go index dffde2589039a..d4d48d17848da 100644 --- a/pkg/trace/sampler/probabilistic.go +++ b/pkg/trace/sampler/probabilistic.go @@ -18,8 +18,6 @@ import ( "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/datadog-agent/pkg/trace/watchdog" - "go.uber.org/atomic" - "github.com/DataDog/datadog-go/v5/statsd" ) @@ -41,6 +39,7 @@ type ProbabilisticSampler struct { hashSeed []byte scaledSamplingPercentage uint32 samplingPercentage float64 + metrics metrics // fullTraceIDMode looks at the full 128-bit trace ID to make the sampling decision // This can be useful when trying to run this probabilistic sampler alongside the // OTEL probabilistic sampler processor which always looks at the full 128-bit trace id. @@ -48,11 +47,6 @@ type ProbabilisticSampler struct { // drop the top 64 bits of the trace ID. 
fullTraceIDMode bool - statsd statsd.ClientInterface - tracesSeen *atomic.Int64 - tracesKept *atomic.Int64 - tags []string - // start/stop synchronization stopOnce sync.Once stop chan struct{} @@ -70,13 +64,14 @@ func NewProbabilisticSampler(conf *config.AgentConfig, statsd statsd.ClientInter hashSeed: hashSeedBytes, scaledSamplingPercentage: uint32(conf.ProbabilisticSamplerSamplingPercentage * percentageScaleFactor), samplingPercentage: float64(conf.ProbabilisticSamplerSamplingPercentage) / 100., - statsd: statsd, - tracesSeen: atomic.NewInt64(0), - tracesKept: atomic.NewInt64(0), - tags: []string{"sampler:probabilistic"}, - stop: make(chan struct{}), - stopped: make(chan struct{}), - fullTraceIDMode: fullTraceIDMode, + metrics: metrics{ + statsd: statsd, + tags: []string{"sampler:probabilistic"}, + value: make(map[metricsKey]metricsValue), + }, + stop: make(chan struct{}), + stopped: make(chan struct{}), + fullTraceIDMode: fullTraceIDMode, } } @@ -87,15 +82,15 @@ func (ps *ProbabilisticSampler) Start() { return } go func() { - defer watchdog.LogOnPanic(ps.statsd) + defer watchdog.LogOnPanic(ps.metrics.statsd) statsTicker := time.NewTicker(10 * time.Second) defer statsTicker.Stop() for { select { case <-statsTicker.C: - ps.report() + ps.metrics.report() case <-ps.stop: - ps.report() + ps.metrics.report() close(ps.stopped) return } @@ -116,11 +111,14 @@ func (ps *ProbabilisticSampler) Stop() { } // Sample a trace given the chunk's root span, returns true if the trace should be kept -func (ps *ProbabilisticSampler) Sample(root *trace.Span) bool { +func (ps *ProbabilisticSampler) Sample(root *trace.Span) (sampled bool) { if !ps.enabled { return false } - ps.tracesSeen.Add(1) + + defer func() { + ps.metrics.record(sampled, newMetricsKey(root.Service, "", nil)) + }() tid := make([]byte, 16) var err error @@ -140,17 +138,10 @@ func (ps *ProbabilisticSampler) Sample(root *trace.Span) bool { hash := hasher.Sum32() keep := hash&bitMaskHashBuckets < ps.scaledSamplingPercentage if keep { - ps.tracesKept.Add(1) + sampled = true setMetric(root, probRateKey, ps.samplingPercentage) } - return keep -} - -func (ps *ProbabilisticSampler) report() { - seen := ps.tracesSeen.Swap(0) - kept := ps.tracesKept.Swap(0) - _ = ps.statsd.Count("datadog.trace_agent.sampler.kept", kept, ps.tags, 1) - _ = ps.statsd.Count("datadog.trace_agent.sampler.seen", seen, ps.tags, 1) + return } func get128BitTraceID(span *trace.Span) ([]byte, error) { diff --git a/pkg/trace/sampler/probabilistic_test.go b/pkg/trace/sampler/probabilistic_test.go index 6dc7afd3e7eb1..7b7b1fe6af281 100644 --- a/pkg/trace/sampler/probabilistic_test.go +++ b/pkg/trace/sampler/probabilistic_test.go @@ -11,6 +11,8 @@ import ( "encoding/hex" "testing" + mockStatsd "github.com/DataDog/datadog-go/v5/statsd/mocks" + "github.com/golang/mock/gomock" "github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -85,6 +87,52 @@ func TestProbabilisticSampler(t *testing.T) { }) assert.False(t, sampled) }) + t.Run("keep-dd-metrics", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + statsdClient := mockStatsd.NewMockClientInterface(ctrl) + + tid := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + conf := &config.AgentConfig{ + ProbabilisticSamplerEnabled: true, + ProbabilisticSamplerHashSeed: 0, + ProbabilisticSamplerSamplingPercentage: 41, + Features: 
map[string]struct{}{"probabilistic_sampler_full_trace_id": {}}, + } + sampler := NewProbabilisticSampler(conf, statsdClient) + sampled := sampler.Sample(&trace.Span{ + TraceID: binary.BigEndian.Uint64(tid[8:]), + Meta: map[string]string{"_dd.p.tid": hex.EncodeToString(tid[:8])}, + }) + assert.True(t, sampled) + + statsdClient.EXPECT().Count(metricSamplerKept, int64(1), []string{"sampler:probabilistic"}, float64(1)).Times(1) + statsdClient.EXPECT().Count(metricSamplerSeen, int64(1), []string{"sampler:probabilistic"}, float64(1)).Times(1) + sampler.metrics.report() + }) + t.Run("drop-dd-metrics", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + statsdClient := mockStatsd.NewMockClientInterface(ctrl) + + tid := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + conf := &config.AgentConfig{ + ProbabilisticSamplerEnabled: true, + ProbabilisticSamplerHashSeed: 0, + ProbabilisticSamplerSamplingPercentage: 40, + Features: map[string]struct{}{"probabilistic_sampler_full_trace_id": {}}, + } + sampler := NewProbabilisticSampler(conf, statsdClient) + sampled := sampler.Sample(&trace.Span{ + TraceID: 555, + Meta: map[string]string{"_dd.p.tid": hex.EncodeToString(tid[:8])}, + }) + assert.False(t, sampled) + + statsdClient.EXPECT().Count(metricSamplerKept, gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + statsdClient.EXPECT().Count(metricSamplerSeen, int64(1), []string{"sampler:probabilistic"}, float64(1)).Times(1) + sampler.metrics.report() + }) t.Run("keep-dd-64-full", func(t *testing.T) { conf := &config.AgentConfig{ ProbabilisticSamplerEnabled: true, diff --git a/pkg/trace/sampler/sampler.go b/pkg/trace/sampler/sampler.go index 747ae8bee7ec1..a9fb50257f3e6 100644 --- a/pkg/trace/sampler/sampler.go +++ b/pkg/trace/sampler/sampler.go @@ -68,6 +68,23 @@ const ( samplerHasher = uint64(1111111111111111111) ) +func (s SamplingPriority) tag() string { + var v string + switch s { + case PriorityUserDrop: + v = "manual_drop" + case PriorityAutoDrop: + v = "auto_drop" + case PriorityAutoKeep: + v = "auto_keep" + case PriorityUserKeep: + v = "manual_keep" + default: + v = "none" + } + return "sampling_priority:" + v +} + // SampleByRate returns whether to keep a trace, based on its ID and a sampling rate. // This assumes that trace IDs are nearly uniformly distributed. 
func SampleByRate(traceID uint64, rate float64) bool { diff --git a/pkg/trace/sampler/scoresampler.go b/pkg/trace/sampler/scoresampler.go index 8ef1b4e33d4f9..99b60e792d229 100644 --- a/pkg/trace/sampler/scoresampler.go +++ b/pkg/trace/sampler/scoresampler.go @@ -71,7 +71,9 @@ func (s *ScoreSampler) Sample(now time.Time, trace pb.Trace, root *pb.Span, env rate := s.getSignatureSampleRate(signature) - return s.applySampleRate(root, rate) + sampled := s.applySampleRate(root, rate) + s.metrics.record(sampled, newMetricsKey(root.Service, env, nil)) + return sampled } // UpdateTargetTPS updates the target tps @@ -90,7 +92,6 @@ func (s *ScoreSampler) applySampleRate(root *pb.Span, rate float64) bool { traceID := root.TraceID sampled := SampleByRate(traceID, newRate) if sampled { - s.countSample() setMetric(root, s.samplingRateKey, rate) } return sampled diff --git a/pkg/trace/stats/otel_benckmark_test.go b/pkg/trace/stats/otel_benckmark_test.go index 0b632d9d51a8e..2eb9bcb7ec26f 100644 --- a/pkg/trace/stats/otel_benckmark_test.go +++ b/pkg/trace/stats/otel_benckmark_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" "github.com/stretchr/testify/assert" "go.opentelemetry.io/collector/component/componenttest" @@ -17,8 +16,106 @@ import ( "go.opentelemetry.io/collector/pdata/ptrace" semconv "go.opentelemetry.io/collector/semconv/v1.17.0" "go.opentelemetry.io/otel/metric/noop" + + "github.com/DataDog/datadog-agent/pkg/obfuscate" + "github.com/DataDog/datadog-agent/pkg/trace/config" ) +func BenchmarkOTelStatsWithoutObfuscation(b *testing.B) { + benchmarkOTelObfuscation(b, false) +} + +func BenchmarkOTelStatsWithObfuscation(b *testing.B) { + benchmarkOTelObfuscation(b, true) +} + +func benchmarkOTelObfuscation(b *testing.B, enableObfuscation bool) { + start := time.Now().Add(-1 * time.Second) + end := time.Now() + set := componenttest.NewNopTelemetrySettings() + set.MeterProvider = noop.NewMeterProvider() + attributesTranslator, err := attributes.NewTranslator(set) + assert.NoError(b, err) + + traces := ptrace.NewTraces() + rspan := traces.ResourceSpans().AppendEmpty() + res := rspan.Resource() + for k, v := range map[string]string{ + semconv.AttributeServiceName: "svc", + semconv.AttributeDeploymentEnvironment: "tracer_env", + semconv.AttributeDBSystem: "mysql", + semconv.AttributeDBStatement: ` + SELECT + u.id, + u.name, + u.email, + o.order_id, + o.total_amount, + p.product_name, + p.price + FROM + users u + JOIN + orders o ON u.id = o.user_id + JOIN + order_items oi ON o.order_id = oi.order_id + JOIN + products p ON oi.product_id = p.product_id + WHERE + u.status = 'active' + AND o.order_date BETWEEN '2023-01-01' AND '2023-12-31' + AND p.category IN ('electronics', 'books') + GROUP BY + u.id, + u.name, + u.email, + o.order_id, + o.total_amount, + p.product_name, + p.price + ORDER BY + o.order_date DESC, + p.price ASC + LIMIT 100; + `, + } { + res.Attributes().PutStr(k, v) + } + sspan := rspan.ScopeSpans().AppendEmpty() + span := sspan.Spans().AppendEmpty() + span.SetTraceID(testTraceID) + span.SetSpanID(testSpanID1) + span.SetStartTimestamp(pcommon.NewTimestampFromTime(start)) + span.SetEndTimestamp(pcommon.NewTimestampFromTime(end)) + span.SetName("span_name") + span.SetKind(ptrace.SpanKindClient) + + conf := config.New() + conf.Hostname = "agent_host" + conf.DefaultEnv = "agent_env" + conf.Obfuscation.Redis.Enabled = true + 
conf.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} + conf.OTLPReceiver.AttributesTranslator = attributesTranslator + + concentrator := NewTestConcentratorWithCfg(time.Now(), conf) + + var obfuscator *obfuscate.Obfuscator + if enableObfuscation { + obfuscator = newTestObfuscator(conf) + } + + b.ResetTimer() + + for n := 0; n < b.N; n++ { + inputs := OTLPTracesToConcentratorInputsWithObfuscation(traces, conf, nil, nil, obfuscator) + assert.Len(b, inputs, 1) + input := inputs[0] + concentrator.Add(input) + stats := concentrator.Flush(true) + assert.Len(b, stats.Stats, 1) + } +} + func BenchmarkOTelContainerTags(b *testing.B) { start := time.Now().Add(-1 * time.Second) end := time.Now() diff --git a/pkg/trace/stats/otel_util.go b/pkg/trace/stats/otel_util.go index 43bac8b408a8e..a9e3b18d556ba 100644 --- a/pkg/trace/stats/otel_util.go +++ b/pkg/trace/stats/otel_util.go @@ -6,9 +6,12 @@ package stats import ( - "github.com/DataDog/datadog-agent/pkg/trace/transform" "slices" + "github.com/DataDog/datadog-agent/pkg/obfuscate" + "github.com/DataDog/datadog-agent/pkg/trace/log" + "github.com/DataDog/datadog-agent/pkg/trace/transform" + "go.opentelemetry.io/collector/pdata/ptrace" semconv "go.opentelemetry.io/collector/semconv/v1.17.0" @@ -34,6 +37,21 @@ func OTLPTracesToConcentratorInputs( conf *config.AgentConfig, containerTagKeys []string, peerTagKeys []string, +) []Input { + return OTLPTracesToConcentratorInputsWithObfuscation(traces, conf, containerTagKeys, peerTagKeys, nil) +} + +// OTLPTracesToConcentratorInputsWithObfuscation converts eligible OTLP spans to Concentrator Input. +// The converted Inputs only have the minimal number of fields for APM stats calculation and are only meant +// to be used in Concentrator.Add(). Do not use them for other purposes. +// This function enables obfuscation of spans prior to stats calculation and datadogconnector will migrate +// to this function once this function is published as part of latest pkg/trace module. +func OTLPTracesToConcentratorInputsWithObfuscation( + traces ptrace.Traces, + conf *config.AgentConfig, + containerTagKeys []string, + peerTagKeys []string, + obfuscator *obfuscate.Obfuscator, ) []Input { spanByID, resByID, scopeByID := traceutil.IndexOTelSpans(traces) topLevelByKind := conf.HasFeature("enable_otlp_compute_top_level_by_span_kind") @@ -83,7 +101,11 @@ func OTLPTracesToConcentratorInputs( chunks[ckey] = chunk } _, isTop := topLevelSpans[spanID] - chunk.Spans = append(chunk.Spans, transform.OtelSpanToDDSpanMinimal(otelspan, otelres, scopeByID[spanID], isTop, topLevelByKind, conf, peerTagKeys)) + ddSpan := transform.OtelSpanToDDSpanMinimal(otelspan, otelres, scopeByID[spanID], isTop, topLevelByKind, conf, peerTagKeys) + if obfuscator != nil { + obfuscateSpanForConcentrator(obfuscator, ddSpan, conf) + } + chunk.Spans = append(chunk.Spans, ddSpan) } inputs := make([]Input, 0, len(chunks)) @@ -103,3 +125,29 @@ func OTLPTracesToConcentratorInputs( } return inputs } + +func obfuscateSpanForConcentrator(o *obfuscate.Obfuscator, span *pb.Span, conf *config.AgentConfig) { + if span.Meta == nil { + return + } + switch span.Type { + case "sql", "cassandra": + _, err := transform.ObfuscateSQLSpan(o, span) + if err != nil { + log.Debugf("Error parsing SQL query: %v. 
Resource: %q", err, span.Resource) + } + case "redis": + span.Resource = o.QuantizeRedisString(span.Resource) + if conf.Obfuscation.Redis.Enabled { + transform.ObfuscateRedisSpan(o, span, conf.Obfuscation.Redis.RemoveAllArgs) + } + } +} + +// newTestObfuscator creates a new obfuscator for testing +func newTestObfuscator(conf *config.AgentConfig) *obfuscate.Obfuscator { + oconf := conf.Obfuscation.Export(conf) + oconf.Redis.Enabled = true + o := obfuscate.NewObfuscator(oconf) + return o +} diff --git a/pkg/trace/stats/otel_util_test.go b/pkg/trace/stats/otel_util_test.go index 939c27f8c9f77..5098773adec17 100644 --- a/pkg/trace/stats/otel_util_test.go +++ b/pkg/trace/stats/otel_util_test.go @@ -9,8 +9,6 @@ import ( "testing" "time" - pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" - "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" @@ -20,6 +18,10 @@ import ( semconv "go.opentelemetry.io/collector/semconv/v1.17.0" "go.opentelemetry.io/otel/metric/noop" "google.golang.org/protobuf/testing/protocmp" + + "github.com/DataDog/datadog-agent/pkg/obfuscate" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/config" ) var ( @@ -43,22 +45,25 @@ func TestProcessOTLPTraces(t *testing.T) { parentID := pcommon.SpanID(testSpanID2) for _, tt := range []struct { - name string - traceID *pcommon.TraceID - spanID *pcommon.SpanID - parentSpanID *pcommon.SpanID - spanName string - rattrs map[string]string - sattrs map[string]any - spanKind ptrace.SpanKind - libname string - spanNameAsResourceName bool - spanNameRemappings map[string]string - ignoreRes []string - peerTagsAggr bool - legacyTopLevel bool - ctagKeys []string - expected *pb.StatsPayload + name string + traceID *pcommon.TraceID + spanID *pcommon.SpanID + parentSpanID *pcommon.SpanID + spanName string + rattrs map[string]string + sattrs map[string]any + spanKind ptrace.SpanKind + libname string + spanNameAsResourceName bool + spanNameRemappings map[string]string + ignoreRes []string + peerTagsAggr bool + legacyTopLevel bool + ctagKeys []string + expected *pb.StatsPayload + enableObfuscation bool + enableReceiveResourceSpansV2 bool + enableOperationAndResourceNameV2 bool }{ { name: "empty trace id", @@ -168,6 +173,26 @@ func TestProcessOTLPTraces(t *testing.T) { ignoreRes: []string{"GET /home"}, expected: &pb.StatsPayload{AgentEnv: agentEnv, AgentHostname: agentHost}, }, + { + name: "obfuscate sql span", + spanName: "spanname8", + spanKind: ptrace.SpanKindClient, + rattrs: map[string]string{"service.name": "svc", semconv.AttributeDBSystem: semconv.AttributeDBSystemMSSQL, semconv.AttributeDBStatement: "SELECT username FROM users WHERE id = 12345"}, + enableObfuscation: true, + enableReceiveResourceSpansV2: true, + enableOperationAndResourceNameV2: true, + expected: createStatsPayload(agentEnv, agentHost, "svc", "client.request", "sql", "client", "SELECT username FROM users WHERE id = ?", agentHost, agentEnv, "", nil, nil, true, false), + }, + { + name: "obfuscated redis span", + spanName: "spanname9", + rattrs: map[string]string{"service.name": "svc", "host.name": "test-host", "db.system": "redis", "db.statement": "SET key value"}, + spanKind: ptrace.SpanKindClient, + enableObfuscation: true, + enableReceiveResourceSpansV2: true, + enableOperationAndResourceNameV2: true, + expected: createStatsPayload(agentEnv, agentHost, "svc", "client.request", 
"redis", "client", "SET", "test-host", agentEnv, "", nil, nil, true, false), + }, } { t.Run(tt.name, func(t *testing.T) { traces := ptrace.NewTraces() @@ -211,10 +236,13 @@ func TestProcessOTLPTraces(t *testing.T) { conf.Hostname = agentHost conf.DefaultEnv = agentEnv conf.Features["enable_cid_stats"] = struct{}{} + if !tt.enableReceiveResourceSpansV2 { + conf.Features["disable_receive_resource_spans_v2"] = struct{}{} + } conf.PeerTagsAggregation = tt.peerTagsAggr conf.OTLPReceiver.AttributesTranslator = attributesTranslator conf.OTLPReceiver.SpanNameAsResourceName = tt.spanNameAsResourceName - if conf.OTLPReceiver.SpanNameAsResourceName { + if conf.OTLPReceiver.SpanNameAsResourceName || tt.enableOperationAndResourceNameV2 { // Verify that while EnableOperationAndResourceNamesV2 is in alpha, SpanNameAsResourceName overrides it conf.Features["enable_operation_and_resource_name_logic_v2"] = struct{}{} } @@ -225,7 +253,14 @@ func TestProcessOTLPTraces(t *testing.T) { } concentrator := NewTestConcentratorWithCfg(time.Now(), conf) - inputs := OTLPTracesToConcentratorInputs(traces, conf, tt.ctagKeys, conf.ConfiguredPeerTags()) + var obfuscator *obfuscate.Obfuscator + var inputs []Input + if tt.enableObfuscation { + obfuscator = newTestObfuscator(conf) + inputs = OTLPTracesToConcentratorInputsWithObfuscation(traces, conf, tt.ctagKeys, conf.ConfiguredPeerTags(), obfuscator) + } else { + inputs = OTLPTracesToConcentratorInputs(traces, conf, tt.ctagKeys, conf.ConfiguredPeerTags()) + } for _, input := range inputs { concentrator.Add(input) } diff --git a/pkg/trace/stats/oteltest/go.mod b/pkg/trace/stats/oteltest/go.mod index 3f97dcf41808c..f6b25c790839e 100644 --- a/pkg/trace/stats/oteltest/go.mod +++ b/pkg/trace/stats/oteltest/go.mod @@ -4,39 +4,39 @@ go 1.22.0 require ( github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor v0.56.0-rc.3 + github.com/DataDog/datadog-agent/pkg/obfuscate v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/trace v0.56.0-rc.3 github.com/DataDog/datadog-go/v5 v5.6.0 - github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 + github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 github.com/google/go-cmp v0.6.0 github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/collector/component/componenttest v0.115.0 - go.opentelemetry.io/collector/pdata v1.21.0 - go.opentelemetry.io/collector/semconv v0.115.0 - go.opentelemetry.io/otel/metric v1.32.0 - google.golang.org/protobuf v1.35.2 + go.opentelemetry.io/collector/component/componenttest v0.118.0 + go.opentelemetry.io/collector/pdata v1.24.0 + go.opentelemetry.io/collector/semconv v0.118.0 + go.opentelemetry.io/otel/metric v1.33.0 + google.golang.org/protobuf v1.36.3 ) -require go.opentelemetry.io/collector/component v0.115.0 // indirect +require go.opentelemetry.io/collector/component v0.118.0 // indirect require ( github.com/DataDog/datadog-agent/comp/core/tagger/origindetection v0.0.0-20241217122454-175edb6c74f2 // indirect github.com/DataDog/datadog-agent/comp/trace/compression/def v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/comp/trace/compression/impl-gzip v0.56.0-rc.3 // indirect - github.com/DataDog/datadog-agent/pkg/obfuscate v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/cgroups v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/log v0.59.0 // indirect 
github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect - github.com/DataDog/go-sqllexer v0.0.17 // indirect + github.com/DataDog/go-sqllexer v0.0.20 // indirect github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect github.com/DataDog/sketches-go v1.4.6 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect - github.com/containerd/cgroups/v3 v3.0.4 // indirect + github.com/containerd/cgroups/v3 v3.0.5 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/docker/go-units v0.5.0 // indirect @@ -47,7 +47,7 @@ require ( github.com/go-ole/go-ole v1.3.0 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/mock v1.6.0 // indirect + github.com/golang/mock v1.7.0-rc.1 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/uuid v1.6.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -64,25 +64,26 @@ require ( github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect - github.com/tinylib/msgp v1.2.4 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect + github.com/tinylib/msgp v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect - go.opentelemetry.io/collector/config/configtelemetry v0.115.0 // indirect - go.opentelemetry.io/otel v1.32.0 // indirect - go.opentelemetry.io/otel/sdk v1.32.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.32.0 // indirect - go.opentelemetry.io/otel/trace v1.32.0 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/collector/config/configtelemetry v0.118.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/net v0.33.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/net v0.34.0 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.8.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/grpc v1.67.1 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.69.4 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) @@ -106,9 +107,12 @@ replace ( github.com/DataDog/datadog-agent/pkg/trace => ../../ github.com/DataDog/datadog-agent/pkg/util/cgroups => ../../../util/cgroups github.com/DataDog/datadog-agent/pkg/util/log => ../../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../util/option 
github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../util/scrubber ) replace github.com/DataDog/datadog-agent/pkg/version => ../../../version + +// github.com/golang/mock is unmaintained and archived, v1.6.0 is the last released version +replace github.com/golang/mock => github.com/golang/mock v1.6.0 diff --git a/pkg/trace/stats/oteltest/go.sum b/pkg/trace/stats/oteltest/go.sum index abd898f9bed2a..c28b03c3ee2b0 100644 --- a/pkg/trace/stats/oteltest/go.sum +++ b/pkg/trace/stats/oteltest/go.sum @@ -1,11 +1,11 @@ github.com/DataDog/datadog-go/v5 v5.6.0 h1:2oCLxjF/4htd55piM75baflj/KoE6VYS7alEUqFvRDw= github.com/DataDog/datadog-go/v5 v5.6.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw= -github.com/DataDog/go-sqllexer v0.0.17 h1:u47fJAVg/+5DA74ZW3w0Qu+3qXHd3GtnA8ZBYixdPrM= -github.com/DataDog/go-sqllexer v0.0.17/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= +github.com/DataDog/go-sqllexer v0.0.20 h1:0fBknHo42yuhawZS3GtuQSdqcwaiojWjYNT6OdsZRfI= +github.com/DataDog/go-sqllexer v0.0.20/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc= github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4= github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0 h1:yfk2cF8Bx98fSFpGrehEHh1FRqewfxcCTAbUDt5r3F8= -github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.22.0/go.mod h1:9qzpnBSxSOnKzbF/uHket3SSlQihQHix/ZRC2nZUUYQ= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0 h1:Y65h9AvfQO7ONOBlqCetvvUhh2XO1wIzN7IfXVFjc84= +github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.24.0/go.mod h1:7aAFw4o5dZk/kqFniz7ljJwS8covz8DHouGl7BrsnLI= github.com/DataDog/sketches-go v1.4.6 h1:acd5fb+QdUzGrosfNLwrIhqyrbMORpvBy7mE+vHlT3I= github.com/DataDog/sketches-go v1.4.6/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg= github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= @@ -18,8 +18,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= -github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4= -github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -80,10 +80,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 
-github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0 h1:R9MRrO+dSkAHBQLZjuwjv2RHXHQqF2Wtm1Ki0VKD5cs= -github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.115.0/go.mod h1:rKXLXmwdUVcUHwTilroKSejbg3KSwLeYzNPSpkIEnv4= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 h1:vwZQ7k8oqlK0bdZYTsjP/59zjQQfjSD4fNsWIWsTu2w= -github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0/go.mod h1:5ObSa9amrbzbYTdAK1Qhv3D/YqCxxnQhP0sk2eWB7Oo= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0 h1:zEdd1JoVEBX7Lmf/wjs+45p4rR5+HvT2iF5VcoOgK1g= +github.com/open-telemetry/opentelemetry-collector-contrib/pkg/sampling v0.118.0/go.mod h1:WE5ientZ87x3cySOh4D/uVUwxK82DMyCkLBJ43+ehDU= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 h1:pC1e5BvBf8rjwGb56MiTUFEDHU2LSclaqRNUs3z9Snw= +github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0/go.mod h1:wZTrQ0XWb1A9XBhl1WmUKLPfqNjERKFYWT5WER70gLg= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/outcaste-io/ristretto v0.2.3 h1:AK4zt/fJ76kjlYObOeNwh4T3asEuaCmp26pOvUOL9w0= @@ -103,8 +103,8 @@ github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA= github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -119,8 +119,8 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -134,46 +134,48 @@ github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opentelemetry.io/collector/component v0.115.0 h1:iLte1oCiXzjiCnaOBKdsXacfFiECecpWxW3/LeriMoo= -go.opentelemetry.io/collector/component v0.115.0/go.mod h1:oIUFiH7w1eOimdeYhFI+gAIxYSiLDocKVJ0PTvX7d6s= -go.opentelemetry.io/collector/component/componentstatus v0.115.0 h1:pbpUIL+uKDfEiSgKK+S5nuSL6MDIIQYsp4b65ZGVb9M= -go.opentelemetry.io/collector/component/componentstatus v0.115.0/go.mod h1:36A+9XSiOz0Cdhq+UwwPRlEr5CYuSkEnVO9om4BH7d0= -go.opentelemetry.io/collector/component/componenttest v0.115.0 h1:9URDJ9VyP6tuij+YHjp/kSSMecnZOd7oGvzu+rw9SJY= -go.opentelemetry.io/collector/component/componenttest v0.115.0/go.mod h1:PzXvNqKLCiSADZGZFKH+IOHMkaQ0GTHuzysfVbTPKYY= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0 h1:U07FinCDop+r2RjWQ3aP9ZWONC7r7kQIp1GkXQi6nsI= -go.opentelemetry.io/collector/config/configtelemetry v0.115.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= -go.opentelemetry.io/collector/consumer v1.21.0 h1:THKZ2Vbi6GkamjTBI2hFq5Dc4kINZTWGwQNa8d/Ty9g= -go.opentelemetry.io/collector/consumer v1.21.0/go.mod h1:FQcC4ThMtRYY41dv+IPNK8POLLhAFY3r1YR5fuP7iiY= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0 h1:H3fDuyQW1t2HWHkz96WMBQJKUevypOCjBqnqtaAWyoA= -go.opentelemetry.io/collector/consumer/consumerprofiles v0.115.0/go.mod h1:IzEmZ91Tp7TBxVDq8Cc9xvLsmO7H08njr6Pu9P5d9ns= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0 h1:hru0I2447y0TluCdwlKYFFtgcpyCnlM+LiOK1JZyA70= -go.opentelemetry.io/collector/consumer/consumertest v0.115.0/go.mod h1:ybjALRJWR6aKNOzEMy1T1ruCULVDEjj4omtOJMrH/kU= -go.opentelemetry.io/collector/pdata v1.21.0 h1:PG+UbiFMJ35X/WcAR7Rf/PWmWtRdW0aHlOidsR6c5MA= -go.opentelemetry.io/collector/pdata v1.21.0/go.mod h1:GKb1/zocKJMvxKbS+sl0W85lxhYBTFJ6h6I1tphVyDU= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0 h1:NI89hy13vNDw7EOnQf7Jtitks4HJFO0SUWznTssmP94= -go.opentelemetry.io/collector/pdata/pprofile v0.115.0/go.mod h1:jGzdNfO0XTtfLjXCL/uCC1livg1LlfR+ix2WE/z3RpQ= -go.opentelemetry.io/collector/pdata/testdata v0.115.0 h1:Rblz+AKXdo3fG626jS+KSd0OSA4uMXcTQfpwed6P8LI= -go.opentelemetry.io/collector/pdata/testdata v0.115.0/go.mod h1:inNnRt6S2Nn260EfCBEcjesjlKOSsr0jPwkPqpBkt4s= -go.opentelemetry.io/collector/pipeline v0.115.0 h1:bmACBqb0e8U9ag+vGGHUP7kCfAO7HHROdtzIEg8ulus= -go.opentelemetry.io/collector/pipeline v0.115.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= -go.opentelemetry.io/collector/processor v0.115.0 h1:+fveHGRe24PZPv/F5taahGuZ9HdNW44hgNWEJhIUdyc= -go.opentelemetry.io/collector/processor v0.115.0/go.mod h1:/oLHBlLsm7tFb7zOIrA5C0j14yBtjXKAgxJJ2Bktyk4= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0 h1:cCZAs+FXaebZPppqAN3m+X3etoSBL6NvyQo8l0hOZoo= -go.opentelemetry.io/collector/processor/processorprofiles v0.115.0/go.mod h1:kMxF0gknlWX4duuAJFi2/HuIRi6C3w95tOenRa0GKOY= -go.opentelemetry.io/collector/processor/processortest v0.115.0 h1:j9HEaYFOeOB6VYl9zGhBnhQbTkqGBa2udUvu5NTh6hc= -go.opentelemetry.io/collector/processor/processortest v0.115.0/go.mod h1:Gws+VEnp/eW3qAqPpqbKsrbnnxxNfyDjqrfUXbZfZic= -go.opentelemetry.io/collector/semconv v0.115.0 h1:SoqMvg4ZEB3mz2EdAb6XYa+TuMo5Mir5FRBr3nVFUDY= -go.opentelemetry.io/collector/semconv v0.115.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= 
-go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U= -go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg= -go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M= -go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8= -go.opentelemetry.io/otel/sdk v1.32.0 h1:RNxepc9vK59A8XsgZQouW8ue8Gkb4jpWtJm9ge5lEG4= -go.opentelemetry.io/otel/sdk v1.32.0/go.mod h1:LqgegDBjKMmb2GC6/PrTnteJG39I8/vJCAP9LlJXEjU= -go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU= -go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ= -go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM= -go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/collector/component v0.118.0 h1:sSO/ObxJ+yH77Z4DmT1mlSuxhbgUmY1ztt7xCA1F/8w= +go.opentelemetry.io/collector/component v0.118.0/go.mod h1:LUJ3AL2b+tmFr3hZol3hzKzCMvNdqNq0M5CF3SWdv4M= +go.opentelemetry.io/collector/component/componentstatus v0.118.0 h1:1aCIdUjqz0noKNQr1v04P+lwF89Lkua5U7BhH9IAxkE= +go.opentelemetry.io/collector/component/componentstatus v0.118.0/go.mod h1:ynO1Nyj0t1h6x/djIMJy35bhnnWEc2mlQaFgDNUO504= +go.opentelemetry.io/collector/component/componenttest v0.118.0 h1:knEHckoiL2fEWSIc0iehg39zP4IXzi9sHa45O+oxKo8= +go.opentelemetry.io/collector/component/componenttest v0.118.0/go.mod h1:aHc7t7zVwCpbhrWIWY+GMuaMxMCUP8C8P7pJOt8r/vU= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0 h1:UlN46EViG2X42odWtXgWaqY7Y01ZKpsnswSwXTWx5mM= +go.opentelemetry.io/collector/config/configtelemetry v0.118.0/go.mod h1:SlBEwQg0qly75rXZ6W1Ig8jN25KBVBkFIIAUI1GiAAE= +go.opentelemetry.io/collector/consumer v1.24.0 h1:7DeyBm9qdr1EPuCfPjWyChPK16DbVc0wZeSa9LZprFU= +go.opentelemetry.io/collector/consumer v1.24.0/go.mod h1:0G6jvZprIp4dpKMD1ZxCjriiP9GdFvFMObsQEtTk71s= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0 h1:8AAS9ejQapP1zqt0+cI6u+AUBheT3X0171N9WtXWsVY= +go.opentelemetry.io/collector/consumer/consumertest v0.118.0/go.mod h1:spRM2wyGr4QZzqMHlLmZnqRCxqXN4Wd0piogC4Qb5PQ= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0 h1:guWnzzRqgCInjnYlOQ1BPrimppNGIVvnknAjlIbWXuY= +go.opentelemetry.io/collector/consumer/xconsumer v0.118.0/go.mod h1:C5V2d6Ys/Fi6k3tzjBmbdZ9v3J/rZSAMlhx4KVcMIIg= +go.opentelemetry.io/collector/pdata v1.24.0 h1:D6j92eAzmAbQgivNBUnt8r9juOl8ugb+ihYynoFZIEg= +go.opentelemetry.io/collector/pdata v1.24.0/go.mod h1:cf3/W9E/uIvPS4MR26SnMFJhraUCattzzM6qusuONuc= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0 h1:VK/fr65VFOwEhsSGRPj5c3lCv0yIK1Kt0sZxv9WZBb8= +go.opentelemetry.io/collector/pdata/pprofile v0.118.0/go.mod h1:eJyP/vBm179EghV3dPSnamGAWQwLyd+4z/3yG54YFoQ= +go.opentelemetry.io/collector/pdata/testdata v0.118.0 h1:5N0w1SX9KIRkwvtkrpzQgXy9eGk3vfNG0ds6mhEPMIM= +go.opentelemetry.io/collector/pdata/testdata v0.118.0/go.mod h1:UY+GHV5bOC1BnFburOZ0wiHReJj1XbW12mi2Ogbc5Lw= +go.opentelemetry.io/collector/pipeline v0.118.0 h1:RI1DMe7L0+5hGkx0EDGxG00TaJoh96MEQppgOlGx1Oc= +go.opentelemetry.io/collector/pipeline v0.118.0/go.mod h1:qE3DmoB05AW0C3lmPvdxZqd/H4po84NPzd5MrqgtL74= +go.opentelemetry.io/collector/processor v0.118.0 h1:NlqWiTTpPP+EPbrqTcNP9nh/4O4/9U9RGWVB49xo4ws= 
+go.opentelemetry.io/collector/processor v0.118.0/go.mod h1:Y8OD7wk51oPuBqrbn1qXIK91AbprRHP76hlvEzC24U4= +go.opentelemetry.io/collector/processor/processortest v0.118.0 h1:VfTLHuIaJWGyUmrvAOvf63gPMf1vAW68/jtJClEsKtU= +go.opentelemetry.io/collector/processor/processortest v0.118.0/go.mod h1:ZFWxsSoafGNOEk83FtGz43M5ypUzAOvGnfT0aQTDHdU= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0 h1:M/EMhPRbadHLpv7g99fBjfgyuYexBZmgQqb2vjTXjvM= +go.opentelemetry.io/collector/processor/xprocessor v0.118.0/go.mod h1:lkoQoCv2Cz+C0kf2VHgBUDYWDecZLLeaHEvHDXbBCXU= +go.opentelemetry.io/collector/semconv v0.118.0 h1:V4vlMIK7TIaemrrn2VawvQPwruIKpj7Xgw9P5+BL56w= +go.opentelemetry.io/collector/semconv v0.118.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= @@ -186,8 +188,8 @@ go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -196,8 +198,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= 
+golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -214,8 +216,8 @@ golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -234,12 +236,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/trace/stats/oteltest/otel_apm_stats_comparison_test.go b/pkg/trace/stats/oteltest/otel_apm_stats_comparison_test.go index 1baa72bc0dff6..779bf3c03af0d 100644 --- a/pkg/trace/stats/oteltest/otel_apm_stats_comparison_test.go +++ 
b/pkg/trace/stats/oteltest/otel_apm_stats_comparison_test.go @@ -24,6 +24,7 @@ import ( "google.golang.org/protobuf/testing/protocmp" "github.com/DataDog/datadog-agent/comp/otelcol/otlp/components/statsprocessor" + "github.com/DataDog/datadog-agent/pkg/obfuscate" pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" traceconfig "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/stats" @@ -49,8 +50,8 @@ func testOTelAPMStatsMatch(enableReceiveResourceSpansV2 bool, t *testing.T) { require.NoError(t, err) tcfg := getTraceAgentCfg(attributesTranslator) peerTagKeys := tcfg.ConfiguredPeerTags() - if enableReceiveResourceSpansV2 { - tcfg.Features["enable_receive_resource_spans_v2"] = struct{}{} + if !enableReceiveResourceSpansV2 { + tcfg.Features["disable_receive_resource_spans_v2"] = struct{}{} } metricsClient := &statsd.NoOpClient{} @@ -70,8 +71,9 @@ func testOTelAPMStatsMatch(enableReceiveResourceSpansV2 bool, t *testing.T) { // fakeAgent1 has OTLP traces go through the old pipeline: ReceiveResourceSpan -> TraceWriter -> ... -> Concentrator.Run fakeAgent1.Ingest(ctx, traces) + obfuscator := newTestObfuscator(tcfg) // fakeAgent2 calls the new API in Concentrator that directly calculates APM stats for OTLP traces - inputs := stats.OTLPTracesToConcentratorInputs(traces, tcfg, []string{semconv.AttributeContainerID, semconv.AttributeK8SContainerName}, peerTagKeys) + inputs := stats.OTLPTracesToConcentratorInputsWithObfuscation(traces, tcfg, []string{semconv.AttributeContainerID, semconv.AttributeK8SContainerName}, peerTagKeys, obfuscator) for _, input := range inputs { fakeAgent2.Concentrator.Add(input) } @@ -158,6 +160,7 @@ func getTestTraces() ptrace.Traces { rootattrs.PutInt(semconv.AttributeHTTPStatusCode, 404) rootattrs.PutStr(semconv.AttributePeerService, "test_peer_svc") rootattrs.PutStr(semconv.AttributeDBSystem, "redis") + rootattrs.PutStr(semconv.AttributeDBStatement, "SET key value") root.Status().SetCode(ptrace.StatusCodeError) child1 := sspan.Spans().AppendEmpty() @@ -201,3 +204,11 @@ func getTestTraces() ptrace.Traces { return traces } + +// newTestObfuscator creates a new obfuscator for testing +func newTestObfuscator(conf *traceconfig.AgentConfig) *obfuscate.Obfuscator { + oconf := conf.Obfuscation.Export(conf) + oconf.Redis.Enabled = true + o := obfuscate.NewObfuscator(oconf) + return o +} diff --git a/pkg/trace/traceutil/otel_util.go b/pkg/trace/traceutil/otel_util.go index 5e645f7b6673b..9290745e88f42 100644 --- a/pkg/trace/traceutil/otel_util.go +++ b/pkg/trace/traceutil/otel_util.go @@ -10,14 +10,16 @@ import ( "encoding/binary" "strings" - "github.com/DataDog/datadog-agent/pkg/trace/log" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" semconv117 "go.opentelemetry.io/collector/semconv/v1.17.0" + semconv126 "go.opentelemetry.io/collector/semconv/v1.26.0" semconv "go.opentelemetry.io/collector/semconv/v1.6.1" "go.opentelemetry.io/otel/attribute" + + "github.com/DataDog/datadog-agent/pkg/trace/log" ) // Util functions for converting OTel semantics to DD semantics. 
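// NOTE (editorial, not part of the patch): a minimal sketch of how a caller outside
// the tests might wire up the new OTLPTracesToConcentratorInputsWithObfuscation API
// exercised above, using only calls that already appear in this diff. The cfg, traces
// and concentrator variables are assumptions for illustration.
//
//	oconf := cfg.Obfuscation.Export(cfg)
//	oconf.Redis.Enabled = true
//	o := obfuscate.NewObfuscator(oconf)
//	inputs := stats.OTLPTracesToConcentratorInputsWithObfuscation(traces, cfg, nil, cfg.ConfiguredPeerTags(), o)
//	for _, in := range inputs {
//		concentrator.Add(in)
//	}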
@@ -32,6 +34,96 @@ const ( TagStatusCode = "http.status_code" ) +// span.Type constants for db systems +const ( + spanTypeSQL = "sql" + spanTypeCassandra = "cassandra" + spanTypeRedis = "redis" + spanTypeMemcached = "memcached" + spanTypeMongoDB = "mongodb" + spanTypeElasticsearch = "elasticsearch" + spanTypeOpenSearch = "opensearch" + spanTypeDB = "db" +) + +// DBTypes are semconv types that should map to span.Type values given in the mapping +var dbTypes = map[string]string{ + // SQL db types + semconv.AttributeDBSystemOtherSQL: spanTypeSQL, + semconv.AttributeDBSystemMSSQL: spanTypeSQL, + semconv.AttributeDBSystemMySQL: spanTypeSQL, + semconv.AttributeDBSystemOracle: spanTypeSQL, + semconv.AttributeDBSystemDB2: spanTypeSQL, + semconv.AttributeDBSystemPostgreSQL: spanTypeSQL, + semconv.AttributeDBSystemRedshift: spanTypeSQL, + semconv.AttributeDBSystemCloudscape: spanTypeSQL, + semconv.AttributeDBSystemHSQLDB: spanTypeSQL, + semconv.AttributeDBSystemMaxDB: spanTypeSQL, + semconv.AttributeDBSystemIngres: spanTypeSQL, + semconv.AttributeDBSystemFirstSQL: spanTypeSQL, + semconv.AttributeDBSystemEDB: spanTypeSQL, + semconv.AttributeDBSystemCache: spanTypeSQL, + semconv.AttributeDBSystemFirebird: spanTypeSQL, + semconv.AttributeDBSystemDerby: spanTypeSQL, + semconv.AttributeDBSystemInformix: spanTypeSQL, + semconv.AttributeDBSystemMariaDB: spanTypeSQL, + semconv.AttributeDBSystemSqlite: spanTypeSQL, + semconv.AttributeDBSystemSybase: spanTypeSQL, + semconv.AttributeDBSystemTeradata: spanTypeSQL, + semconv.AttributeDBSystemVertica: spanTypeSQL, + semconv.AttributeDBSystemH2: spanTypeSQL, + semconv.AttributeDBSystemColdfusion: spanTypeSQL, + semconv.AttributeDBSystemCockroachdb: spanTypeSQL, + semconv.AttributeDBSystemProgress: spanTypeSQL, + semconv.AttributeDBSystemHanaDB: spanTypeSQL, + semconv.AttributeDBSystemAdabas: spanTypeSQL, + semconv.AttributeDBSystemFilemaker: spanTypeSQL, + semconv.AttributeDBSystemInstantDB: spanTypeSQL, + semconv.AttributeDBSystemInterbase: spanTypeSQL, + semconv.AttributeDBSystemNetezza: spanTypeSQL, + semconv.AttributeDBSystemPervasive: spanTypeSQL, + semconv.AttributeDBSystemPointbase: spanTypeSQL, + semconv117.AttributeDBSystemClickhouse: spanTypeSQL, // not in semconv 1.6.1 + + // Cassandra db types + semconv.AttributeDBSystemCassandra: spanTypeCassandra, + + // Redis db types + semconv.AttributeDBSystemRedis: spanTypeRedis, + + // Memcached db types + semconv.AttributeDBSystemMemcached: spanTypeMemcached, + + // Mongodb db types + semconv.AttributeDBSystemMongoDB: spanTypeMongoDB, + + // Elasticsearch db types + semconv.AttributeDBSystemElasticsearch: spanTypeElasticsearch, + + // Opensearch db types, not in semconv 1.6.1 + semconv117.AttributeDBSystemOpensearch: spanTypeOpenSearch, + + // Generic db types + semconv.AttributeDBSystemHive: spanTypeDB, + semconv.AttributeDBSystemHBase: spanTypeDB, + semconv.AttributeDBSystemNeo4j: spanTypeDB, + semconv.AttributeDBSystemCouchbase: spanTypeDB, + semconv.AttributeDBSystemCouchDB: spanTypeDB, + semconv.AttributeDBSystemCosmosDB: spanTypeDB, + semconv.AttributeDBSystemDynamoDB: spanTypeDB, + semconv.AttributeDBSystemGeode: spanTypeDB, +} + +// checkDBType checks if the dbType is a known db type and returns the corresponding span.Type +func checkDBType(dbType string) string { + spanType, ok := dbTypes[dbType] + if ok { + return spanType + } + // span type not found, return generic db type + return spanTypeDB +} + // IndexOTelSpans iterates over the input OTel spans and returns 3 maps: // OTel spans indexed by span 
ID, OTel resources indexed by span ID, OTel instrumentation scopes indexed by span ID. // Skips spans with invalid trace ID or span ID. If there are multiple spans with the same (non-zero) span ID, the last one wins. @@ -128,7 +220,33 @@ func GetOTelAttrValInResAndSpanAttrs(span ptrace.Span, res pcommon.Resource, nor return GetOTelAttrVal(span.Attributes(), normalize, keys...) } +// SpanKind2Type returns a span's type based on the given kind and other present properties. +// This function is used in Resource V1 logic only. See GetOtelSpanType for Resource V2 logic. +func SpanKind2Type(span ptrace.Span, res pcommon.Resource) string { + var typ string + switch span.Kind() { + case ptrace.SpanKindServer: + typ = "web" + case ptrace.SpanKindClient: + typ = "http" + db := GetOTelAttrValInResAndSpanAttrs(span, res, true, semconv.AttributeDBSystem) + if db == "" { + break + } + switch db { + case "redis", "memcached": + typ = "cache" + default: + typ = "db" + } + default: + typ = "custom" + } + return typ +} + // GetOTelSpanType returns the DD span type based on OTel span kind and attributes. +// This logic is used in ReceiveResourceSpansV2 logic func GetOTelSpanType(span ptrace.Span, res pcommon.Resource) string { typ := GetOTelAttrValInResAndSpanAttrs(span, res, false, "span.type") if typ != "" { @@ -139,12 +257,10 @@ func GetOTelSpanType(span ptrace.Span, res pcommon.Resource) string { typ = "web" case ptrace.SpanKindClient: db := GetOTelAttrValInResAndSpanAttrs(span, res, true, semconv.AttributeDBSystem) - if db == "redis" || db == "memcached" { - typ = "cache" - } else if db != "" { - typ = "db" - } else { + if db == "" { typ = "http" + } else { + typ = checkDBType(db) } default: typ = "custom" @@ -266,6 +382,20 @@ func GetOTelResourceV2(span ptrace.Span, res pcommon.Resource) (resName string) } return } + + if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeDBSystem); m != "" { + // Since traces are obfuscated by span.Resource in pkg/trace/agent/obfuscate.go, we should use span.Resource as the resource name. 
+ // https://github.com/DataDog/datadog-agent/blob/62619a69cff9863f5b17215847b853681e36ff15/pkg/trace/agent/obfuscate.go#L32 + if dbStatement := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeDBStatement); dbStatement != "" { + resName = dbStatement + return + } + if dbQuery := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv126.AttributeDBQueryText); dbQuery != "" { + resName = dbQuery + return + } + } + resName = span.Name() return diff --git a/pkg/trace/traceutil/otel_util_test.go b/pkg/trace/traceutil/otel_util_test.go index 201c64a0745b8..c9aab2bde0309 100644 --- a/pkg/trace/traceutil/otel_util_test.go +++ b/pkg/trace/traceutil/otel_util_test.go @@ -15,6 +15,8 @@ import ( "go.opentelemetry.io/collector/component/componenttest" "go.opentelemetry.io/collector/pdata/pcommon" "go.opentelemetry.io/collector/pdata/ptrace" + semconv117 "go.opentelemetry.io/collector/semconv/v1.17.0" + semconv126 "go.opentelemetry.io/collector/semconv/v1.26.0" semconv "go.opentelemetry.io/collector/semconv/v1.6.1" "go.opentelemetry.io/otel/metric/noop" ) @@ -176,19 +178,43 @@ func TestGetOTelSpanType(t *testing.T) { name: "redis span", spanKind: ptrace.SpanKindClient, rattrs: map[string]string{semconv.AttributeDBSystem: "redis"}, - expected: "cache", + expected: spanTypeRedis, }, { name: "memcached span", spanKind: ptrace.SpanKindClient, rattrs: map[string]string{semconv.AttributeDBSystem: "memcached"}, - expected: "cache", + expected: spanTypeMemcached, + }, + { + name: "sql db client span", + spanKind: ptrace.SpanKindClient, + rattrs: map[string]string{semconv.AttributeDBSystem: semconv.AttributeDBSystemPostgreSQL}, + expected: spanTypeSQL, + }, + { + name: "elastic db client span", + spanKind: ptrace.SpanKindClient, + rattrs: map[string]string{semconv.AttributeDBSystem: semconv.AttributeDBSystemElasticsearch}, + expected: spanTypeElasticsearch, + }, + { + name: "opensearch db client span", + spanKind: ptrace.SpanKindClient, + rattrs: map[string]string{semconv.AttributeDBSystem: semconv117.AttributeDBSystemOpensearch}, + expected: spanTypeOpenSearch, + }, + { + name: "cassandra db client span", + spanKind: ptrace.SpanKindClient, + rattrs: map[string]string{semconv.AttributeDBSystem: semconv.AttributeDBSystemCassandra}, + expected: spanTypeCassandra, }, { name: "other db client span", spanKind: ptrace.SpanKindClient, - rattrs: map[string]string{semconv.AttributeDBSystem: "postgres"}, - expected: "db", + rattrs: map[string]string{semconv.AttributeDBSystem: semconv.AttributeDBSystemCouchDB}, + expected: spanTypeDB, }, { name: "http client span", @@ -214,6 +240,65 @@ func TestGetOTelSpanType(t *testing.T) { } } +func TestSpanKind2Type(t *testing.T) { + for _, tt := range []struct { + kind ptrace.SpanKind + meta map[string]string + out string + }{ + { + kind: ptrace.SpanKindServer, + out: "web", + }, + { + kind: ptrace.SpanKindClient, + out: "http", + }, + { + kind: ptrace.SpanKindClient, + meta: map[string]string{"db.system": "redis"}, + out: "cache", + }, + { + kind: ptrace.SpanKindClient, + meta: map[string]string{"db.system": "memcached"}, + out: "cache", + }, + { + kind: ptrace.SpanKindClient, + meta: map[string]string{"db.system": "other"}, + out: "db", + }, + { + kind: ptrace.SpanKindProducer, + out: "custom", + }, + { + kind: ptrace.SpanKindConsumer, + out: "custom", + }, + { + kind: ptrace.SpanKindInternal, + out: "custom", + }, + { + kind: ptrace.SpanKindUnspecified, + out: "custom", + }, + } { + t.Run(tt.out, func(t *testing.T) { + span := ptrace.NewSpan() + 
span.SetKind(tt.kind) + res := pcommon.NewResource() + for k, v := range tt.meta { + res.Attributes().PutStr(k, v) + } + actual := SpanKind2Type(span, res) + assert.Equal(t, tt.out, actual) + }) + } +} + func TestGetOTelService(t *testing.T) { for _, tt := range []struct { name string @@ -314,6 +399,26 @@ func TestGetOTelResource(t *testing.T) { expectedV1: "query myQuery", expectedV2: "query myQuery", }, + { + name: "SQL statement resource", + rattrs: map[string]string{ + semconv.AttributeDBSystem: "mysql", + semconv.AttributeDBStatement: "SELECT * FROM table WHERE id = 12345", + }, + sattrs: map[string]string{"span.name": "span_name"}, + expectedV1: "span_name", + expectedV2: "SELECT * FROM table WHERE id = 12345", + }, + { + name: "Redis command resource", + rattrs: map[string]string{ + semconv.AttributeDBSystem: "redis", + semconv126.AttributeDBQueryText: "SET key value", + }, + sattrs: map[string]string{"span.name": "span_name"}, + expectedV1: "span_name", + expectedV2: "SET key value", + }, } { t.Run(tt.name, func(t *testing.T) { span := ptrace.NewSpan() diff --git a/pkg/trace/transform/obfuscate.go b/pkg/trace/transform/obfuscate.go new file mode 100644 index 0000000000000..c87a9cdecee9f --- /dev/null +++ b/pkg/trace/transform/obfuscate.go @@ -0,0 +1,68 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package transform + +import ( + "github.com/DataDog/datadog-agent/pkg/obfuscate" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" + "github.com/DataDog/datadog-agent/pkg/trace/traceutil" +) + +const ( + // TagRedisRawCommand represents a redis raw command tag + TagRedisRawCommand = "redis.raw_command" + // TagMemcachedCommand represents a memcached command tag + TagMemcachedCommand = "memcached.command" + // TagMongoDBQuery represents a MongoDB query tag + TagMongoDBQuery = "mongodb.query" + // TagElasticBody represents an Elasticsearch body tag + TagElasticBody = "elasticsearch.body" + // TagOpenSearchBody represents an OpenSearch body tag + TagOpenSearchBody = "opensearch.body" + // TagSQLQuery represents a SQL query tag + TagSQLQuery = "sql.query" + // TagHTTPURL represents an HTTP URL tag + TagHTTPURL = "http.url" + // TagDBMS represents a DBMS tag + TagDBMS = "db.type" +) + +const ( + // TextNonParsable is the error text used when a query is non-parsable + TextNonParsable = "Non-parsable SQL query" +) + +// ObfuscateSQLSpan obfuscates a SQL span using pkg/obfuscate logic +func ObfuscateSQLSpan(o *obfuscate.Obfuscator, span *pb.Span) (*obfuscate.ObfuscatedQuery, error) { + if span.Resource == "" { + return nil, nil + } + oq, err := o.ObfuscateSQLStringForDBMS(span.Resource, span.Meta[TagDBMS]) + if err != nil { + // we have an error, discard the SQL to avoid polluting user resources. 
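+		// The caller's *pb.Span is left in a safe state: its resource and sql.query tag are
+		// replaced with the TextNonParsable placeholder before the error is returned.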
+ span.Resource = TextNonParsable + traceutil.SetMeta(span, TagSQLQuery, TextNonParsable) + return nil, err + } + span.Resource = oq.Query + if len(oq.Metadata.TablesCSV) > 0 { + traceutil.SetMeta(span, "sql.tables", oq.Metadata.TablesCSV) + } + traceutil.SetMeta(span, TagSQLQuery, oq.Query) + return oq, nil +} + +// ObfuscateRedisSpan obfuscates a Redis span using pkg/obfuscate logic +func ObfuscateRedisSpan(o *obfuscate.Obfuscator, span *pb.Span, removeAllArgs bool) { + if span.Meta == nil || span.Meta[TagRedisRawCommand] == "" { + return + } + if removeAllArgs { + span.Meta[TagRedisRawCommand] = o.RemoveAllRedisArgs(span.Meta[TagRedisRawCommand]) + return + } + span.Meta[TagRedisRawCommand] = o.ObfuscateRedisString(span.Meta[TagRedisRawCommand]) +} diff --git a/pkg/trace/transform/transform.go b/pkg/trace/transform/transform.go index c1d5ff03b1a66..48cb662512f2c 100644 --- a/pkg/trace/transform/transform.go +++ b/pkg/trace/transform/transform.go @@ -13,14 +13,16 @@ import ( "strconv" "strings" + "go.opentelemetry.io/collector/pdata/pcommon" + "go.opentelemetry.io/collector/pdata/ptrace" + semconv "go.opentelemetry.io/collector/semconv/v1.6.1" + pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace" "github.com/DataDog/datadog-agent/pkg/trace/config" "github.com/DataDog/datadog-agent/pkg/trace/sampler" "github.com/DataDog/datadog-agent/pkg/trace/traceutil" "github.com/DataDog/datadog-agent/pkg/util/log" - "go.opentelemetry.io/collector/pdata/pcommon" - "go.opentelemetry.io/collector/pdata/ptrace" - semconv "go.opentelemetry.io/collector/semconv/v1.6.1" + "github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes" ) // OperationAndResourceNameV2Enabled checks if the new operation and resource name logic should be used @@ -49,6 +51,17 @@ func OtelSpanToDDSpanMinimal( resourceName = traceutil.GetOTelResourceV1(otelspan, otelres) } + // correct span type logic if using new resource receiver, keep same if on v1. separate from OperationAndResourceNameV2Enabled. + var spanType string + if conf.HasFeature("disable_receive_resource_spans_v2") { + spanType = traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, "span.type") + if spanType == "" { + spanType = traceutil.SpanKind2Type(otelspan, otelres) + } + } else { + spanType = traceutil.GetOTelSpanType(otelspan, otelres) + } + ddspan := &pb.Span{ Service: traceutil.GetOTelService(otelres, true), Name: operationName, @@ -58,7 +71,7 @@ func OtelSpanToDDSpanMinimal( ParentID: traceutil.OTelSpanIDToUint64(otelspan.ParentSpanID()), Start: int64(otelspan.StartTimestamp()), Duration: int64(otelspan.EndTimestamp()) - int64(otelspan.StartTimestamp()), - Type: traceutil.GetOTelSpanType(otelspan, otelres), + Type: spanType, Meta: make(map[string]string, otelres.Attributes().Len()+otelspan.Attributes().Len()), Metrics: map[string]float64{}, } @@ -88,6 +101,27 @@ func OtelSpanToDDSpanMinimal( return ddspan } +func isDatadogAPMConventionKey(k string) bool { + return k == "service.name" || k == "operation.name" || k == "resource.name" || k == "span.type" || k == "http.method" || k == "http.status_code" +} + +func setMetaOTLPWithHTTPMappings(k string, value string, ddspan *pb.Span) { + datadogKey, found := attributes.HTTPMappings[k] + switch { + case found && value != "": + ddspan.Meta[datadogKey] = value + case strings.HasPrefix(k, "http.request.header."): + key := fmt.Sprintf("http.request.headers.%s", strings.TrimPrefix(k, "http.request.header.")) + ddspan.Meta[key] = value + // Exclude Datadog APM conventions. 
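+	// (service.name, operation.name, resource.name, span.type, http.method, http.status_code, per isDatadogAPMConventionKey.)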
+ // These are handled above explicitly. + case !isDatadogAPMConventionKey(k): + SetMetaOTLP(ddspan, k, value) + default: + return + } +} + // OtelSpanToDDSpan converts an OTel span to a DD span. func OtelSpanToDDSpan( otelspan ptrace.Span, @@ -105,9 +139,8 @@ func OtelSpanToDDSpan( ddspan := OtelSpanToDDSpanMinimal(otelspan, otelres, lib, isTopLevel, topLevelByKind, conf, peerTagKeys) otelres.Attributes().Range(func(k string, v pcommon.Value) bool { - if k != "service.name" && k != "operation.name" && k != "resource.name" && k != "span.type" { - SetMetaOTLP(ddspan, k, v.AsString()) - } + value := v.AsString() + setMetaOTLPWithHTTPMappings(k, value, ddspan) return true }) @@ -144,11 +177,7 @@ func OtelSpanToDDSpan( case pcommon.ValueTypeInt: SetMetricOTLP(ddspan, k, float64(v.Int())) default: - // Exclude Datadog APM conventions. - // These are handled below explicitly. - if k != "http.method" && k != "http.status_code" && k != "service.name" && k != "operation.name" && k != "resource.name" && k != "span.type" { - SetMetaOTLP(ddspan, k, value) - } + setMetaOTLPWithHTTPMappings(k, value, ddspan) } // `http.method` was renamed to `http.request.method` in the HTTP stabilization from v1.23. @@ -158,7 +187,6 @@ func OtelSpanToDDSpan( // See https://datadoghq.atlassian.net/wiki/spaces/APM/pages/2357395856/Span+attributes#[inlineExtension]HTTP if k == "http.request.method" { gotMethodFromNewConv = true - ddspan.Meta["http.method"] = value } else if k == "http.method" && !gotMethodFromNewConv { ddspan.Meta["http.method"] = value } @@ -170,7 +198,6 @@ func OtelSpanToDDSpan( // See https://datadoghq.atlassian.net/wiki/spaces/APM/pages/2357395856/Span+attributes#[inlineExtension]HTTP if k == "http.response.status_code" { gotStatusCodeFromNewConv = true - ddspan.Meta["http.status_code"] = value } else if k == "http.status_code" && !gotStatusCodeFromNewConv { ddspan.Meta["http.status_code"] = value } diff --git a/pkg/util/cgroups/go.mod b/pkg/util/cgroups/go.mod index 0be6406d65503..c680dd792e58a 100644 --- a/pkg/util/cgroups/go.mod +++ b/pkg/util/cgroups/go.mod @@ -11,7 +11,7 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 - github.com/containerd/cgroups/v3 v3.0.4 + github.com/containerd/cgroups/v3 v3.0.5 github.com/google/go-cmp v0.6.0 github.com/karrick/godirwalk v1.17.0 github.com/stretchr/testify v1.10.0 @@ -29,8 +29,8 @@ require ( github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/sys v0.28.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + golang.org/x/sys v0.29.0 // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/util/cgroups/go.sum b/pkg/util/cgroups/go.sum index d2a0373b5e095..55f3ba7318c71 100644 --- a/pkg/util/cgroups/go.sum +++ b/pkg/util/cgroups/go.sum @@ -1,7 +1,7 @@ github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= -github.com/containerd/cgroups/v3 v3.0.4 h1:2fs7l3P0Qxb1nKWuJNFiwhp2CqiKzho71DQkDrHJIo4= -github.com/containerd/cgroups/v3 v3.0.4/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo= 
+github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -31,10 +31,10 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go b/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go index 773aa31b06baa..05943bf8cadfd 100644 --- a/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go +++ b/pkg/util/cloudproviders/cloudfoundry/cloudfoundry.go @@ -13,7 +13,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/log" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/util" + netutil "github.com/DataDog/datadog-agent/pkg/util/net" ) var ( @@ -22,7 +22,7 @@ var ( ) // Define alias in order to mock in the tests -var getFqdn = util.Fqdn +var getFqdn = netutil.Fqdn // GetHostAliases returns the host aliases from Cloud Foundry // diff --git a/pkg/util/cloudproviders/cloudfoundry/cloudfoundry_test.go b/pkg/util/cloudproviders/cloudfoundry/cloudfoundry_test.go index a54d762def126..47a0b4a83631d 100644 --- a/pkg/util/cloudproviders/cloudfoundry/cloudfoundry_test.go +++ b/pkg/util/cloudproviders/cloudfoundry/cloudfoundry_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/assert" configmock "github.com/DataDog/datadog-agent/pkg/config/mock" - "github.com/DataDog/datadog-agent/pkg/util" + netutil "github.com/DataDog/datadog-agent/pkg/util/net" ) func TestHostAliasDisable(t *testing.T) { @@ -30,7 +30,7 @@ func TestHostAliasDisable(t *testing.T) { func TestHostAlias(t *testing.T) { ctx := context.Background() - defer func() { getFqdn = util.Fqdn }() + defer func() { getFqdn = netutil.Fqdn }() mockConfig := configmock.New(t) mockConfig.SetWithoutSource("cloud_foundry", true) diff --git a/pkg/util/cloudproviders/cloudproviders.go b/pkg/util/cloudproviders/cloudproviders.go index d9de6ac5fa68c..4ce5d0dab6d45 100644 --- a/pkg/util/cloudproviders/cloudproviders.go +++ b/pkg/util/cloudproviders/cloudproviders.go @@ 
-13,9 +13,9 @@ import ( logcomp "github.com/DataDog/datadog-agent/comp/core/log/def" pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/util" "github.com/DataDog/datadog-agent/pkg/util/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/DataDog/datadog-agent/pkg/util/sort" "github.com/DataDog/datadog-agent/pkg/util/ec2" @@ -144,7 +144,7 @@ func GetHostAliases(ctx context.Context) []string { } wg.Wait() - return util.SortUniqInPlace(aliases) + return sort.UniqInPlace(aliases) } // GetPublicIPv4 returns the public IPv4 from different providers diff --git a/pkg/util/common.go b/pkg/util/common.go deleted file mode 100644 index 5403e6b25cffb..0000000000000 --- a/pkg/util/common.go +++ /dev/null @@ -1,201 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -// Package util provides various functions -package util - -import ( - "context" - "fmt" - "io" - "math/rand" - "net/http" - "os" - "path" - "path/filepath" - "time" - - "github.com/DataDog/datadog-agent/comp/core/autodiscovery/integration" - pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" - "github.com/DataDog/datadog-agent/pkg/version" -) - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -// CopyFile atomically copies file path `src“ to file path `dst`. -func CopyFile(src, dst string) error { - fi, err := os.Stat(src) - if err != nil { - return err - } - perm := fi.Mode() - - in, err := os.Open(src) - if err != nil { - return err - } - defer in.Close() - - tmp, err := os.CreateTemp(filepath.Dir(dst), "") - if err != nil { - return err - } - tmpName := tmp.Name() - - _, err = io.Copy(tmp, in) - if err != nil { - tmp.Close() - os.Remove(tmpName) - return err - } - - err = tmp.Close() - if err != nil { - os.Remove(tmpName) - return err - } - - err = os.Chmod(tmpName, perm) - if err != nil { - os.Remove(tmpName) - return err - } - - err = os.Rename(tmpName, dst) - if err != nil { - os.Remove(tmpName) - return err - } - - return nil -} - -// CopyFileAll calls CopyFile, but will create necessary directories for `dst`. -func CopyFileAll(src, dst string) error { - err := EnsureParentDirsExist(dst) - if err != nil { - return err - } - - return CopyFile(src, dst) -} - -// CopyDir copies directory recursively -func CopyDir(src, dst string) error { - var ( - err error - fds []os.DirEntry - srcinfo os.FileInfo - ) - - if srcinfo, err = os.Stat(src); err != nil { - return err - } - - if err = os.MkdirAll(dst, srcinfo.Mode()); err != nil { - return err - } - - if fds, err = os.ReadDir(src); err != nil { - return err - } - for _, fd := range fds { - s := path.Join(src, fd.Name()) - d := path.Join(dst, fd.Name()) - - if fd.IsDir() { - err = CopyDir(s, d) - } else { - err = CopyFile(s, d) - } - if err != nil { - return err - } - } - return nil -} - -// EnsureParentDirsExist makes a path immediately available for -// writing by creating the necessary parent directories. -func EnsureParentDirsExist(p string) error { - err := os.MkdirAll(filepath.Dir(p), os.ModePerm) - if err != nil { - return err - } - - return nil -} - -// HTTPHeaders returns a http headers including various basic information (User-Agent, Content-Type...). 
-func HTTPHeaders() map[string]string { - av, _ := version.Agent() - return map[string]string{ - "User-Agent": fmt.Sprintf("Datadog Agent/%s", av.GetNumber()), - "Content-Type": "application/x-www-form-urlencoded", - "Accept": "text/html, */*", - } -} - -// GetJSONSerializableMap returns a JSON serializable map from a raw map -func GetJSONSerializableMap(m interface{}) interface{} { - switch x := m.(type) { - // unbelievably I cannot collapse this into the next (identical) case - case map[interface{}]interface{}: - j := integration.JSONMap{} - for k, v := range x { - j[k.(string)] = GetJSONSerializableMap(v) - } - return j - case integration.RawMap: - j := integration.JSONMap{} - for k, v := range x { - j[k.(string)] = GetJSONSerializableMap(v) - } - return j - case integration.JSONMap: - j := integration.JSONMap{} - for k, v := range x { - j[k] = GetJSONSerializableMap(v) - } - return j - case []interface{}: - j := make([]interface{}, len(x)) - - for i, v := range x { - j[i] = GetJSONSerializableMap(v) - } - return j - } - return m - -} - -// GetGoRoutinesDump returns the stack trace of every Go routine of a running Agent. -func GetGoRoutinesDump() (string, error) { - ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) - if err != nil { - return "", err - } - - pprofURL := fmt.Sprintf("http://%v:%s/debug/pprof/goroutine?debug=2", - ipcAddress, pkgconfigsetup.Datadog().GetString("expvar_port")) - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - client := http.Client{} - req, err := http.NewRequest(http.MethodGet, pprofURL, nil) - if err != nil { - return "", err - } - resp, err := client.Do(req.WithContext(ctx)) - if err != nil { - return "", err - } - defer resp.Body.Close() - - data, err := io.ReadAll(resp.Body) - return string(data), err -} diff --git a/pkg/util/compression/compression.go b/pkg/util/compression/compression.go new file mode 100644 index 0000000000000..81cd2d4c5784a --- /dev/null +++ b/pkg/util/compression/compression.go @@ -0,0 +1,53 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package compression provides a set of constants describing the compression options +package compression + +import ( + "bytes" + "io" +) + +// ZlibKind defines a const value for the zlib compressor +const ZlibKind = "zlib" + +// ZstdKind defines a const value for the zstd compressor +const ZstdKind = "zstd" + +// GzipKind defines a const value for the gzip compressor +const GzipKind = "gzip" + +// NoneKind defines a const value for disabling compression +const NoneKind = "none" + +// ZlibEncoding is the content-encoding value for Zlib +const ZlibEncoding = "deflate" + +// ZstdEncoding is the content-encoding value for Zstd +const ZstdEncoding = "zstd" + +// GzipEncoding is the content-encoding value for Gzip +const GzipEncoding = "gzip" + +// Compressor is the interface that a given compression algorithm +// needs to implement +type Compressor interface { + Compress(src []byte) ([]byte, error) + Decompress(src []byte) ([]byte, error) + CompressBound(sourceLen int) int + ContentEncoding() string + NewStreamCompressor(output *bytes.Buffer) StreamCompressor +} + +// StreamCompressor is the interface that the compression algorithm +// should implement for streaming +type StreamCompressor interface { + io.WriteCloser + Flush() error +} + +// ZstdCompressionLevel is a wrapper type over int for the compression level for zstd compression, if that is selected. +type ZstdCompressionLevel int diff --git a/pkg/util/compression/go.mod b/pkg/util/compression/go.mod new file mode 100644 index 0000000000000..ae8bec1e39438 --- /dev/null +++ b/pkg/util/compression/go.mod @@ -0,0 +1,130 @@ +module github.com/DataDog/datadog-agent/pkg/util/compression + +go 1.22.0 + +replace github.com/DataDog/datadog-agent/pkg/util/log => ../log + +require ( + github.com/DataDog/datadog-agent/comp/core/config v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 + github.com/DataDog/zstd v1.5.6 + github.com/klauspost/compress v1.17.11 +) + +require ( + github.com/DataDog/datadog-agent/comp/core/flare/builder v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/flare/types v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect + github.com/DataDog/datadog-agent/comp/def v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/fxutil v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect + github.com/DataDog/viper v1.14.0 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/ebitengine/purego v0.8.1 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/hashicorp/hcl v1.0.1-vault-5 // indirect + github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.7.1 // indirect + github.com/spf13/cobra v1.8.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.10.0 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.18.0 // indirect + go.uber.org/fx v1.23.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect + golang.org/x/text v0.21.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) + +replace ( + github.com/DataDog/datadog-agent/comp/core/config => ../../../comp/core/config + github.com/DataDog/datadog-agent/pkg/util/defaultpaths => ../defaultpaths + github.com/DataDog/datadog-agent/pkg/util/option => ../option + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../scrubber + github.com/DataDog/datadog-agent/pkg/version => ../../version +) + +replace github.com/DataDog/datadog-agent/comp/api/api/def => ../../../comp/api/api/def + +replace github.com/DataDog/datadog-agent/comp/core/flare/builder => ../../../comp/core/flare/builder + +replace github.com/DataDog/datadog-agent/comp/core/flare/types => ../../../comp/core/flare/types + +replace github.com/DataDog/datadog-agent/comp/core/secrets => ../../../comp/core/secrets + +replace github.com/DataDog/datadog-agent/comp/core/telemetry => ../../../comp/core/telemetry + +replace github.com/DataDog/datadog-agent/comp/def => ../../../comp/def + +replace github.com/DataDog/datadog-agent/pkg/collector/check/defaults => ../../collector/check/defaults + +replace github.com/DataDog/datadog-agent/pkg/config/env => ../../config/env + +replace github.com/DataDog/datadog-agent/pkg/config/mock => ../../config/mock + +replace github.com/DataDog/datadog-agent/pkg/config/model => ../../config/model + +replace github.com/DataDog/datadog-agent/pkg/config/nodetreemodel => 
../../config/nodetreemodel + +replace github.com/DataDog/datadog-agent/pkg/config/setup => ../../config/setup + +replace github.com/DataDog/datadog-agent/pkg/config/teeconfig => ../../config/teeconfig + +replace github.com/DataDog/datadog-agent/pkg/util/executable => ../executable + +replace github.com/DataDog/datadog-agent/pkg/util/filesystem => ../filesystem + +replace github.com/DataDog/datadog-agent/pkg/util/fxutil => ../fxutil + +replace github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../hostname/validate + +replace github.com/DataDog/datadog-agent/pkg/util/pointer => ../pointer + +replace github.com/DataDog/datadog-agent/pkg/util/system => ../system + +replace github.com/DataDog/datadog-agent/pkg/util/system/socket => ../system/socket + +replace github.com/DataDog/datadog-agent/pkg/util/testutil => ../testutil + +replace github.com/DataDog/datadog-agent/pkg/util/winutil => ../winutil + +replace github.com/DataDog/datadog-agent/pkg/config/structure => ../../config/structure diff --git a/pkg/util/compression/go.sum b/pkg/util/compression/go.sum new file mode 100644 index 0000000000000..a6edf2e7c34b9 --- /dev/null +++ b/pkg/util/compression/go.sum @@ -0,0 +1,335 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= +github.com/DataDog/viper v1.14.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= +github.com/DataDog/zstd v1.5.6 h1:LbEglqepa/ipmmQJUDnSsfvA8e8IStVcGaFWDuxvGOY= +github.com/DataDog/zstd v1.5.6/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= +github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/ebitengine/purego v0.8.1 h1:sdRKd6plj7KYW33EH5As6YKfe8m9zbN9JMrOjNVF/BE= +github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= +github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= +github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM= +github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jtolds/gls 
v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uiaSepXwyf3o52HaUYcV+Tu66S3F5GA= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE= +github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod 
h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify 
v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200122045848-3419fae592fc/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.23.0 h1:lIr/gYWQGfTwGcSXWXu4vP5Ws6iqnNEIY+F/aFzCKTg= +go.uber.org/fx v1.23.0/go.mod h1:o/D9n+2mLP6v1EG+qsdT1O8wKopYAsqZasju97SDFCU= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 
h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod 
h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= diff --git a/pkg/util/compression/impl-gzip/gzip_strategy.go b/pkg/util/compression/impl-gzip/gzip_strategy.go new file mode 100644 index 0000000000000..c5f9a7b6c68b7 --- /dev/null +++ b/pkg/util/compression/impl-gzip/gzip_strategy.go @@ -0,0 +1,127 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package gzipimpl provides a set of functions for compressing with gzip +package gzipimpl + +import ( + "bytes" + "compress/gzip" + "io" + + "github.com/DataDog/datadog-agent/pkg/util/compression" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// Requires contains the compression level for gzip compression +type Requires struct { + Level int +} + +// GzipStrategy is the strategy for when serializer_compressor_kind is gzip +type GzipStrategy struct { + level int +} + +// New returns a new GzipStrategy +func New(req Requires) compression.Compressor { + level := req.Level + if level < gzip.NoCompression { + log.Warnf("Gzip compression level set to %d, minimum is %d.", level, gzip.NoCompression) + level = gzip.NoCompression + } else if level > gzip.BestCompression { + log.Warnf("Gzip compression level set to %d, maximum is %d.", level, gzip.BestCompression) + level = gzip.BestCompression + } + + return &GzipStrategy{ + level: level, + } +} + +// Compress will compress the data with gzip +func (s *GzipStrategy) Compress(src []byte) (result []byte, err error) { + var compressedPayload bytes.Buffer + gzipWriter, err := gzip.NewWriterLevel(&compressedPayload, s.level) + + if err != nil { + return nil, err + } + _, err = gzipWriter.Write(src) + if err != nil { + return nil, err + } + err = gzipWriter.Flush() + if err != nil { + return nil, err + } + err = gzipWriter.Close() + if err != nil { + return nil, err + } + + return compressedPayload.Bytes(), nil +} + +// Decompress will decompress the data with gzip +func (s *GzipStrategy) Decompress(src []byte) ([]byte, error) { + reader, err := gzip.NewReader(bytes.NewReader(src)) + if err != nil { + return nil, err + } + defer reader.Close() + + // Read all decompressed data + var result bytes.Buffer + _, err = io.Copy(&result, reader) + if err != nil { + return nil, err + } + + return result.Bytes(), nil +} + +// CompressBound returns the worst case size needed for a destination buffer +// when using gzip +// +// The worst case expansion is a few bytes for the gzip file header, plus +// 5 bytes per 32 KiB block, or an expansion ratio of 0.015% for large files. +// The additional 18 bytes come from the header (10 bytes) and trailer +// (8 bytes). There is no theoretical maximum to the header size, +// but we don't set any extra header fields, so it is safe to assume the fixed 10-byte header. +// +// Source: https://www.gnu.org/software/gzip/manual/html_node/Overview.html +// More details are in the linked RFC: https://www.ietf.org/rfc/rfc1952.txt +func (s *GzipStrategy) CompressBound(sourceLen int) int { + return sourceLen + (sourceLen/32768)*5 + 18 +} + +// ContentEncoding returns the content encoding value for gzip +func (s *GzipStrategy) ContentEncoding() string { + return compression.GzipEncoding +} + +// NewStreamCompressor returns a new gzip Writer +func (s *GzipStrategy) NewStreamCompressor(output *bytes.Buffer) compression.StreamCompressor { + // Ensure level is within a range that doesn't cause NewWriterLevel to error. + level := s.level + if level < gzip.HuffmanOnly { + log.Warnf("Gzip streaming compression level set to %d, minimum is %d. Setting to minimum.", level, gzip.HuffmanOnly) + level = gzip.HuffmanOnly + } + + if level > gzip.BestCompression { + log.Warnf("Gzip streaming compression level set to %d, maximum is %d. Setting to maximum.", level, gzip.BestCompression) + level = gzip.BestCompression + } + + writer, err := gzip.NewWriterLevel(output, level) + if err != nil { + log.Warnf("Error creating gzip writer with level %d.
Using default.", level) + writer = gzip.NewWriter(output) + } + + return writer +} diff --git a/comp/serializer/compression/impl-noop/no_strategy.go b/pkg/util/compression/impl-noop/no_strategy.go similarity index 78% rename from comp/serializer/compression/impl-noop/no_strategy.go rename to pkg/util/compression/impl-noop/no_strategy.go index 3c092a8a536d4..74acedca7c1cc 100644 --- a/comp/serializer/compression/impl-noop/no_strategy.go +++ b/pkg/util/compression/impl-noop/no_strategy.go @@ -9,23 +9,15 @@ package noopimpl import ( "bytes" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) -// Provides contains the compression component -type Provides struct { - Comp compression.Component -} - // NoopStrategy is the strategy for when serializer_compressor_kind is neither zlib nor zstd -type NoopStrategy struct { -} +type NoopStrategy struct{} -// NewComponent returns a new NoopStrategy for when kind is neither zlib nor zstd -func NewComponent() Provides { - return Provides{ - Comp: &NoopStrategy{}, - } +// New returns a new NoopStrategy for when kind is neither zlib nor zstd +func New() compression.Compressor { + return &NoopStrategy{} } // Compress implements the Compress method for NoopStrategy to satisfy the Compressor interface @@ -45,7 +37,7 @@ func (s *NoopStrategy) CompressBound(sourceLen int) int { // ContentEncoding implements the ContentEncoding method for NoopStrategy to satisfy the Compressor interface func (s *NoopStrategy) ContentEncoding() string { - return "" + return "identity" } // NewStreamCompressor returns a nil when there is no compression implementation. diff --git a/comp/serializer/compression/impl-zlib/zlib_strategy.go b/pkg/util/compression/impl-zlib/zlib_strategy.go similarity index 84% rename from comp/serializer/compression/impl-zlib/zlib_strategy.go rename to pkg/util/compression/impl-zlib/zlib_strategy.go index 0bd47af4f91fc..7bf8a20033429 100644 --- a/comp/serializer/compression/impl-zlib/zlib_strategy.go +++ b/pkg/util/compression/impl-zlib/zlib_strategy.go @@ -11,23 +11,15 @@ import ( "compress/zlib" "io" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) -// Provides contains the compression component -type Provides struct { - Comp compression.Component -} - // ZlibStrategy is the strategy for when serializer_compressor_kind is zlib -type ZlibStrategy struct { -} +type ZlibStrategy struct{} -// NewComponent returns a new ZlibStrategy -func NewComponent() Provides { - return Provides{ - Comp: &ZlibStrategy{}, - } +// New returns a new ZlibStrategy +func New() compression.Compressor { + return &ZlibStrategy{} } // Compress will compress the data with zlib diff --git a/pkg/util/compression/impl-zstd-nocgo/zstd_nocgo_strategy.go b/pkg/util/compression/impl-zstd-nocgo/zstd_nocgo_strategy.go new file mode 100644 index 0000000000000..d79eaa2ba8253 --- /dev/null +++ b/pkg/util/compression/impl-zstd-nocgo/zstd_nocgo_strategy.go @@ -0,0 +1,88 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +// Package zstdimpl provides a set of functions for compressing with zstd +package zstdimpl + +import ( + "bytes" + "os" + "strconv" + + "github.com/DataDog/datadog-agent/pkg/util/log" + "github.com/klauspost/compress/zstd" + + "github.com/DataDog/datadog-agent/pkg/util/compression" +) + +// Requires contains the compression level for zstd compression +type Requires struct { + Level compression.ZstdCompressionLevel +} + +// ZstdNoCgoStrategy can be manually selected via component - it's not used by any selector / config option +type ZstdNoCgoStrategy struct { + level int + encoder *zstd.Encoder +} + +// New returns a new ZstdNoCgoStrategy +func New(reqs Requires) compression.Compressor { + level := int(reqs.Level) + log.Debugf("Compressing native zstd at level %d", level) + + conc, err := strconv.Atoi(os.Getenv("ZSTD_NOCGO_CONCURRENCY")) + if err != nil { + conc = 1 + } + + window, err := strconv.Atoi(os.Getenv("ZSTD_NOCGO_WINDOW")) + if err != nil { + window = 1 << 15 + } + log.Debugf("native zstd concurrency %d", conc) + log.Debugf("native zstd window size %d", window) + encoder, err := zstd.NewWriter(nil, + zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(level)), + zstd.WithEncoderConcurrency(conc), + zstd.WithLowerEncoderMem(true), + zstd.WithWindowSize(window)) + if err != nil { + _ = log.Errorf("Error creating zstd encoder: %v", err) + return nil + } + + return &ZstdNoCgoStrategy{ + level: level, + encoder: encoder, + } +} + +// Compress will compress the data with zstd +func (s *ZstdNoCgoStrategy) Compress(src []byte) ([]byte, error) { + return s.encoder.EncodeAll(src, nil), nil +} + +// Decompress will decompress the data with zstd +func (s *ZstdNoCgoStrategy) Decompress(src []byte) ([]byte, error) { + decoder, _ := zstd.NewReader(nil) + return decoder.DecodeAll(src, nil) +} + +// CompressBound returns the worst case size needed for a destination buffer when using zstd +func (s *ZstdNoCgoStrategy) CompressBound(sourceLen int) int { + return s.encoder.MaxEncodedSize(sourceLen) +} + +// ContentEncoding returns the content encoding value for zstd +func (s *ZstdNoCgoStrategy) ContentEncoding() string { + return compression.ZstdEncoding +} + +// NewStreamCompressor returns a new zstd Writer +func (s *ZstdNoCgoStrategy) NewStreamCompressor(output *bytes.Buffer) compression.StreamCompressor { + writer, _ := zstd.NewWriter(output, zstd.WithEncoderLevel(zstd.EncoderLevelFromZstd(s.level))) + return writer +} diff --git a/comp/serializer/compression/impl-zstd/zstd_strategy.go b/pkg/util/compression/impl-zstd/zstd_strategy.go similarity index 80% rename from comp/serializer/compression/impl-zstd/zstd_strategy.go rename to pkg/util/compression/impl-zstd/zstd_strategy.go index 3b90737bbfce8..8727c25d3ca35 100644 --- a/comp/serializer/compression/impl-zstd/zstd_strategy.go +++ b/pkg/util/compression/impl-zstd/zstd_strategy.go @@ -11,17 +11,12 @@ import ( "github.com/DataDog/zstd" - compression "github.com/DataDog/datadog-agent/comp/serializer/compression/def" + "github.com/DataDog/datadog-agent/pkg/util/compression" ) // Requires contains the compression level for zstd compression type Requires struct { - Level int -} - -// Provides contains the compression component -type Provides struct { - Comp compression.Component + Level compression.ZstdCompressionLevel } // ZstdStrategy is the strategy for when serializer_compressor_kind is zstd @@ -29,12 +24,10 @@ type ZstdStrategy struct { level int } -// NewComponent returns a new ZstdStrategy -func NewComponent(reqs Requires) Provides { - return 
Provides{ - Comp: &ZstdStrategy{ - level: reqs.Level, - }, +// New returns a new ZstdStrategy +func New(reqs Requires) compression.Compressor { + return &ZstdStrategy{ + level: int(reqs.Level), } } diff --git a/pkg/util/compression/selector/fromconfig.go b/pkg/util/compression/selector/fromconfig.go new file mode 100644 index 0000000000000..6b3e5143e560a --- /dev/null +++ b/pkg/util/compression/selector/fromconfig.go @@ -0,0 +1,31 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package selector provides correct compression impl to fx +package selector + +import ( + "github.com/DataDog/datadog-agent/comp/core/config" + common "github.com/DataDog/datadog-agent/pkg/util/compression" +) + +// FromConfig will return the compression algorithm specified in the provided config +// under the `serializer_compressor_kind` key. +// If `zstd` the compression level is taken from the serializer_zstd_compressor_level +// key. +func FromConfig(cfg config.Reader) common.Compressor { + kind := cfg.GetString("serializer_compressor_kind") + var level int + + switch kind { + case common.ZstdKind: + level = cfg.GetInt("serializer_zstd_compressor_level") + case common.GzipKind: + // There is no configuration option for gzip compression level when set via this method. + level = 6 + } + + return NewCompressor(kind, level) +} diff --git a/pkg/util/compression/selector/no-zlib-no-zstd.go b/pkg/util/compression/selector/no-zlib-no-zstd.go new file mode 100644 index 0000000000000..282c3e369715b --- /dev/null +++ b/pkg/util/compression/selector/no-zlib-no-zstd.go @@ -0,0 +1,40 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !zlib && !zstd + +// Package selector provides correct compression impl to fx +package selector + +import ( + common "github.com/DataDog/datadog-agent/pkg/util/compression" + implgzip "github.com/DataDog/datadog-agent/pkg/util/compression/impl-gzip" + implnoop "github.com/DataDog/datadog-agent/pkg/util/compression/impl-noop" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// NewCompressor returns a new Compressor based on serializer_compressor_kind +// This function is called only when there is no zlib or zstd tag +func NewCompressor(kind string, level int) common.Compressor { + switch kind { + case common.GzipKind: + return implgzip.New(implgzip.Requires{ + Level: level, + }) + case common.NoneKind: + return implnoop.New() + default: + log.Error("invalid compression set") + return implnoop.New() + } +} + +// NewNoopCompressor returns a new Noop Compressor. It does not do any +// compression, but can be used to create a compressor that does at a later +// point. +// This function is called only when there is no zlib or zstd tag +func NewNoopCompressor() common.Compressor { + return implnoop.New() +} diff --git a/pkg/util/compression/selector/zlib-and-zstd.go b/pkg/util/compression/selector/zlib-and-zstd.go new file mode 100644 index 0000000000000..5f3aafb8a5a06 --- /dev/null +++ b/pkg/util/compression/selector/zlib-and-zstd.go @@ -0,0 +1,48 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. 
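A brief aside on the new impl-zstd-nocgo strategy above: it is not reachable through the selector or any config option, and it reads two environment variables (ZSTD_NOCGO_CONCURRENCY, ZSTD_NOCGO_WINDOW) once, when the encoder is built. A minimal sketch of wiring it up by hand follows; the import alias, the level 3 and the 64 KiB window are illustrative choices, not defaults taken from this PR.

```go
package main

import (
	"fmt"
	"os"

	common "github.com/DataDog/datadog-agent/pkg/util/compression"
	zstdnocgo "github.com/DataDog/datadog-agent/pkg/util/compression/impl-zstd-nocgo"
)

func main() {
	// Both knobs are read at construction time; unset or invalid values
	// fall back to concurrency 1 and a 32 KiB window.
	os.Setenv("ZSTD_NOCGO_CONCURRENCY", "2")
	os.Setenv("ZSTD_NOCGO_WINDOW", fmt.Sprint(1<<16)) // 64 KiB

	c := zstdnocgo.New(zstdnocgo.Requires{Level: common.ZstdCompressionLevel(3)})

	out, err := c.Compress([]byte("pure-Go zstd, no cgo"))
	if err != nil {
		panic(err)
	}
	back, err := c.Decompress(out)
	if err != nil {
		panic(err)
	}
	fmt.Println(c.ContentEncoding(), len(out), string(back))
}
```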
+// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build zlib && zstd + +// Package selector provides correct compression impl to fx +package selector + +import ( + common "github.com/DataDog/datadog-agent/pkg/util/compression" + implgzip "github.com/DataDog/datadog-agent/pkg/util/compression/impl-gzip" + implnoop "github.com/DataDog/datadog-agent/pkg/util/compression/impl-noop" + implzlib "github.com/DataDog/datadog-agent/pkg/util/compression/impl-zlib" + implzstd "github.com/DataDog/datadog-agent/pkg/util/compression/impl-zstd" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// NewCompressor returns a new Compressor based on serializer_compressor_kind +// This variant is compiled only when both the zlib and zstd build tags are included +func NewCompressor(kind string, level int) common.Compressor { + switch kind { + case common.ZlibKind: + return implzlib.New() + case common.ZstdKind: + return implzstd.New(implzstd.Requires{ + Level: common.ZstdCompressionLevel(level), + }) + case common.GzipKind: + return implgzip.New(implgzip.Requires{ + Level: level, + }) + case common.NoneKind: + return implnoop.New() + default: + log.Error("invalid compression set") + return implnoop.New() + } +} + +// NewNoopCompressor returns a new Noop Compressor. It does not do any +// compression, but can be used to create a compressor that does at a later +// point. +// This variant is compiled only when both the zlib and zstd build tags are included +func NewNoopCompressor() common.Compressor { + return implnoop.New() +} diff --git a/pkg/util/compression/selector/zlib-no-zstd.go b/pkg/util/compression/selector/zlib-no-zstd.go new file mode 100644 index 0000000000000..4244c59fbff6a --- /dev/null +++ b/pkg/util/compression/selector/zlib-no-zstd.go @@ -0,0 +1,46 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build zlib && !zstd + +// Package selector provides correct compression impl to fx +package selector + +import ( + common "github.com/DataDog/datadog-agent/pkg/util/compression" + implgzip "github.com/DataDog/datadog-agent/pkg/util/compression/impl-gzip" + implnoop "github.com/DataDog/datadog-agent/pkg/util/compression/impl-noop" + implzlib "github.com/DataDog/datadog-agent/pkg/util/compression/impl-zlib" + "github.com/DataDog/datadog-agent/pkg/util/log" +) + +// NewCompressor returns a new Compressor based on serializer_compressor_kind +// This variant is compiled only when the zlib build tag is included and zstd is not +func NewCompressor(kind string, level int) common.Compressor { + switch kind { + case common.ZlibKind: + return implzlib.New() + case common.ZstdKind: + log.Warn("zstd build tag not included. using zlib") + return implzlib.New() + case common.GzipKind: + return implgzip.New(implgzip.Requires{ + Level: level, + }) + case common.NoneKind: + return implnoop.New() + default: + log.Error("invalid compression set") + return implnoop.New() + } +} + +// NewNoopCompressor returns a new Noop Compressor. It does not do any +// compression, but can be used to create a compressor that does at a later +// point.
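Taken together, FromConfig and the build-tag variants of NewCompressor form a single entry point for picking a compressor by kind. Here is a minimal sketch of that call path, assuming a build with both the zlib and zstd tags; the kind and level values are illustrative.

```go
package main

import (
	"fmt"

	common "github.com/DataDog/datadog-agent/pkg/util/compression"
	"github.com/DataDog/datadog-agent/pkg/util/compression/selector"
)

func main() {
	// Equivalent to what FromConfig does after reading
	// serializer_compressor_kind and serializer_zstd_compressor_level.
	c := selector.NewCompressor(common.ZstdKind, 1)

	out, err := c.Compress([]byte(`{"series": []}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(c.ContentEncoding(), len(out))

	// Under `zlib && !zstd` the same call logs a warning and falls back to
	// zlib; with neither tag, only gzip and the noop "identity" encoder
	// remain available.
}
```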
+// This function is called only when there is no zlib or zstd tag +func NewNoopCompressor() common.Compressor { + return implnoop.New() +} diff --git a/pkg/util/containers/metrics/containerd/collector.go b/pkg/util/containers/metrics/containerd/collector.go index 32796977df88b..d6f52607d41a6 100644 --- a/pkg/util/containers/metrics/containerd/collector.go +++ b/pkg/util/containers/metrics/containerd/collector.go @@ -20,7 +20,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/util/containerd" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" //nolint:revive // TODO(CINT) Fix revive linter cutil "github.com/DataDog/datadog-agent/pkg/util/containerd" @@ -39,7 +39,7 @@ const ( func init() { provider.RegisterCollector(provider.CollectorFactory{ ID: collectorID, - Constructor: func(cache *provider.Cache, _ optional.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { + Constructor: func(cache *provider.Cache, _ option.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { return newContainerdCollector(cache) }, }) diff --git a/pkg/util/containers/metrics/cri/collector.go b/pkg/util/containers/metrics/cri/collector.go index be41ce4bd4ea3..8c6b746f7bd6b 100644 --- a/pkg/util/containers/metrics/cri/collector.go +++ b/pkg/util/containers/metrics/cri/collector.go @@ -16,7 +16,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/util/containers/cri" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -28,7 +28,7 @@ const ( func init() { provider.RegisterCollector(provider.CollectorFactory{ ID: collectorID, - Constructor: func(cache *provider.Cache, _ optional.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { + Constructor: func(cache *provider.Cache, _ option.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { return newCRICollector(cache) }, }) diff --git a/pkg/util/containers/metrics/docker/collector.go b/pkg/util/containers/metrics/docker/collector.go index 35e7deea6fb26..0fee784773ff6 100644 --- a/pkg/util/containers/metrics/docker/collector.go +++ b/pkg/util/containers/metrics/docker/collector.go @@ -23,7 +23,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/docker" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -38,7 +38,7 @@ const ( func init() { provider.RegisterCollector(provider.CollectorFactory{ ID: collectorID, - Constructor: func(cache *provider.Cache, wmeta optional.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { + Constructor: func(cache *provider.Cache, wmeta option.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { return newDockerCollector(cache, wmeta) }, }) @@ -47,10 +47,10 @@ func init() { type dockerCollector struct { du *docker.DockerUtil pidCache *provider.Cache - metadataStore optional.Option[workloadmeta.Component] + metadataStore option.Option[workloadmeta.Component] } -func newDockerCollector(cache 
*provider.Cache, wmeta optional.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { +func newDockerCollector(cache *provider.Cache, wmeta option.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { var collectorMetadata provider.CollectorMetadata if !env.IsFeaturePresent(env.Docker) { diff --git a/pkg/util/containers/metrics/docker/collector_test.go b/pkg/util/containers/metrics/docker/collector_test.go index 25d9fdbcf94de..b8419d3cd9704 100644 --- a/pkg/util/containers/metrics/docker/collector_test.go +++ b/pkg/util/containers/metrics/docker/collector_test.go @@ -25,7 +25,7 @@ import ( workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -94,7 +94,7 @@ func TestGetContainerIDForPID(t *testing.T) { collector := dockerCollector{ pidCache: provider.NewCache(pidCacheGCInterval), - metadataStore: optional.NewOption[workloadmeta.Component](mockStore), + metadataStore: option.New[workloadmeta.Component](mockStore), } mockStore.Set(&workloadmeta.Container{ diff --git a/pkg/util/containers/metrics/ecsfargate/collector.go b/pkg/util/containers/metrics/ecsfargate/collector.go index 0a8ccf85ac5af..e797b22e27d2f 100644 --- a/pkg/util/containers/metrics/ecsfargate/collector.go +++ b/pkg/util/containers/metrics/ecsfargate/collector.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata" v2 "github.com/DataDog/datadog-agent/pkg/util/ecs/metadata/v2" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" ) @@ -40,7 +40,7 @@ var ecsUnsetMemoryLimit = uint64(math.Pow(2, 62)) func init() { provider.RegisterCollector(provider.CollectorFactory{ ID: collectorID, - Constructor: func(cache *provider.Cache, _ optional.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { + Constructor: func(cache *provider.Cache, _ option.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { return newEcsFargateCollector(cache) }, }) diff --git a/pkg/util/containers/metrics/kubelet/collector.go b/pkg/util/containers/metrics/kubelet/collector.go index e2d5a2d6e1ca7..8c541da241c49 100644 --- a/pkg/util/containers/metrics/kubelet/collector.go +++ b/pkg/util/containers/metrics/kubelet/collector.go @@ -20,7 +20,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" kutil "github.com/DataDog/datadog-agent/pkg/util/kubernetes/kubelet" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" "k8s.io/kubelet/pkg/apis/stats/v1alpha1" @@ -41,7 +41,7 @@ const ( func init() { provider.RegisterCollector(provider.CollectorFactory{ ID: collectorID, - Constructor: func(cache *provider.Cache, wmeta optional.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { + Constructor: func(cache *provider.Cache, wmeta option.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { instance, ok := wmeta.Get() if !ok { return provider.CollectorMetadata{}, errors.New("missing workloadmeta 
component") diff --git a/pkg/util/containers/metrics/provider/provider.go b/pkg/util/containers/metrics/provider/provider.go index de23c913160dc..aed328f8860aa 100644 --- a/pkg/util/containers/metrics/provider/provider.go +++ b/pkg/util/containers/metrics/provider/provider.go @@ -14,7 +14,7 @@ import ( "sync" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/retry" ) @@ -118,7 +118,7 @@ type GenericProvider struct { } // GetProvider returns the metrics provider singleton -func GetProvider(wmeta optional.Option[workloadmeta.Component]) Provider { +func GetProvider(wmeta option.Option[workloadmeta.Component]) Provider { initMetricsProvider.Do(func() { metricsProvider = newProvider(wmeta) }) @@ -126,7 +126,7 @@ func GetProvider(wmeta optional.Option[workloadmeta.Component]) Provider { return metricsProvider } -func newProvider(wmeta optional.Option[workloadmeta.Component]) *GenericProvider { +func newProvider(wmeta option.Option[workloadmeta.Component]) *GenericProvider { provider := &GenericProvider{ cache: NewCache(cacheGCInterval), metaCollector: newMetaCollector(), diff --git a/pkg/util/containers/metrics/provider/provider_test.go b/pkg/util/containers/metrics/provider/provider_test.go index 357f9a95f998f..e5cbbb51c9e47 100644 --- a/pkg/util/containers/metrics/provider/provider_test.go +++ b/pkg/util/containers/metrics/provider/provider_test.go @@ -9,7 +9,7 @@ import ( "testing" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" "github.com/stretchr/testify/assert" ) @@ -48,7 +48,7 @@ func TestRuntimeMetadataString(t *testing.T) { } func TestGenericProvider(t *testing.T) { - provider := newProvider(optional.NewNoneOption[workloadmeta.Component]()) + provider := newProvider(option.None[workloadmeta.Component]()) // First collector is going to be priority 1 on stats and 2 on network statsCollector := &dummyCollector{ diff --git a/pkg/util/containers/metrics/provider/registry.go b/pkg/util/containers/metrics/provider/registry.go index 15dc40da027f0..02f186a7fca3e 100644 --- a/pkg/util/containers/metrics/provider/registry.go +++ b/pkg/util/containers/metrics/provider/registry.go @@ -16,7 +16,7 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const ( @@ -52,7 +52,7 @@ type CollectorMetadata struct { // CollectorFactory allows to register a factory to dynamically create Collector at startup type CollectorFactory struct { ID string - Constructor func(*Cache, optional.Option[workloadmeta.Component]) (CollectorMetadata, error) + Constructor func(*Cache, option.Option[workloadmeta.Component]) (CollectorMetadata, error) } // GenericProvider offers an interface to retrieve a metrics collector @@ -77,7 +77,7 @@ func newCollectorRegistry() *collectorRegistry { } // catalogUpdatedCallback : blocking call in the retryCollectors() function (background goroutine) -func (cr *collectorRegistry) run(c context.Context, cache *Cache, wmeta optional.Option[workloadmeta.Component], catalogUpdatedCallback func(CollectorCatalog)) { +func (cr *collectorRegistry) run(c 
context.Context, cache *Cache, wmeta option.Option[workloadmeta.Component], catalogUpdatedCallback func(CollectorCatalog)) { cr.discoveryOnce.Do(func() { cr.catalogUpdatedCallback = catalogUpdatedCallback @@ -89,7 +89,7 @@ func (cr *collectorRegistry) run(c context.Context, cache *Cache, wmeta optional }) } -func (cr *collectorRegistry) collectorDiscovery(c context.Context, cache *Cache, wmeta optional.Option[workloadmeta.Component]) { +func (cr *collectorRegistry) collectorDiscovery(c context.Context, cache *Cache, wmeta option.Option[workloadmeta.Component]) { ticker := time.NewTicker(minRetryInterval) defer ticker.Stop() for { @@ -115,7 +115,7 @@ func (cr *collectorRegistry) registerCollector(collectorFactory CollectorFactory } // retryCollectors is not thread safe on purpose. It's only called by a single goroutine from `cr.run` -func (cr *collectorRegistry) retryCollectors(cache *Cache, wmeta optional.Option[workloadmeta.Component]) int { +func (cr *collectorRegistry) retryCollectors(cache *Cache, wmeta option.Option[workloadmeta.Component]) int { cr.registeredCollectorsLock.Lock() defer cr.registeredCollectorsLock.Unlock() diff --git a/pkg/util/containers/metrics/provider/registry_test.go b/pkg/util/containers/metrics/provider/registry_test.go index a58d528a7a8e0..8873c3157cfcb 100644 --- a/pkg/util/containers/metrics/provider/registry_test.go +++ b/pkg/util/containers/metrics/provider/registry_test.go @@ -10,7 +10,7 @@ import ( "testing" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/stretchr/testify/assert" ) @@ -20,13 +20,13 @@ func TestCollectorRegistry(t *testing.T) { assert.Nil(t, c.effectiveCollectors[RuntimeMetadata{runtime: RuntimeNameDocker}]) // Check for collectors (none are registered, should not change output) - c.retryCollectors(nil, optional.NewNoneOption[workloadmeta.Component]()) + c.retryCollectors(nil, option.None[workloadmeta.Component]()) assert.Nil(t, c.effectiveCollectors[RuntimeMetadata{runtime: RuntimeNameDocker}]) c.registerCollector( CollectorFactory{ ID: "dummy1", - Constructor: func(*Cache, optional.Option[workloadmeta.Component]) (CollectorMetadata, error) { + Constructor: func(*Cache, option.Option[workloadmeta.Component]) (CollectorMetadata, error) { collector := dummyCollector{ id: "dummy1", selfContainerID: "dummy1", @@ -44,7 +44,7 @@ func TestCollectorRegistry(t *testing.T) { c.registerCollector( CollectorFactory{ ID: "dummy2", - Constructor: func(*Cache, optional.Option[workloadmeta.Component]) (CollectorMetadata, error) { + Constructor: func(*Cache, option.Option[workloadmeta.Component]) (CollectorMetadata, error) { return CollectorMetadata{}, ErrPermaFail }, }, @@ -54,7 +54,7 @@ func TestCollectorRegistry(t *testing.T) { c.registerCollector( CollectorFactory{ ID: "dummy3", - Constructor: func(*Cache, optional.Option[workloadmeta.Component]) (CollectorMetadata, error) { + Constructor: func(*Cache, option.Option[workloadmeta.Component]) (CollectorMetadata, error) { if dummy3Retries < 2 { dummy3Retries++ return CollectorMetadata{}, fmt.Errorf("not yet okay") @@ -90,7 +90,7 @@ func TestCollectorRegistry(t *testing.T) { assert.Equal(t, expected, actual) } - wmeta := optional.NewNoneOption[workloadmeta.Component]() + wmeta := option.None[workloadmeta.Component]() collectorsToRetry := c.retryCollectors(nil, wmeta) assert.Equal(t, 1, collectorsToRetry) assertCollectors(map[RuntimeMetadata]string{ @@ -121,7 
+121,7 @@ func TestCollectorRegistry(t *testing.T) { c.registerCollector( CollectorFactory{ ID: "dummy4", - Constructor: func(*Cache, optional.Option[workloadmeta.Component]) (CollectorMetadata, error) { + Constructor: func(*Cache, option.Option[workloadmeta.Component]) (CollectorMetadata, error) { collector := dummyCollector{ id: "dummy4", selfContainerID: "dummy4", diff --git a/pkg/util/containers/metrics/system/collector_linux.go b/pkg/util/containers/metrics/system/collector_linux.go index 8de32d49e825f..22aa728ef6f35 100644 --- a/pkg/util/containers/metrics/system/collector_linux.go +++ b/pkg/util/containers/metrics/system/collector_linux.go @@ -22,7 +22,7 @@ import ( "github.com/DataDog/datadog-agent/pkg/util/cgroups" "github.com/DataDog/datadog-agent/pkg/util/containers/metrics/provider" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/DataDog/datadog-agent/pkg/util/pointer" systemutils "github.com/DataDog/datadog-agent/pkg/util/system" ) @@ -37,7 +37,7 @@ const ( func init() { provider.RegisterCollector(provider.CollectorFactory{ ID: systemCollectorID, - Constructor: func(cache *provider.Cache, wlm optional.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { + Constructor: func(cache *provider.Cache, wlm option.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { return newSystemCollector(cache, wlm) }, }) @@ -52,7 +52,7 @@ type systemCollector struct { hostCgroupNamespace bool } -func newSystemCollector(cache *provider.Cache, wlm optional.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { +func newSystemCollector(cache *provider.Cache, wlm option.Option[workloadmeta.Component]) (provider.CollectorMetadata, error) { var err error var hostPrefix string var collectorMetadata provider.CollectorMetadata diff --git a/pkg/util/defaultpaths/go.mod b/pkg/util/defaultpaths/go.mod index 0f0c498c8154e..5bfcfd0b30df4 100644 --- a/pkg/util/defaultpaths/go.mod +++ b/pkg/util/defaultpaths/go.mod @@ -13,7 +13,7 @@ require ( github.com/DataDog/datadog-agent/pkg/util/executable v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/winutil v0.56.0-rc.3 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 ) require ( diff --git a/pkg/util/defaultpaths/go.sum b/pkg/util/defaultpaths/go.sum index 82beecfd023fb..3053742cabe86 100644 --- a/pkg/util/defaultpaths/go.sum +++ b/pkg/util/defaultpaths/go.sum @@ -16,8 +16,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git 
a/pkg/util/ecs/metadata/v3or4/client_test.go b/pkg/util/ecs/metadata/v3or4/client_test.go index f8d55bf8135d7..0951e2404cc2d 100644 --- a/pkg/util/ecs/metadata/v3or4/client_test.go +++ b/pkg/util/ecs/metadata/v3or4/client_test.go @@ -50,7 +50,7 @@ func TestGetV4TaskWithTagsWithoutRetryWithDelay(t *testing.T) { ts.Close() - // default timeout is 500ms while the delay is 1.5s + // default timeout is 1000ms while the delay is 1.5s require.True(t, os.IsTimeout(err)) require.Nil(t, task) require.Equal(t, uint64(1), dummyECS.RequestCount.Load()) @@ -77,11 +77,10 @@ func TestGetV4TaskWithTagsWithRetryWithDelay(t *testing.T) { require.NoError(t, err) require.Equal(t, expected, task) - // 3 requests: 1 initial request + 2 retries and server delay is 1.5s - // 1st request failed: request timeout is 500ms - // 2nd request failed: request timeout is 1s - // 3rd request succeed: request timeout is 2s - require.Equal(t, uint64(3), dummyECS.RequestCount.Load()) + // 2 requests: 1 initial request + 1 retry and server delay is 1.5s + // 1st request failed: request timeout is 1s + // 2nd request succeed: request timeout is 2s + require.Equal(t, uint64(2), dummyECS.RequestCount.Load()) } // expected is an expected Task from ./testdata/task.json diff --git a/pkg/util/encoding/binary.go b/pkg/util/encoding/binary.go new file mode 100644 index 0000000000000..1ad9d2a220788 --- /dev/null +++ b/pkg/util/encoding/binary.go @@ -0,0 +1,36 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package encoding is for utilities relating to the encoding package from the stdlib +package encoding + +import ( + "encoding" +) + +// BinaryUnmarshalCallback returns a function that will decode the argument byte slice into T +// using `newFn` to create an instance of T and the encoding.BinaryUnmarshaler interface to do the actual conversion. +// `callback` will be called with the resulting T. +// If the argument byte slice is empty, callback will be called with `nil`. +// Unmarshalling errors will be provided to the callback as the second argument. The data argument to the callback +// may still be non-nil even if there was an error. This allows the callback to handle the allocated object, even +// in the face of errors. +func BinaryUnmarshalCallback[T encoding.BinaryUnmarshaler](newFn func() T, callback func(T, error)) func(buf []byte) { + return func(buf []byte) { + if len(buf) == 0 { + var nilvalue T + callback(nilvalue, nil) + return + } + + d := newFn() + if err := d.UnmarshalBinary(buf); err != nil { + // pass d here so callback can choose how to deal with the data + callback(d, err) + return + } + callback(d, nil) + } +} diff --git a/pkg/util/encoding/binary_test.go b/pkg/util/encoding/binary_test.go new file mode 100644 index 0000000000000..4412d5c317651 --- /dev/null +++ b/pkg/util/encoding/binary_test.go @@ -0,0 +1,62 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. 
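The BinaryUnmarshalCallback helper above is generic over encoding.BinaryUnmarshaler, so any type with an UnmarshalBinary method can be plugged in; the test file that follows exercises the edge cases, and the short self-contained sketch below shows typical usage. The counter type and its little-endian layout are invented for the illustration and are not part of this change.

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/encoding"
)

// counter is a hypothetical type used only for this illustration.
type counter struct {
	value uint32
}

// UnmarshalBinary decodes a 4-byte little-endian value.
func (c *counter) UnmarshalBinary(data []byte) error {
	if len(data) < 4 {
		return errors.New("short buffer")
	}
	c.value = binary.LittleEndian.Uint32(data)
	return nil
}

func main() {
	handle := encoding.BinaryUnmarshalCallback(
		func() *counter { return new(counter) },
		func(c *counter, err error) {
			if err != nil {
				// c is still non-nil here, so partial data can be inspected or dropped.
				fmt.Println("decode error:", err)
				return
			}
			if c == nil {
				fmt.Println("empty buffer, nothing to do")
				return
			}
			fmt.Println("decoded:", c.value)
		},
	)

	handle([]byte{42, 0, 0, 0}) // decoded: 42
	handle(nil)                 // empty buffer, nothing to do
	handle([]byte{1})           // decode error: short buffer
}
```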
+ +package encoding + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +type emptyTestType struct { +} + +func (tt *emptyTestType) UnmarshalBinary(_ []byte) error { + return nil +} + +type errorTestType struct{} + +func (tt *errorTestType) UnmarshalBinary(_ []byte) error { + return errors.New("error") +} + +type dataTestType struct { + buf []byte +} + +func (tt *dataTestType) UnmarshalBinary(data []byte) error { + tt.buf = data + return nil +} + +func TestBinaryUnmarshalCallback(t *testing.T) { + cb := BinaryUnmarshalCallback(func() *emptyTestType { + return new(emptyTestType) + }, func(x *emptyTestType, err error) { + assert.Nil(t, x) + assert.NoError(t, err) + }) + cb(nil) + cb([]byte{}) + + cb = BinaryUnmarshalCallback(func() *errorTestType { + return new(errorTestType) + }, func(x *errorTestType, err error) { + assert.NotNil(t, x) + assert.Error(t, err) + }) + cb([]byte{1, 2}) + + cb = BinaryUnmarshalCallback(func() *dataTestType { + return new(dataTestType) + }, func(x *dataTestType, err error) { + assert.Equal(t, []byte{1, 2}, x.buf) + assert.NoError(t, err) + }) + cb([]byte{1, 2}) +} diff --git a/pkg/util/filesystem/file.go b/pkg/util/filesystem/file.go index b08a5b0cff5bc..4516f218bf69f 100644 --- a/pkg/util/filesystem/file.go +++ b/pkg/util/filesystem/file.go @@ -7,7 +7,10 @@ package filesystem import ( "bufio" + "io" "os" + "path" + "path/filepath" ) // FileExists returns true if a file exists and is accessible, false otherwise @@ -31,3 +34,107 @@ func ReadLines(filename string) ([]string, error) { } return ret, scanner.Err() } + +// CopyFile atomically copies file path `src“ to file path `dst`. +func CopyFile(src, dst string) error { + fi, err := os.Stat(src) + if err != nil { + return err + } + perm := fi.Mode() + + in, err := os.Open(src) + if err != nil { + return err + } + defer in.Close() + + tmp, err := os.CreateTemp(filepath.Dir(dst), "") + if err != nil { + return err + } + tmpName := tmp.Name() + + _, err = io.Copy(tmp, in) + if err != nil { + tmp.Close() + os.Remove(tmpName) + return err + } + + err = tmp.Close() + if err != nil { + os.Remove(tmpName) + return err + } + + err = os.Chmod(tmpName, perm) + if err != nil { + os.Remove(tmpName) + return err + } + + err = os.Rename(tmpName, dst) + if err != nil { + os.Remove(tmpName) + return err + } + + return nil +} + +// CopyFileAll calls CopyFile, but will create necessary directories for `dst`. +func CopyFileAll(src, dst string) error { + err := EnsureParentDirsExist(dst) + if err != nil { + return err + } + + return CopyFile(src, dst) +} + +// CopyDir copies directory recursively +func CopyDir(src, dst string) error { + var ( + err error + fds []os.DirEntry + srcinfo os.FileInfo + ) + + if srcinfo, err = os.Stat(src); err != nil { + return err + } + + if err = os.MkdirAll(dst, srcinfo.Mode()); err != nil { + return err + } + + if fds, err = os.ReadDir(src); err != nil { + return err + } + for _, fd := range fds { + s := path.Join(src, fd.Name()) + d := path.Join(dst, fd.Name()) + + if fd.IsDir() { + err = CopyDir(s, d) + } else { + err = CopyFile(s, d) + } + if err != nil { + return err + } + } + return nil +} + +// EnsureParentDirsExist makes a path immediately available for +// writing by creating the necessary parent directories. 
+func EnsureParentDirsExist(p string) error { + err := os.MkdirAll(filepath.Dir(p), os.ModePerm) + if err != nil { + return err + } + + return nil +} diff --git a/pkg/util/filesystem/file_test.go b/pkg/util/filesystem/file_test.go new file mode 100644 index 0000000000000..97e5b333b5063 --- /dev/null +++ b/pkg/util/filesystem/file_test.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +package filesystem + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCopyDir(t *testing.T) { + assert := assert.New(t) + src := t.TempDir() + dst := t.TempDir() + + files := map[string]string{ + "a/b/c/d.txt": "d.txt", + "e/f/g/h.txt": "h.txt", + "i/j/k.txt": "k.txt", + } + + for file, content := range files { + p := filepath.Join(src, file) + err := os.MkdirAll(filepath.Dir(p), os.ModePerm) + assert.NoError(err) + err = os.WriteFile(p, []byte(content), os.ModePerm) + assert.NoError(err) + } + err := CopyDir(src, dst) + assert.NoError(err) + + for file, content := range files { + p := filepath.Join(dst, file) + actual, err := os.ReadFile(p) + assert.NoError(err) + assert.Equal(string(actual), content) + } +} diff --git a/pkg/util/filesystem/go.mod b/pkg/util/filesystem/go.mod index 31b6f1fbd4ab0..6ffb2b0912b7f 100644 --- a/pkg/util/filesystem/go.mod +++ b/pkg/util/filesystem/go.mod @@ -13,9 +13,9 @@ require ( github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb - github.com/shirou/gopsutil/v4 v4.24.11 + github.com/shirou/gopsutil/v4 v4.24.12 github.com/stretchr/testify v1.10.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 ) require ( @@ -26,7 +26,7 @@ require ( github.com/ebitengine/purego v0.8.1 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/util/filesystem/go.sum b/pkg/util/filesystem/go.sum index ebdb96f4beb1d..3b4ce01753544 100644 --- a/pkg/util/filesystem/go.sum +++ b/pkg/util/filesystem/go.sum @@ -15,12 +15,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/rogpeppe/go-internal v1.13.1 
h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= @@ -31,8 +31,8 @@ golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/util/flavor/go.mod b/pkg/util/flavor/go.mod index 91efb20fa615f..9ee96a99d22c2 100644 --- a/pkg/util/flavor/go.mod +++ b/pkg/util/flavor/go.mod @@ -21,7 +21,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../log - github.com/DataDog/datadog-agent/pkg/util/optional => ../optional + github.com/DataDog/datadog-agent/pkg/util/option => ../option github.com/DataDog/datadog-agent/pkg/util/pointer => ../pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../system @@ -39,20 +39,20 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/log 
v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -64,24 +64,24 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/util/flavor/go.sum b/pkg/util/flavor/go.sum index 7fdf16db5981c..15a68c06d091d 100644 --- a/pkg/util/flavor/go.sum +++ b/pkg/util/flavor/go.sum @@ -71,7 +71,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -109,8 +108,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -137,8 +136,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -155,8 +154,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -169,8 +168,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L 
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -181,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -235,8 +234,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -273,8 +272,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod 
h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -302,8 +301,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/util/fxutil/go.mod b/pkg/util/fxutil/go.mod index 48681aa9b89a5..b5e870c77f0cf 100644 --- a/pkg/util/fxutil/go.mod +++ b/pkg/util/fxutil/go.mod @@ -4,7 +4,7 @@ go 1.22.0 require ( github.com/DataDog/datadog-agent/comp/def v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/optional v0.55.0 + github.com/DataDog/datadog-agent/pkg/util/option v0.55.0 github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.10.0 go.uber.org/fx v1.23.0 @@ -12,7 +12,7 @@ require ( replace ( github.com/DataDog/datadog-agent/comp/def => ../../../comp/def - github.com/DataDog/datadog-agent/pkg/util/optional => ../optional + github.com/DataDog/datadog-agent/pkg/util/option => ../option ) @@ -24,6 +24,6 @@ require ( go.uber.org/dig v1.18.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/sys v0.29.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/util/fxutil/go.sum b/pkg/util/fxutil/go.sum index 3dcc180d9ba1c..66bf0027fa26b 100644 --- a/pkg/util/fxutil/go.sum +++ b/pkg/util/fxutil/go.sum @@ -28,8 +28,8 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git 
a/pkg/util/fxutil/provide_optional.go b/pkg/util/fxutil/provide_optional.go index f8b4ff23aa5bb..dbb0a9a49e2ee 100644 --- a/pkg/util/fxutil/provide_optional.go +++ b/pkg/util/fxutil/provide_optional.go @@ -6,20 +6,20 @@ package fxutil import ( - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" "go.uber.org/fx" ) // ProvideOptional takes a type parameter and fx.Provide's a NewOption wrapper for that type func ProvideOptional[T any]() fx.Option { - return fx.Provide(func(actualType T) optional.Option[T] { - return optional.NewOption[T](actualType) + return fx.Provide(func(actualType T) option.Option[T] { + return option.New[T](actualType) }) } // ProvideNoneOptional provide a none optional for the type func ProvideNoneOptional[T any]() fx.Option { - return fx.Provide(func() optional.Option[T] { - return optional.NewNoneOption[T]() + return fx.Provide(func() option.Option[T] { + return option.None[T]() }) } diff --git a/pkg/util/goroutinesdump/goroutinedump.go b/pkg/util/goroutinesdump/goroutinedump.go new file mode 100644 index 0000000000000..0231f72dc21fd --- /dev/null +++ b/pkg/util/goroutinesdump/goroutinedump.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package goroutinesdump provides functions to get the stack trace of every Go routine of a running Agent. +package goroutinesdump + +import ( + "context" + "fmt" + "io" + "net/http" + "time" + + pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" +) + +// Get returns the stack trace of every Go routine of a running Agent. 
+func Get() (string, error) { + ipcAddress, err := pkgconfigsetup.GetIPCAddress(pkgconfigsetup.Datadog()) + if err != nil { + return "", err + } + + pprofURL := fmt.Sprintf("http://%v:%s/debug/pprof/goroutine?debug=2", + ipcAddress, pkgconfigsetup.Datadog().GetString("expvar_port")) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + client := http.Client{} + req, err := http.NewRequest(http.MethodGet, pprofURL, nil) + if err != nil { + return "", err + } + resp, err := client.Do(req.WithContext(ctx)) + if err != nil { + return "", err + } + defer resp.Body.Close() + + data, err := io.ReadAll(resp.Body) + return string(data), err +} diff --git a/pkg/util/grpc/go.mod b/pkg/util/grpc/go.mod index 8c98d5f5187ee..6bfe764a112bb 100644 --- a/pkg/util/grpc/go.mod +++ b/pkg/util/grpc/go.mod @@ -24,7 +24,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../log - github.com/DataDog/datadog-agent/pkg/util/optional => ../optional + github.com/DataDog/datadog-agent/pkg/util/option => ../option github.com/DataDog/datadog-agent/pkg/util/pointer => ../pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../scrubber github.com/DataDog/datadog-agent/pkg/util/system => ../system @@ -37,33 +37,33 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/api v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/proto v0.56.0-rc.3 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/stretchr/testify v1.10.0 - golang.org/x/net v0.33.0 - google.golang.org/grpc v1.67.1 + golang.org/x/net v0.34.0 + google.golang.org/grpc v1.69.4 ) require ( - cloud.google.com/go/compute/metadata v0.5.2 // indirect + cloud.google.com/go/compute/metadata v0.6.0 // indirect github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/utils v0.56.0-rc.3 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + 
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -73,36 +73,37 @@ require ( github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 // indirect github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tinylib/msgp v1.2.4 // indirect + github.com/tinylib/msgp v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/oauth2 v0.23.0 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect - google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/protobuf v1.36.3 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/pkg/util/grpc/go.sum b/pkg/util/grpc/go.sum index c91536ab269b2..d9772b72324ce 100644 --- a/pkg/util/grpc/go.sum +++ 
b/pkg/util/grpc/go.sum @@ -1,7 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo= -cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/viper v1.14.0 h1:dIjTe/uJiah+QFqFZ+MXeqgmUvWhg37l37ZxFWxr3is= github.com/DataDog/viper v1.14.0/go.mod h1:wDdUVJ2SHaMaPrCZrlRCObwkubsX8j5sme3LaR/SGTc= @@ -13,7 +12,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -62,6 +60,10 @@ github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vb github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= @@ -85,12 +87,12 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -100,8 +102,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vb github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0 h1:VD1gqscl4nYs1YxVuSdemTrSgTKrwOWDK0FVFMqm+Cg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.0/go.mod h1:4EgsQoS4TOhJizV+JTFg40qx1Ofh3XmXEQNBpgvNT40= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/hcl v1.0.1-vault-5 h1:kI3hhbbyzr4dldA8UdTb7ZlVVlI2DACdCfz31RPDgJM= github.com/hashicorp/hcl v1.0.1-vault-5/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= @@ -129,8 +131,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -159,8 +161,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= 
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -177,8 +179,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -191,8 +193,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -203,8 +205,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -223,8 +225,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify 
v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tinylib/msgp v1.2.4 h1:yLFeUGostXXSGW5vxfT5dXG/qzkn4schv2I7at5+hVU= -github.com/tinylib/msgp v1.2.4/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -241,6 +243,18 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -268,8 +282,8 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint 
v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -281,7 +295,6 @@ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -290,14 +303,12 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -314,14 +325,13 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= @@ -353,13 +363,10 @@ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoA google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= -google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28 h1:M0KvPgPmDZHPlbRbaNU1APr28TvwvvdUPlSv7PUvy8g= -google.golang.org/genproto/googleapis/api v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:dguCy7UOdZhTvLzDyt15+rOrawrpM4q7DD9dQ1P11P4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA= +google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= @@ -368,11 +375,10 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.67.1 h1:zWnc1Vrcno+lHZCOofnIMvycFcc0QRGIzm9dhnDX68E= -google.golang.org/grpc v1.67.1/go.mod h1:1gLDyUQU7CTLJI90u3nXZ9ekeghjeM7pTDZlqFNg2AA= 
-google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A= +google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/util/http/go.mod b/pkg/util/http/go.mod index 56e028a8317d6..f45c0e970077f 100644 --- a/pkg/util/http/go.mod +++ b/pkg/util/http/go.mod @@ -21,7 +21,7 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../pkg/util/pointer github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/system => ../../../pkg/util/system @@ -32,29 +32,29 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/stretchr/testify v1.10.0 - golang.org/x/net v0.33.0 + golang.org/x/net v0.34.0 ) require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect 
github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -66,24 +66,24 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/util/http/go.sum b/pkg/util/http/go.sum index 1051ef2578f7d..2503097a4102a 100644 --- a/pkg/util/http/go.sum +++ b/pkg/util/http/go.sum @@ -71,7 +71,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -109,8 +108,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats 
v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -137,8 +136,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -155,8 +154,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -169,8 +168,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod 
h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -181,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -235,8 +234,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -254,8 +253,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= -golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -275,8 +274,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -304,8 +303,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/util/installinfo/install_info_nix.go b/pkg/util/installinfo/install_info_nix.go index 58124e8ab1a79..aa1ba9321c25c 100644 --- a/pkg/util/installinfo/install_info_nix.go +++ b/pkg/util/installinfo/install_info_nix.go @@ -13,11 +13,9 @@ package installinfo import ( - "context" "encoding/json" "fmt" "os" - "os/exec" "path/filepath" "strconv" "strings" @@ -28,8 +26,6 @@ import ( "gopkg.in/yaml.v2" ) -const execTimeout = 30 * time.Second - var ( configDir = "/etc/datadog-agent" installInfoFile = filepath.Join(configDir, "install_info") @@ -37,14 +33,12 @@ var ( ) // WriteInstallInfo write install info and signature files -func WriteInstallInfo(installerVersion, installType string) error { +func WriteInstallInfo(tool, toolVersion, installType string) error { // avoid rewriting the files if they already exist if _, err := os.Stat(installInfoFile); err == nil { - log.Info("Install info file already exists, skipping") return nil } - tool, version := getToolVersion() - if err := writeInstallInfo(tool, version, installerVersion); err != nil { + if err := writeInstallInfo(tool, toolVersion, installType); err != nil { return fmt.Errorf("failed to write install info file: %v", err) } if err := writeInstallSignature(installType); err != nil { @@ -63,50 +57,6 @@ func RmInstallInfo() { } } -func getToolVersion() (string, string) { - tool := "unknown" - version := "unknown" - if _, err := exec.LookPath("dpkg-query"); err == nil { - tool = "dpkg" - toolVersion, err := getDpkgVersion() - if err == nil { - version = toolVersion - } - return tool, version - } - if _, err := exec.LookPath("rpm"); err == nil { - tool = "rpm" - toolVersion, err := getRPMVersion() - if err == nil { - version = 
fmt.Sprintf("rpm-%s", toolVersion) - } - } - return tool, version -} - -func getRPMVersion() (string, error) { - cancelctx, cancelfunc := context.WithTimeout(context.Background(), execTimeout) - defer cancelfunc() - output, err := exec.CommandContext(cancelctx, "rpm", "-q", "-f", "/bin/rpm", "--queryformat", "%%{VERSION}").Output() - return string(output), err -} - -func getDpkgVersion() (string, error) { - cancelctx, cancelfunc := context.WithTimeout(context.Background(), execTimeout) - defer cancelfunc() - cmd := exec.CommandContext(cancelctx, "dpkg-query", "--showformat=${Version}", "--show", "dpkg") - output, err := cmd.Output() - if err != nil { - log.Warnf("Failed to get dpkg version: %s", err) - return "", err - } - splitVersion := strings.Split(strings.TrimSpace(string(output)), ".") - if len(splitVersion) < 3 { - return "", fmt.Errorf("failed to parse dpkg version: %s", string(output)) - } - return strings.Join(splitVersion[:3], "."), nil -} - func writeInstallInfo(tool, version, installerVersion string) error { info := installInfoMethod{ Method: InstallInfo{ diff --git a/pkg/util/installinfo/install_info_nix_test.go b/pkg/util/installinfo/install_info_nix_test.go index 34acacd32d715..57e648c0bcce4 100644 --- a/pkg/util/installinfo/install_info_nix_test.go +++ b/pkg/util/installinfo/install_info_nix_test.go @@ -74,11 +74,11 @@ func TestDoubleWrite(t *testing.T) { s, _ := getFromPath(installInfoFile) assert.Nil(t, s) - assert.NoError(t, WriteInstallInfo("v1", "")) + assert.NoError(t, WriteInstallInfo("dpkg", "v1", "")) v1, err := getFromPath(installInfoFile) assert.NoError(t, err) - assert.NoError(t, WriteInstallInfo("v2", "")) + assert.NoError(t, WriteInstallInfo("dpkg", "v2", "")) v2, err := getFromPath(installInfoFile) assert.NoError(t, err) @@ -89,7 +89,7 @@ func TestRmInstallInfo(t *testing.T) { tmpDir := t.TempDir() installInfoFile = filepath.Join(tmpDir, "install_info") installSigFile = filepath.Join(tmpDir, "install.json") - assert.NoError(t, WriteInstallInfo("v1", "")) + assert.NoError(t, WriteInstallInfo("tool", "v1", "")) assert.True(t, fileExists(installInfoFile)) assert.True(t, fileExists(installSigFile)) diff --git a/pkg/util/installinfo/install_info_windows.go b/pkg/util/installinfo/install_info_windows.go new file mode 100644 index 0000000000000..e1f052f5bed99 --- /dev/null +++ b/pkg/util/installinfo/install_info_windows.go @@ -0,0 +1,14 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build windows + +package installinfo + +// WriteInstallInfo write install info and signature files +func WriteInstallInfo(_, _, _ string) error { + // Placeholder for Windows, this is done in tools/windows/DatadogAgentInstaller + return nil +} diff --git a/pkg/util/kubernetes/apiserver/apiserver.go b/pkg/util/kubernetes/apiserver/apiserver.go index d96a07666fbff..09f9674977939 100644 --- a/pkg/util/kubernetes/apiserver/apiserver.go +++ b/pkg/util/kubernetes/apiserver/apiserver.go @@ -206,7 +206,8 @@ func WaitForAPIClient(ctx context.Context) (*APIClient, error) { } } -func getClientConfig(timeout time.Duration, qps float32, burst int) (*rest.Config, error) { +// GetClientConfig returns a REST client configuration +func GetClientConfig(timeout time.Duration, qps float32, burst int) (*rest.Config, error) { var clientConfig *rest.Config var err error cfgPath := pkgconfigsetup.Datadog().GetString("kubernetes_kubeconfig_path") @@ -253,7 +254,7 @@ func getClientConfig(timeout time.Duration, qps float32, burst int) (*rest.Confi func GetKubeClient(timeout time.Duration, qps float32, burst int) (kubernetes.Interface, error) { // TODO: Remove custom warning logger when we remove usage of ComponentStatus rest.SetDefaultWarningHandler(CustomWarningLogger{}) - clientConfig, err := getClientConfig(timeout, qps, burst) + clientConfig, err := GetClientConfig(timeout, qps, burst) if err != nil { return nil, err } @@ -262,7 +263,7 @@ func GetKubeClient(timeout time.Duration, qps float32, burst int) (kubernetes.In } func getKubeDynamicClient(timeout time.Duration, qps float32, burst int) (dynamic.Interface, error) { - clientConfig, err := getClientConfig(timeout, qps, burst) + clientConfig, err := GetClientConfig(timeout, qps, burst) if err != nil { return nil, err } @@ -271,7 +272,7 @@ func getKubeDynamicClient(timeout time.Duration, qps float32, burst int) (dynami } func getCRDClient(timeout time.Duration, qps float32, burst int) (*clientset.Clientset, error) { - clientConfig, err := getClientConfig(timeout, qps, burst) + clientConfig, err := GetClientConfig(timeout, qps, burst) if err != nil { return nil, err } @@ -280,7 +281,7 @@ func getCRDClient(timeout time.Duration, qps float32, burst int) (*clientset.Cli } func getAPISClient(timeout time.Duration, qps float32, burst int) (*apiregistrationclient.ApiregistrationV1Client, error) { - clientConfig, err := getClientConfig(timeout, qps, burst) + clientConfig, err := GetClientConfig(timeout, qps, burst) if err != nil { return nil, err } @@ -288,7 +289,7 @@ func getAPISClient(timeout time.Duration, qps float32, burst int) (*apiregistrat } func getKubeVPAClient(timeout time.Duration, qps float32, burst int) (vpa.Interface, error) { - clientConfig, err := getClientConfig(timeout, qps, burst) + clientConfig, err := GetClientConfig(timeout, qps, burst) if err != nil { return nil, err } @@ -297,7 +298,7 @@ func getKubeVPAClient(timeout time.Duration, qps float32, burst int) (vpa.Interf } func getScaleClient(discoveryCl discovery.ServerResourcesInterface, restMapper meta.RESTMapper, timeout time.Duration, qps float32, burst int) (scale.ScalesGetter, error) { - clientConfig, err := getClientConfig(timeout, qps, burst) + clientConfig, err := GetClientConfig(timeout, qps, burst) if err != nil { return nil, err } @@ -661,7 +662,7 @@ func (c *APIClient) GetARandomNodeName(ctx context.Context) (string, error) { // RESTClient returns a new REST client func (c *APIClient) RESTClient(apiPath string, groupVersion *schema.GroupVersion, negotiatedSerializer 
runtime.NegotiatedSerializer) (*rest.RESTClient, error) { - clientConfig, err := getClientConfig(c.defaultClientTimeout, standardClientQPSLimit, standardClientQPSBurst) + clientConfig, err := GetClientConfig(c.defaultClientTimeout, standardClientQPSLimit, standardClientQPSBurst) if err != nil { return nil, err } @@ -675,7 +676,7 @@ func (c *APIClient) RESTClient(apiPath string, groupVersion *schema.GroupVersion // MetadataClient returns a new kubernetes metadata client func (c *APIClient) MetadataClient() (metadata.Interface, error) { - clientConfig, err := getClientConfig(c.defaultInformerTimeout, standardClientQPSLimit, standardClientQPSBurst) + clientConfig, err := GetClientConfig(c.defaultInformerTimeout, standardClientQPSLimit, standardClientQPSBurst) if err != nil { return nil, err } @@ -686,7 +687,7 @@ func (c *APIClient) MetadataClient() (metadata.Interface, error) { // NewSPDYExecutor returns a new SPDY executor for the provided method and URL func (c *APIClient) NewSPDYExecutor(apiPath string, groupVersion *schema.GroupVersion, negotiatedSerializer runtime.NegotiatedSerializer, method string, url *url.URL) (remotecommand.Executor, error) { - clientConfig, err := getClientConfig(c.defaultClientTimeout, standardClientQPSLimit, standardClientQPSBurst) + clientConfig, err := GetClientConfig(c.defaultClientTimeout, standardClientQPSLimit, standardClientQPSBurst) if err != nil { return nil, err } @@ -697,3 +698,18 @@ func (c *APIClient) NewSPDYExecutor(apiPath string, groupVersion *schema.GroupVe return remotecommand.NewSPDYExecutor(clientConfig, method, url) } + +// GetKubeSecret fetches a secret from k8s +func GetKubeSecret(namespace string, name string) (map[string][]byte, error) { + kubeClient, err := GetKubeClient(10*time.Second, 0, 0) // Default QPS and burst to Kube client defaults using 0) + if err != nil { + return nil, err + } + + secret, err := kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + return secret.Data, nil +} diff --git a/pkg/util/kubernetes/apiserver/apiserver_nocompile.go b/pkg/util/kubernetes/apiserver/apiserver_nocompile.go index 5c34f2e7105e0..2b17bf65ff3d9 100644 --- a/pkg/util/kubernetes/apiserver/apiserver_nocompile.go +++ b/pkg/util/kubernetes/apiserver/apiserver_nocompile.go @@ -11,9 +11,6 @@ package apiserver import ( "context" "errors" - "time" - - "k8s.io/client-go/kubernetes" apiv1 "github.com/DataDog/datadog-agent/pkg/clusteragent/api/v1" "github.com/DataDog/datadog-agent/pkg/util/log" @@ -64,9 +61,7 @@ func GetNodeLabels(_ *APIClient, _ string) (map[string]string, error) { return nil, nil } -// GetKubeClient returns a Kubernetes client. 
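With `getClientConfig` exported as `GetClientConfig` and the new `GetKubeSecret` helper, callers outside the package can read a secret's data without wiring up a client themselves. A hedged usage sketch; the namespace and secret name are illustrative, and on builds without the kubeapiserver tag the stub returns `ErrNotCompiled`:

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver"
)

func main() {
	// Hypothetical namespace and secret name, purely for illustration.
	data, err := apiserver.GetKubeSecret("datadog", "datadog-secret")
	if err != nil {
		fmt.Println("could not fetch secret:", err)
		return
	}
	for key := range data { // values are []byte; avoid printing them
		fmt.Println("found secret key:", key)
	}
}
```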
-// -//nolint:revive // TODO(CINT) Fix revive linter -func GetKubeClient(_ time.Duration, _ float32, _ int) (kubernetes.Interface, error) { +// GetKubeSecret fetches a secret from k8s +func GetKubeSecret(string, string) (map[string][]byte, error) { return nil, ErrNotCompiled } diff --git a/pkg/util/kubernetes/apiserver/controllers/controllers.go b/pkg/util/kubernetes/apiserver/controllers/controllers.go index bb3c3dda78e42..d90f81e5a503a 100644 --- a/pkg/util/kubernetes/apiserver/controllers/controllers.go +++ b/pkg/util/kubernetes/apiserver/controllers/controllers.go @@ -27,7 +27,7 @@ import ( pkgconfigsetup "github.com/DataDog/datadog-agent/pkg/config/setup" "github.com/DataDog/datadog-agent/pkg/util/kubernetes/apiserver" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" ) const autoscalerNowHandleMsgEvent = "Autoscaler is now handled by the Cluster-Agent" @@ -73,7 +73,7 @@ type ControllerContext struct { IsLeaderFunc func() bool EventRecorder record.EventRecorder WorkloadMeta workloadmeta.Component - DatadogClient optional.Option[datadogclient.Component] + DatadogClient option.Option[datadogclient.Component] StopCh chan struct{} } diff --git a/pkg/util/log/go.mod b/pkg/util/log/go.mod index a439a62283f76..6f9b4cd1b83b7 100644 --- a/pkg/util/log/go.mod +++ b/pkg/util/log/go.mod @@ -2,7 +2,11 @@ module github.com/DataDog/datadog-agent/pkg/util/log go 1.22.0 -replace github.com/DataDog/datadog-agent/pkg/util/scrubber => ../scrubber +replace ( + github.com/DataDog/datadog-agent/pkg/util/compression => ../compression + github.com/DataDog/datadog-agent/pkg/util/scrubber => ../scrubber + github.com/cihub/seelog => github.com/cihub/seelog v0.0.0-20151216151435-d2c6e5aa9fbf // v2.6 +) require ( github.com/DataDog/datadog-agent/pkg/util/scrubber v0.56.0-rc.3 diff --git a/pkg/util/log/go.sum b/pkg/util/log/go.sum index ffca41d546584..6cfc51252f2be 100644 --- a/pkg/util/log/go.sum +++ b/pkg/util/log/go.sum @@ -1,5 +1,5 @@ -github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= -github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/cihub/seelog v0.0.0-20151216151435-d2c6e5aa9fbf h1:XI2tOTCBqEnMyN2j1yPBI07yQHeywUSCEf8YWqf0oKw= +github.com/cihub/seelog v0.0.0-20151216151435-d2c6e5aa9fbf/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= diff --git a/pkg/util/log/setup/go.mod b/pkg/util/log/setup/go.mod index f2c45da509f53..0c18e93de1c12 100644 --- a/pkg/util/log/setup/go.mod +++ b/pkg/util/log/setup/go.mod @@ -21,19 +21,20 @@ replace ( github.com/DataDog/datadog-agent/pkg/util/fxutil => ../../../../pkg/util/fxutil github.com/DataDog/datadog-agent/pkg/util/hostname/validate => ../../../../pkg/util/hostname/validate github.com/DataDog/datadog-agent/pkg/util/log => ../../../../pkg/util/log - github.com/DataDog/datadog-agent/pkg/util/optional => ../../../../pkg/util/optional + github.com/DataDog/datadog-agent/pkg/util/option => ../../../../pkg/util/option github.com/DataDog/datadog-agent/pkg/util/pointer => ../../../../pkg/util/pointer 
github.com/DataDog/datadog-agent/pkg/util/scrubber => ../../../util/scrubber/ github.com/DataDog/datadog-agent/pkg/util/system => ../../../../pkg/util/system github.com/DataDog/datadog-agent/pkg/util/system/socket => ../../../../pkg/util/system/socket github.com/DataDog/datadog-agent/pkg/util/testutil => ../../../../pkg/util/testutil github.com/DataDog/datadog-agent/pkg/util/winutil => ../../../../pkg/util/winutil + github.com/cihub/seelog => github.com/cihub/seelog v0.0.0-20151216151435-d2c6e5aa9fbf // v2.6 ) require ( github.com/DataDog/datadog-agent/pkg/config/mock v0.59.0 - github.com/DataDog/datadog-agent/pkg/config/model v0.59.0 - github.com/DataDog/datadog-agent/pkg/util/log v0.59.1 + github.com/DataDog/datadog-agent/pkg/config/model v0.61.0 + github.com/DataDog/datadog-agent/pkg/util/log v0.60.1 github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 github.com/stretchr/testify v1.10.0 ) @@ -42,19 +43,19 @@ require ( github.com/DataDog/datadog-agent/comp/core/secrets v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/collector/check/defaults v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/env v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/config/nodetreemodel v0.61.0 // indirect github.com/DataDog/datadog-agent/pkg/config/setup v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/structure v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/config/teeconfig v0.59.0 // indirect github.com/DataDog/datadog-agent/pkg/util/executable v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/filesystem v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/filesystem v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/hostname/validate v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/optional v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/pointer v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/scrubber v0.59.1 // indirect - github.com/DataDog/datadog-agent/pkg/util/system v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/option v0.59.0 // indirect + github.com/DataDog/datadog-agent/pkg/util/pointer v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/scrubber v0.60.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/system v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/util/system/socket v0.59.0 // indirect - github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 // indirect + github.com/DataDog/datadog-agent/pkg/util/winutil v0.60.1 // indirect github.com/DataDog/datadog-agent/pkg/version v0.59.1 // indirect github.com/DataDog/viper v1.14.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect @@ -65,24 +66,24 @@ require ( github.com/hashicorp/hcl v1.0.1-vault-5 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat 
v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/shirou/gopsutil/v4 v4.24.11 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/shirou/gopsutil/v4 v4.24.12 // indirect github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.7.0 // indirect + github.com/spf13/cast v1.7.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/atomic v1.11.0 // indirect - golang.org/x/exp v0.0.0-20241210194714-1829a127f884 // indirect - golang.org/x/sys v0.28.0 // indirect + golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect + golang.org/x/sys v0.29.0 // indirect golang.org/x/text v0.21.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/pkg/util/log/setup/go.sum b/pkg/util/log/setup/go.sum index 7fdf16db5981c..40fc4bb4d4941 100644 --- a/pkg/util/log/setup/go.sum +++ b/pkg/util/log/setup/go.sum @@ -21,8 +21,8 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs= -github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= +github.com/cihub/seelog v0.0.0-20151216151435-d2c6e5aa9fbf h1:XI2tOTCBqEnMyN2j1yPBI07yQHeywUSCEf8YWqf0oKw= +github.com/cihub/seelog v0.0.0-20151216151435-d2c6e5aa9fbf/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -71,7 +71,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -109,8 +108,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a 
h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= @@ -137,8 +136,8 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -155,8 +154,8 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc= -github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -169,8 +168,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= 
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -181,8 +180,8 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.7.0 h1:ntdiHjuueXFgm5nzDRdOS4yfT43P5Fnud6DH50rz/7w= -github.com/spf13/cast v1.7.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= +github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= @@ -235,8 +234,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884 h1:Y/Mj/94zIQQGHVSv1tTtQBDaQaJe62U9bkDZKKyhPCU= -golang.org/x/exp v0.0.0-20241210194714-1829a127f884/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA= +golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -273,8 +272,8 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= @@ -302,8 +301,8 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= 
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU= +google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/pkg/util/net/docs.go b/pkg/util/net/docs.go new file mode 100644 index 0000000000000..5ca07b4dedebc --- /dev/null +++ b/pkg/util/net/docs.go @@ -0,0 +1,7 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +// Package net provides network utils. +package net diff --git a/pkg/util/host.go b/pkg/util/net/host.go similarity index 98% rename from pkg/util/host.go rename to pkg/util/net/host.go index 57b974cb64308..988e60784b3d5 100644 --- a/pkg/util/host.go +++ b/pkg/util/net/host.go @@ -5,7 +5,7 @@ //go:build !serverless -package util +package net import ( "net" diff --git a/pkg/util/host_serverless.go b/pkg/util/net/host_serverless.go similarity index 96% rename from pkg/util/host_serverless.go rename to pkg/util/net/host_serverless.go index 1337fb4056ccc..70c1c9a969b20 100644 --- a/pkg/util/host_serverless.go +++ b/pkg/util/net/host_serverless.go @@ -5,7 +5,7 @@ //go:build serverless -package util +package net // Fqdn returns the FQDN for the host if any func Fqdn(hostname string) string { diff --git a/pkg/util/optional/go.mod b/pkg/util/option/go.mod similarity index 87% rename from pkg/util/optional/go.mod rename to pkg/util/option/go.mod index 296224899d8b2..d09b45a5c2273 100644 --- a/pkg/util/optional/go.mod +++ b/pkg/util/option/go.mod @@ -1,4 +1,4 @@ -module github.com/DataDog/datadog-agent/pkg/util/optional +module github.com/DataDog/datadog-agent/pkg/util/option go 1.22.0 diff --git a/pkg/util/optional/go.sum b/pkg/util/option/go.sum similarity index 100% rename from pkg/util/optional/go.sum rename to pkg/util/option/go.sum diff --git a/pkg/util/optional/optional.go b/pkg/util/option/option.go similarity index 67% rename from pkg/util/optional/optional.go rename to pkg/util/option/option.go index aa0437139dd1a..f7791246e7f79 100644 --- a/pkg/util/optional/optional.go +++ b/pkg/util/option/option.go @@ -3,8 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -// Package optional has optional types and functions used by Agent. -package optional +// Package option has optional types and functions used by Agent. +package option // Option represents an optional type. // By default, no value is set and a call to Get() returns (T{}, false) @@ -13,28 +13,28 @@ type Option[T any] struct { set bool } -// NewOption creates a new instance of Option[T] with a value set. A call to Get() will returns (value, true) -func NewOption[T any](value T) Option[T] { +// New creates a new instance of Option[T] with a value set. 
A call to Get() will returns (value, true) +func New[T any](value T) Option[T] { return Option[T]{ value: value, set: true, } } -// NewOptionPtr creates a new instance of Option[T] with a value set. A call to Get() will returns (value, true) -func NewOptionPtr[T any](value T) *Option[T] { - option := NewOption[T](value) +// NewPtr creates a new instance of Option[T] with a value set. A call to Get() will returns (value, true) +func NewPtr[T any](value T) *Option[T] { + option := New[T](value) return &option } -// NewNoneOption creates a new instance of Option[T] without any value set. -func NewNoneOption[T any]() Option[T] { +// None creates a new instance of Option[T] without any value set. +func None[T any]() Option[T] { return Option[T]{} } -// NewNoneOptionPtr creates a new instance of Option[T] without any value set. -func NewNoneOptionPtr[T any]() *Option[T] { - option := NewNoneOption[T]() +// NonePtr creates a new instance of Option[T] without any value set. +func NonePtr[T any]() *Option[T] { + option := None[T]() return &option } @@ -58,9 +58,9 @@ func (o *Option[T]) Reset() { func MapOption[T1 any, T2 any](optional Option[T1], fct func(T1) T2) Option[T2] { value, ok := optional.Get() if !ok { - return NewNoneOption[T2]() + return None[T2]() } - return NewOption(fct(value)) + return New(fct(value)) } // UnmarshalYAML unmarshals an Option[T] from YAML @@ -68,10 +68,10 @@ func (o *Option[T]) UnmarshalYAML(unmarshal func(interface{}) error) error { var v T err := unmarshal(&v) if err != nil { - *o = NewNoneOption[T]() + *o = None[T]() return err } - *o = NewOption[T](v) + *o = New[T](v) return nil } diff --git a/pkg/util/optional/optional_test.go b/pkg/util/option/option_test.go similarity index 82% rename from pkg/util/optional/optional_test.go rename to pkg/util/option/option_test.go index 86cafca52f7f1..6b0f72f084431 100644 --- a/pkg/util/optional/optional_test.go +++ b/pkg/util/option/option_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
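The rename from `pkg/util/optional` to `pkg/util/option` also shortens the constructors (`NewOption` → `New`, `NewNoneOption` → `None`, and so on). A small sketch of the renamed API, mirroring what the updated tests exercise:

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/option"
)

func main() {
	o := option.New(42) // previously optional.NewOption(42)
	if v, ok := o.Get(); ok {
		fmt.Println("value:", v)
	}

	none := option.None[string]() // previously optional.NewNoneOption[string]()
	length := option.MapOption(none, func(s string) int { return len(s) })
	if _, ok := length.Get(); !ok {
		fmt.Println("no value set, so the mapped option is empty too")
	}
}
```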
-package optional +package option import ( "testing" @@ -12,18 +12,18 @@ import ( ) func TestOptionConstructors(t *testing.T) { - optional := NewOption(42) + optional := New(42) v, ok := optional.Get() require.True(t, ok) require.Equal(t, 42, v) - optional = NewNoneOption[int]() + optional = None[int]() _, ok = optional.Get() require.False(t, ok) } func TestOptionSetReset(t *testing.T) { - optional := NewOption(0) + optional := New(0) optional.Set(42) v, ok := optional.Get() require.True(t, ok) @@ -38,14 +38,14 @@ func TestMapOption(t *testing.T) { return len(v) } - optionalStr := NewOption("hello") + optionalStr := New("hello") optionalInt := MapOption(optionalStr, getLen) v, ok := optionalInt.Get() require.True(t, ok) require.Equal(t, 5, v) - optionalStr = NewNoneOption[string]() + optionalStr = None[string]() optionalInt = MapOption(optionalStr, getLen) _, ok = optionalInt.Get() @@ -53,7 +53,7 @@ func TestMapOption(t *testing.T) { } func TestSetIfNone(t *testing.T) { - optional := NewOption(42) + optional := New(42) optional.SetIfNone(10) v, ok := optional.Get() @@ -68,15 +68,15 @@ func TestSetIfNone(t *testing.T) { } func TestSetOptionIfNone(t *testing.T) { - optional := NewOption(42) + optional := New(42) - optional.SetOptionIfNone(NewOption(10)) + optional.SetOptionIfNone(New(10)) v, ok := optional.Get() require.Equal(t, 42, v) require.True(t, ok) optional.Reset() - optional.SetOptionIfNone(NewOption(10)) + optional.SetOptionIfNone(New(10)) v, ok = optional.Get() require.Equal(t, 10, v) require.True(t, ok) diff --git a/pkg/util/port/portlist/netstat.go b/pkg/util/port/portlist/netstat.go index b79f100f9055b..1be223e078e21 100644 --- a/pkg/util/port/portlist/netstat.go +++ b/pkg/util/port/portlist/netstat.go @@ -3,132 +3,31 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2014-present Datadog, Inc. -//go:build darwin - package portlist import ( - "bufio" - "bytes" - "io" - - "go4.org/mem" + "net/netip" ) -// parsePort returns the port number at the end of s following the last "." or -// ":", whichever comes last. It returns -1 on a parse error or invalid number -// and 0 if the port number was "*". -// -// This is basically net.SplitHostPort except that it handles a "." (as macOS -// and others return in netstat output), uses mem.RO, and validates that the -// port must be numeric and in the uint16 range. -func parsePort(s mem.RO) int { - // a.b.c.d:1234 or [a:b:c:d]:1234 - i1 := mem.LastIndexByte(s, ':') - // a.b.c.d.1234 or [a:b:c:d].1234 - i2 := mem.LastIndexByte(s, '.') - - i := i1 - if i2 > i { - i = i2 - } - if i < 0 { - // no match; weird - return -1 - } - - portstr := s.SliceFrom(i + 1) - if portstr.EqualString("*") { - return 0 - } - - port, err := mem.ParseUint(portstr, 10, 16) - if err != nil { - // invalid port; weird - return -1 - } - - return int(port) +// Entry is a single entry in the connection table. +type Entry struct { + Local, Remote netip.AddrPort + Pid int + State string + OSMetadata OSMetadata } -func isLoopbackAddr(s mem.RO) bool { - return mem.HasPrefix(s, mem.S("127.")) || - mem.HasPrefix(s, mem.S("[::1]:")) || - mem.HasPrefix(s, mem.S("::1.")) +// Table contains local machine's TCP connection entries. +// +// Currently only TCP (IPv4 and IPv6) are included. +type Table struct { + Entries []Entry } -// appendParsePortsNetstat appends to base listening ports -// from "netstat" output, read from br. See TestParsePortsNetstat -// for example input lines. +// GetConnTable returns the connection table. 
// -// This used to be a lowest common denominator parser for "netstat -na" format. -// All of Linux, Windows, and macOS support -na and give similar-ish output -// formats that we can parse without special detection logic. -// Unfortunately, options to filter by proto or state are non-portable, -// so we'll filter for ourselves. -// Nowadays, though, we only use it for macOS as of 2022-11-04. -func appendParsePortsNetstat(base []Port, br *bufio.Reader, includeLocalhost bool) ([]Port, error) { - ret := base - var fieldBuf [10]mem.RO - for { - line, err := br.ReadBytes('\n') - if err != nil { - if err == io.EOF { - break - } - return nil, err - } - trimline := bytes.TrimSpace(line) - cols := mem.AppendFields(fieldBuf[:0], mem.B(trimline)) - if len(cols) < 1 { - continue - } - protos := cols[0] - - var proto string - var laddr, raddr mem.RO - if mem.HasPrefixFold(protos, mem.S("tcp")) { - if len(cols) < 4 { - continue - } - proto = "tcp" - laddr = cols[len(cols)-3] - raddr = cols[len(cols)-2] - state := cols[len(cols)-1] - if !mem.HasPrefix(state, mem.S("LISTEN")) { - // not interested in non-listener sockets - continue - } - if !includeLocalhost && isLoopbackAddr(laddr) { - // not interested in loopback-bound listeners - continue - } - } else if mem.HasPrefixFold(protos, mem.S("udp")) { - if len(cols) < 3 { - continue - } - proto = "udp" - laddr = cols[len(cols)-2] - raddr = cols[len(cols)-1] - if !includeLocalhost && isLoopbackAddr(laddr) { - // not interested in loopback-bound listeners - continue - } - } else { - // not interested in other protocols - continue - } - - lport := parsePort(laddr) - rport := parsePort(raddr) - if rport > 0 || lport <= 0 { - // not interested in "connected" sockets - continue - } - ret = append(ret, Port{ - Proto: proto, - Port: uint16(lport), - }) - } - return ret, nil +// It returns ErrNotImplemented if the table is not available for the +// current operating system. +func GetConnTable() (*Table, error) { + return getConnTable() } diff --git a/pkg/util/port/portlist/netstat_darwin.go b/pkg/util/port/portlist/netstat_darwin.go new file mode 100644 index 0000000000000..7542980776b17 --- /dev/null +++ b/pkg/util/port/portlist/netstat_darwin.go @@ -0,0 +1,146 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2014-present Datadog, Inc. + +package portlist + +import ( + "bufio" + "bytes" + "errors" + "io" + + "go4.org/mem" +) + +// ErrNotImplemented is the "not implemented" error given by `gopsutil` when an +// OS doesn't support and API. Unfortunately it's in an internal package so +// we can't import it so we'll copy it here. +var ErrNotImplemented = errors.New("not implemented yet") + +// OSMetadata includes any additional OS-specific information that may be +// obtained during the retrieval of a given Entry. +type OSMetadata struct{} + +// parsePort returns the port number at the end of s following the last "." or +// ":", whichever comes last. It returns -1 on a parse error or invalid number +// and 0 if the port number was "*". +// +// This is basically net.SplitHostPort except that it handles a "." (as macOS +// and others return in netstat output), uses mem.RO, and validates that the +// port must be numeric and in the uint16 range. 
+func parsePort(s mem.RO) int { + // a.b.c.d:1234 or [a:b:c:d]:1234 + i1 := mem.LastIndexByte(s, ':') + // a.b.c.d.1234 or [a:b:c:d].1234 + i2 := mem.LastIndexByte(s, '.') + + i := i1 + if i2 > i { + i = i2 + } + if i < 0 { + // no match; weird + return -1 + } + + portstr := s.SliceFrom(i + 1) + if portstr.EqualString("*") { + return 0 + } + + port, err := mem.ParseUint(portstr, 10, 16) + if err != nil { + // invalid port; weird + return -1 + } + + return int(port) +} + +func isLoopbackAddr(s mem.RO) bool { + return mem.HasPrefix(s, mem.S("127.")) || + mem.HasPrefix(s, mem.S("[::1]:")) || + mem.HasPrefix(s, mem.S("::1.")) +} + +// appendParsePortsNetstat appends to base listening ports +// from "netstat" output, read from br. See TestParsePortsNetstat +// for example input lines. +// +// This used to be a lowest common denominator parser for "netstat -na" format. +// All of Linux, Windows, and macOS support -na and give similar-ish output +// formats that we can parse without special detection logic. +// Unfortunately, options to filter by proto or state are non-portable, +// so we'll filter for ourselves. +// Nowadays, though, we only use it for macOS as of 2022-11-04. +func appendParsePortsNetstat(base []Port, br *bufio.Reader, includeLocalhost bool) ([]Port, error) { + ret := base + var fieldBuf [10]mem.RO + for { + line, err := br.ReadBytes('\n') + if err != nil { + if err == io.EOF { + break + } + return nil, err + } + trimline := bytes.TrimSpace(line) + cols := mem.AppendFields(fieldBuf[:0], mem.B(trimline)) + if len(cols) < 1 { + continue + } + protos := cols[0] + + var proto string + var laddr, raddr mem.RO + if mem.HasPrefixFold(protos, mem.S("tcp")) { + if len(cols) < 4 { + continue + } + proto = "tcp" + laddr = cols[len(cols)-3] + raddr = cols[len(cols)-2] + state := cols[len(cols)-1] + if !mem.HasPrefix(state, mem.S("LISTEN")) { + // not interested in non-listener sockets + continue + } + if !includeLocalhost && isLoopbackAddr(laddr) { + // not interested in loopback-bound listeners + continue + } + } else if mem.HasPrefixFold(protos, mem.S("udp")) { + if len(cols) < 3 { + continue + } + proto = "udp" + laddr = cols[len(cols)-2] + raddr = cols[len(cols)-1] + if !includeLocalhost && isLoopbackAddr(laddr) { + // not interested in loopback-bound listeners + continue + } + } else { + // not interested in other protocols + continue + } + + lport := parsePort(laddr) + rport := parsePort(raddr) + if rport > 0 || lport <= 0 { + // not interested in "connected" sockets + continue + } + ret = append(ret, Port{ + Proto: proto, + Port: uint16(lport), + }) + } + return ret, nil +} + +func getConnTable() (*Table, error) { + return nil, ErrNotImplemented +} diff --git a/pkg/util/port/portlist/netstat_noimpl.go b/pkg/util/port/portlist/netstat_noimpl.go new file mode 100644 index 0000000000000..eeef1b4de430b --- /dev/null +++ b/pkg/util/port/portlist/netstat_noimpl.go @@ -0,0 +1,23 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2014-present Datadog, Inc. + +//go:build !windows && !darwin + +package portlist + +import "errors" + +// ErrNotImplemented is the "not implemented" error given by `gopsutil` when an +// OS doesn't support and API. Unfortunately it's in an internal package so +// we can't import it so we'll copy it here. 
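The netstat shim is now split per platform: the generic file exposes `Entry`, `Table`, and `GetConnTable`, with a real implementation only on Windows for now, while darwin and the `!windows && !darwin` fallback return `ErrNotImplemented`. A hedged sketch of consuming the table where it is available:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/port/portlist"
)

func main() {
	tab, err := portlist.GetConnTable()
	if errors.Is(err, portlist.ErrNotImplemented) {
		fmt.Println("connection table not available on this OS")
		return
	}
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, e := range tab.Entries {
		if e.State != "LISTEN" { // only report listening sockets
			continue
		}
		fmt.Printf("pid %d listening on %s\n", e.Pid, e.Local)
	}
}
```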
+var ErrNotImplemented = errors.New("not implemented yet") + +// OSMetadata includes any additional OS-specific information that may be +// obtained during the retrieval of a given Entry. +type OSMetadata struct{} + +func getConnTable() (*Table, error) { + return nil, ErrNotImplemented +} diff --git a/pkg/util/port/portlist/netstat_windows.go b/pkg/util/port/portlist/netstat_windows.go new file mode 100644 index 0000000000000..b36f0a12f2429 --- /dev/null +++ b/pkg/util/port/portlist/netstat_windows.go @@ -0,0 +1,280 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2014-present Datadog, Inc. + +package portlist + +import ( + "errors" + "fmt" + "math/bits" + "net/netip" + "unsafe" + + "golang.org/x/sys/cpu" + "golang.org/x/sys/windows" +) + +// OSMetadata includes any additional OS-specific information that may be +// obtained during the retrieval of a given Entry. +type OSMetadata interface { + // GetModule returns the entry's module name. + // + // It returns ("", nil) if no entry is found. As of 2023-01-27, any returned + // error is silently discarded by its sole caller in portlist_windows.go and + // treated equivalently as returning ("", nil), but this may change in the + // future. An error should only be returned in casees that are worthy of + // being logged at least. + GetModule() (string, error) +} + +// See https://docs.microsoft.com/en-us/windows/win32/api/iphlpapi/nf-iphlpapi-getextendedtcptable + +// TCP_TABLE_OWNER_MODULE_ALL means to include the PID and module. The table type +// we get back from Windows depends on AF_INET vs AF_INET6: +// MIB_TCPTABLE_OWNER_MODULE for v4 or MIB_TCP6TABLE_OWNER_MODULE for v6. +const tcpTableOwnerModuleAll = 8 + +// TCPIP_OWNER_MODULE_BASIC_INFO means to request "basic information" about the +// owner module. +const tcpipOwnerModuleBasicInfo = 0 + +var ( + iphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + getTCPTable = iphlpapi.NewProc("GetExtendedTcpTable") + getOwnerModuleFromTCPEntry = iphlpapi.NewProc("GetOwnerModuleFromTcpEntry") + getOwnerModuleFromTCP6Entry = iphlpapi.NewProc("GetOwnerModuleFromTcp6Entry") + // TODO: GetExtendedUdpTable also? if/when needed. 
+) + +type _MIB_TCPROW_OWNER_MODULE struct { //nolint:revive // Windows API type + state uint32 + localAddr uint32 + localPort uint32 + remoteAddr uint32 + remotePort uint32 + pid uint32 + createTimestamp int64 + owningModuleInfo [16]uint64 +} + +func ipport4(addr uint32, port uint16) netip.AddrPort { + if !cpu.IsBigEndian { + addr = bits.ReverseBytes32(addr) + } + return netip.AddrPortFrom( + netip.AddrFrom4([4]byte{byte(addr >> 24), byte(addr >> 16), byte(addr >> 8), byte(addr)}), + port) +} + +func port(v *uint32) uint16 { + if !cpu.IsBigEndian { + return uint16(bits.ReverseBytes32(*v) >> 16) + } + return uint16(*v >> 16) +} + +var states = []string{ + "", + "CLOSED", + "LISTEN", + "SYN-SENT", + "SYN-RECEIVED", + "ESTABLISHED", + "FIN-WAIT-1", + "FIN-WAIT-2", + "CLOSE-WAIT", + "CLOSING", + "LAST-ACK", + "DELETE-TCB", +} + +func state(v uint32) string { + if v < uint32(len(states)) { + return states[v] + } + return fmt.Sprintf("unknown-state-%d", v) +} + +// See https://web.archive.org/web/20221219213143/https://learn.microsoft.com/en-us/windows/win32/api/iprtrmib/ns-iprtrmib-tcpip_owner_module_basic_info +type _TCPIP_OWNER_MODULE_BASIC_INFO struct { //nolint:revive // Windows API type + moduleName *uint16 + modulePath *uint16 +} + +type moduleInfoConstraint interface { + _MIB_TCPROW_OWNER_MODULE | _MIB_TCP6ROW_OWNER_MODULE +} + +// See https://web.archive.org/web/20221219212442/https://learn.microsoft.com/en-us/windows/win32/api/tcpmib/ns-tcpmib-mib_tcp6row_owner_module +type _MIB_TCP6ROW_OWNER_MODULE struct { //nolint:revive // Windows API type + localAddr [16]byte + localScope uint32 + localPort uint32 + remoteAddr [16]byte + remoteScope uint32 + remotePort uint32 + state uint32 + pid uint32 + createTimestamp int64 + owningModuleInfo [16]uint64 +} + +func moduleInfo[entryType moduleInfoConstraint](entry *entryType, proc *windows.LazyProc) (string, error) { + var buf []byte + var desiredLen uint32 + var addr unsafe.Pointer + + for { + e, _, _ := proc.Call( + uintptr(unsafe.Pointer(entry)), + uintptr(tcpipOwnerModuleBasicInfo), + uintptr(addr), + uintptr(unsafe.Pointer(&desiredLen)), + ) + err := windows.Errno(e) + if err == windows.ERROR_SUCCESS { + break + } + if err == windows.ERROR_NOT_FOUND { + return "", nil + } + if err != windows.ERROR_INSUFFICIENT_BUFFER { + return "", err + } + if desiredLen > 1<<20 { + // Sanity check before allocating too much + return "", nil + } + buf = make([]byte, desiredLen) + addr = unsafe.Pointer(&buf[0]) + } + if addr == nil { + // GetOwnerModuleFromTcp*Entry can apparently return ERROR_SUCCESS + // (NO_ERROR) on the first call without the usual first + // ERROR_INSUFFICIENT_BUFFER result. Windows said success, so interpret + // that was successfully not having data. 
+ return "", nil + } + basicInfo := (*_TCPIP_OWNER_MODULE_BASIC_INFO)(addr) + return windows.UTF16PtrToString(basicInfo.moduleName), nil +} + +// GetModule implements OSMetaData +func (row *_MIB_TCPROW_OWNER_MODULE) GetModule() (string, error) { + return moduleInfo(row, getOwnerModuleFromTCPEntry) +} + +func (row *_MIB_TCPROW_OWNER_MODULE) asEntry() Entry { + return Entry{ + Local: ipport4(row.localAddr, port(&row.localPort)), + Remote: ipport4(row.remoteAddr, port(&row.remotePort)), + Pid: int(row.pid), + State: state(row.state), + OSMetadata: row, + } +} + +type _MIB_TCP6TABLE_OWNER_MODULE struct { //nolint:revive // Windows API type + numEntries uint32 + table _MIB_TCP6ROW_OWNER_MODULE +} + +func (m *_MIB_TCP6TABLE_OWNER_MODULE) getRows() []_MIB_TCP6ROW_OWNER_MODULE { + return unsafe.Slice(&m.table, m.numEntries) +} + +func ipport6(addr [16]byte, scope uint32, port uint16) netip.AddrPort { + ip := netip.AddrFrom16(addr).Unmap() + if scope != 0 { + ip = ip.WithZone(fmt.Sprint(scope)) + } + return netip.AddrPortFrom(ip, port) +} + +// GetModule implements OSMetadata. +func (row *_MIB_TCP6ROW_OWNER_MODULE) GetModule() (string, error) { + return moduleInfo(row, getOwnerModuleFromTCP6Entry) +} + +func (row *_MIB_TCP6ROW_OWNER_MODULE) asEntry() Entry { + return Entry{ + Local: ipport6(row.localAddr, row.localScope, port(&row.localPort)), + Remote: ipport6(row.remoteAddr, row.remoteScope, port(&row.remotePort)), + Pid: int(row.pid), + State: state(row.state), + OSMetadata: row, + } +} + +type _MIB_TCPTABLE_OWNER_MODULE struct { //nolint:revive // Windows API type + numEntries uint32 + table _MIB_TCPROW_OWNER_MODULE +} + +func (m *_MIB_TCPTABLE_OWNER_MODULE) getRows() []_MIB_TCPROW_OWNER_MODULE { + return unsafe.Slice(&m.table, m.numEntries) +} + +func (t *Table) addEntries(fam int) error { + var size uint32 + var addr unsafe.Pointer + var buf []byte + for { + err, _, _ := getTCPTable.Call( + uintptr(addr), + uintptr(unsafe.Pointer(&size)), + 1, // sorted + uintptr(fam), + tcpTableOwnerModuleAll, + 0, // reserved; "must be zero" + ) + if err == 0 { + break + } + if err == uintptr(windows.ERROR_INSUFFICIENT_BUFFER) { + const maxSize = 10 << 20 + if size > maxSize || size < 4 { + return fmt.Errorf("unreasonable kernel-reported size %d", size) + } + buf = make([]byte, size) + addr = unsafe.Pointer(&buf[0]) + continue + } + return windows.Errno(err) + } + if len(buf) < int(size) { + return errors.New("unexpected size growth from system call") + } + buf = buf[:size] + + switch fam { + case windows.AF_INET: + info := (*_MIB_TCPTABLE_OWNER_MODULE)(unsafe.Pointer(&buf[0])) + rows := info.getRows() + for _, row := range rows { + t.Entries = append(t.Entries, row.asEntry()) + } + + case windows.AF_INET6: + info := (*_MIB_TCP6TABLE_OWNER_MODULE)(unsafe.Pointer(&buf[0])) + rows := info.getRows() + for _, row := range rows { + t.Entries = append(t.Entries, row.asEntry()) + } + } + + return nil +} + +func getConnTable() (*Table, error) { + t := new(Table) + if err := t.addEntries(windows.AF_INET); err != nil { + return nil, fmt.Errorf("failed to get IPv4 entries: %w", err) + } + if err := t.addEntries(windows.AF_INET6); err != nil { + return nil, fmt.Errorf("failed to get IPv6 entries: %w", err) + } + return t, nil +} diff --git a/pkg/util/port/portlist/poller.go b/pkg/util/port/portlist/poller.go index 6c6b56e1c4179..7544d486e0708 100644 --- a/pkg/util/port/portlist/poller.go +++ b/pkg/util/port/portlist/poller.go @@ -10,10 +10,9 @@ package portlist import ( "fmt" + "slices" "sort" "sync" - - 
"golang.org/x/exp/slices" ) // Poller scans the systems for listening ports periodically and sends diff --git a/pkg/util/port/portlist/poller_macos.go b/pkg/util/port/portlist/poller_darwin.go similarity index 99% rename from pkg/util/port/portlist/poller_macos.go rename to pkg/util/port/portlist/poller_darwin.go index cbefe70562b15..e4b981ac92298 100644 --- a/pkg/util/port/portlist/poller_macos.go +++ b/pkg/util/port/portlist/poller_darwin.go @@ -3,8 +3,6 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2014-present Datadog, Inc. -//go:build darwin - package portlist import ( diff --git a/pkg/util/port/portlist/poller_macos_test.go b/pkg/util/port/portlist/poller_darwin_test.go similarity index 97% rename from pkg/util/port/portlist/poller_macos_test.go rename to pkg/util/port/portlist/poller_darwin_test.go index 2f4b967eadbf5..7238f5cae80c0 100644 --- a/pkg/util/port/portlist/poller_macos_test.go +++ b/pkg/util/port/portlist/poller_darwin_test.go @@ -3,8 +3,6 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2014-present Datadog, Inc. -//go:build darwin - package portlist import ( diff --git a/pkg/util/port/portlist/poller_windows.go b/pkg/util/port/portlist/poller_windows.go index ec33188b84ddf..c4e056352d740 100644 --- a/pkg/util/port/portlist/poller_windows.go +++ b/pkg/util/port/portlist/poller_windows.go @@ -1,9 +1,7 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause // This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. - -//go:build windows +// Copyright 2014-present Datadog, Inc. 
package portlist @@ -18,21 +16,86 @@ var ErrNotImplemented = errors.New("not implemented yet") // init initializes the Poller by ensuring it has an underlying func (p *Poller) init() { - p.os = newWindowsOSImpl(p.IncludeLocalhost) + p.os = newWindowsImpl(p.IncludeLocalhost) } -type windowsOSImpl struct { +type windowsImpl struct { + known map[famPort]*portMeta includeLocalhost bool } -func newWindowsOSImpl(includeLocalhost bool) osImpl { - return &windowsOSImpl{ +type famPort struct { + proto string + port uint16 + pid uint32 +} + +type portMeta struct { + port Port + keep bool +} + +func newWindowsImpl(includeLocalhost bool) osImpl { + return &windowsImpl{ + known: map[famPort]*portMeta{}, includeLocalhost: includeLocalhost, } } +func (*windowsImpl) Close() error { return nil } -func (im *windowsOSImpl) AppendListeningPorts(_ []Port) ([]Port, error) { - return nil, ErrNotImplemented -} +func (im *windowsImpl) AppendListeningPorts(base []Port) ([]Port, error) { + tab, err := GetConnTable() + if err != nil { + return nil, err + } -func (*windowsOSImpl) Close() error { return ErrNotImplemented } + for _, pm := range im.known { + pm.keep = false + } + + ret := base + for _, e := range tab.Entries { + if e.State != "LISTEN" { + continue + } + if !im.includeLocalhost && !e.Local.Addr().IsUnspecified() { + continue + } + fp := famPort{ + proto: "tcp", + port: e.Local.Port(), + pid: uint32(e.Pid), + } + pm, ok := im.known[fp] + if ok { + pm.keep = true + continue + } + var process string + if e.OSMetadata != nil { + if module, err := e.OSMetadata.GetModule(); err == nil { + process = module + } + } + pm = &portMeta{ + keep: true, + port: Port{ + Proto: "tcp", + Port: e.Local.Port(), + Process: process, + Pid: e.Pid, + }, + } + im.known[fp] = pm + } + + for k, m := range im.known { + if !m.keep { + delete(im.known, k) + continue + } + ret = append(ret, m.port) + } + + return sortAndDedup(ret), nil +} diff --git a/pkg/util/process_file_stats.go b/pkg/util/procfilestats/process_file_stats.go similarity index 83% rename from pkg/util/process_file_stats.go rename to pkg/util/procfilestats/process_file_stats.go index ea6b6e98e9dc3..9c9e29975f34f 100644 --- a/pkg/util/process_file_stats.go +++ b/pkg/util/procfilestats/process_file_stats.go @@ -3,7 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
-package util +// Package procfilestats provides a way to retrieve process open file stats +package procfilestats // ProcessFileStats is used to retrieve stats from gopsutil/v3/process -- these stats are used for troubleshooting purposes type ProcessFileStats struct { diff --git a/pkg/util/process_file_stats_linux.go b/pkg/util/procfilestats/process_file_stats_linux.go similarity index 98% rename from pkg/util/process_file_stats_linux.go rename to pkg/util/procfilestats/process_file_stats_linux.go index 79ef22922dc24..0a1cef08d9964 100644 --- a/pkg/util/process_file_stats_linux.go +++ b/pkg/util/procfilestats/process_file_stats_linux.go @@ -5,7 +5,7 @@ //go:build linux -package util +package procfilestats import ( "os" diff --git a/pkg/util/process_file_stats_others.go b/pkg/util/procfilestats/process_file_stats_others.go similarity index 97% rename from pkg/util/process_file_stats_others.go rename to pkg/util/procfilestats/process_file_stats_others.go index 315f96bdf229c..10fceafbe3c8a 100644 --- a/pkg/util/process_file_stats_others.go +++ b/pkg/util/procfilestats/process_file_stats_others.go @@ -5,7 +5,7 @@ //go:build !linux -package util +package procfilestats import "errors" diff --git a/pkg/util/scrubber/default.go b/pkg/util/scrubber/default.go index 4724ecf4c8fd3..d2f5060e0a041 100644 --- a/pkg/util/scrubber/default.go +++ b/pkg/util/scrubber/default.go @@ -119,12 +119,13 @@ func AddDefaultReplacers(scrubber *Scrubber) { // URI Generic Syntax // https://tools.ietf.org/html/rfc3986 uriPasswordReplacer := Replacer{ - Regex: regexp.MustCompile(`(?i)([a-z][a-z0-9+-.]+://|\b)([^:]+):([^\s|"]+)@`), + Regex: regexp.MustCompile(`(?i)([a-z][a-z0-9+-.]+://|\b)([^:\s]+):([^\s|"]+)@`), Repl: []byte(`$1$2:********@`), - // https://github.com/DataDog/datadog-agent/pull/15959 - LastUpdated: parseVersion("7.45.0"), + // https://github.com/DataDog/datadog-agent/pull/32503 + LastUpdated: parseVersion("7.62.0"), } + yamlPasswordReplacer := matchYAMLKeyPart( `(pass(word)?|pwd)`, []string{"pass", "pwd"}, diff --git a/pkg/util/scrubber/default_test.go b/pkg/util/scrubber/default_test.go index 0bb2d721c3983..637ed9a464c17 100644 --- a/pkg/util/scrubber/default_test.go +++ b/pkg/util/scrubber/default_test.go @@ -107,6 +107,12 @@ func TestConfigRCAppKey(t *testing.T) { } func TestConfigStripURLPassword(t *testing.T) { + assertClean(t, + `proxy: random_url_key: http://user:password@host:port`, + `proxy: random_url_key: http://user:********@host:port`) + assertClean(t, + `random_url_key http://user:password@host:port`, + `random_url_key http://user:********@host:port`) assertClean(t, `random_url_key: http://user:password@host:port`, `random_url_key: http://user:********@host:port`) diff --git a/pkg/util/size.go b/pkg/util/size/size.go similarity index 59% rename from pkg/util/size.go rename to pkg/util/size/size.go index a331216847f9f..1800f48873cbe 100644 --- a/pkg/util/size.go +++ b/pkg/util/size/size.go @@ -3,10 +3,10 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2023-present Datadog, Inc. -package util +// Package size provides functions to compute the size of some complex types +package size import ( - "strconv" "unsafe" ) @@ -20,24 +20,17 @@ type HasSizeInBytes interface { } const ( - // IntSize is the size of an int in bytes. - IntSize = strconv.IntSize / 8 - // StringSize is the size of a string structure in bytes. 
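The tightened URI-password replacer above excludes whitespace from the username capture (`[^:\s]+` instead of `[^:]+`), so a credential URL that follows a bare key is scrubbed without swallowing the preceding words. A standalone sketch of just that regex and replacement, copied from the diff, run against one of the new test inputs:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Regex and replacement taken verbatim from the updated uriPasswordReplacer.
	re := regexp.MustCompile(`(?i)([a-z][a-z0-9+-.]+://|\b)([^:\s]+):([^\s|"]+)@`)
	repl := []byte(`$1$2:********@`)

	in := []byte(`random_url_key http://user:password@host:port`)
	fmt.Println(string(re.ReplaceAll(in, repl)))
	// expected, per the new test case: random_url_key http://user:********@host:port
}
```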
- StringSize = unsafe.Sizeof("") - // StringSliceSize is the size of the string slice in bytes (not counting the size of the strings themselves). - StringSliceSize = unsafe.Sizeof([]string{}) - - // BytesKindTelemetryKey is the tag key used to identify the kind of telemetry value. - BytesKindTelemetryKey = "bytes_kind" - // BytesKindStruct is the tag value used to mark bytes as struct. - BytesKindStruct = "struct" - // BytesKindData is the tag value used to mark bytes as data. Those are likely to be interned strings. - BytesKindData = "data" + // stringSize is the size of a string structure in bytes. + stringSize = unsafe.Sizeof("") + // stringSliceSize is the size of the string slice in bytes (not counting the size of the strings themselves). + stringSliceSize = unsafe.Sizeof([]string{}) ) // SizeOfStringSlice returns the size of the string slice in bytes (not counting the size of the strings themselves). +// +//nolint:revive func SizeOfStringSlice(s []string) int { - return int(StringSliceSize) + len(s)*int(StringSize) + return int(stringSliceSize) + len(s)*int(stringSize) } // DataSizeOfStringSlice returns the size of the content of the string slice in bytes. diff --git a/pkg/util/slices/map.go b/pkg/util/slices/map.go new file mode 100644 index 0000000000000..049d4f4a25e50 --- /dev/null +++ b/pkg/util/slices/map.go @@ -0,0 +1,16 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +// Package slices are utilities to deal with slices +package slices + +// Map returns a new slice with the result of applying fn to each element. +func Map[S ~[]E, E any, RE any](s S, fn func(E) RE) []RE { + x := make([]RE, 0, len(s)) + for _, v := range s { + x = append(x, fn(v)) + } + return x +} diff --git a/pkg/util/slices/map_test.go b/pkg/util/slices/map_test.go new file mode 100644 index 0000000000000..0bf48b54a5a21 --- /dev/null +++ b/pkg/util/slices/map_test.go @@ -0,0 +1,19 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2024-present Datadog, Inc. + +package slices + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMap(t *testing.T) { + x := Map([]int{1, 2, 4, 8}, func(v int) int { + return v * v + }) + assert.Equal(t, []int{1, 4, 16, 64}, x) +} diff --git a/pkg/util/stat.go b/pkg/util/stat/stat.go similarity index 96% rename from pkg/util/stat.go rename to pkg/util/stat/stat.go index 0423d14a7b4d2..f2c5fb8c72a8c 100644 --- a/pkg/util/stat.go +++ b/pkg/util/stat/stat.go @@ -3,7 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -package util +// Package stat implements a simple stats aggregator. +package stat import ( "expvar" diff --git a/pkg/util/stat_test.go b/pkg/util/stat/stat_test.go similarity index 98% rename from pkg/util/stat_test.go rename to pkg/util/stat/stat_test.go index 8cc703c302f91..a5356323b2354 100644 --- a/pkg/util/stat_test.go +++ b/pkg/util/stat/stat_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
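Beyond the squaring test above, the generic `slices.Map` helper also covers projections between types, which is its more common use. A short illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/DataDog/datadog-agent/pkg/util/slices"
)

func main() {
	names := []string{"cpu", "memory", "disk"}
	// Map applies the function to every element and returns a new slice.
	lengths := slices.Map(names, func(s string) int { return len(s) })
	fmt.Println(lengths) // [3 6 4]
}
```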
-package util +package stat import ( "expvar" diff --git a/pkg/util/statstracker/stats_tracker.go b/pkg/util/statstracker/stats_tracker.go index e25543ea5e647..0b917b97381be 100644 --- a/pkg/util/statstracker/stats_tracker.go +++ b/pkg/util/statstracker/stats_tracker.go @@ -171,15 +171,10 @@ func (s *Tracker) InfoKey() string { // Info returns the Tracker as a formatted string slice. func (s *Tracker) Info() []string { - AllTimeAvgLatency := s.AllTimeAvg() / int64(time.Millisecond) - AllTimePeakLatency := s.AllTimePeak() / int64(time.Millisecond) - RecentAvgLatency := s.MovingAvg() / int64(time.Millisecond) - RecentPeakLatency := s.MovingPeak() / int64(time.Millisecond) - return []string{ - fmt.Sprintf("Average Latency (ms): %d", AllTimeAvgLatency), - fmt.Sprintf("24h Average Latency (ms): %d", RecentAvgLatency), - fmt.Sprintf("Peak Latency (ms): %d", AllTimePeakLatency), - fmt.Sprintf("24h Peak Latency (ms): %d", RecentPeakLatency), + fmt.Sprintf("Average Latency: %s", time.Duration(s.AllTimeAvg())), + fmt.Sprintf("24h Average Latency: %s", time.Duration(s.MovingAvg())), + fmt.Sprintf("Peak Latency: %s", time.Duration(s.AllTimePeak())), + fmt.Sprintf("24h Peak Latency: %s", time.Duration(s.MovingPeak())), } } diff --git a/pkg/util/sync/pool.go b/pkg/util/sync/pool.go index fdf1872c666ad..231a6a53b9efc 100644 --- a/pkg/util/sync/pool.go +++ b/pkg/util/sync/pool.go @@ -8,6 +8,22 @@ package sync import "sync" +// PoolReleaser is interface that wraps a sync.Pool Put function +type PoolReleaser[K any] interface { + Put(*K) +} + +// PoolGetter is interface that wraps a sync.Pool Get function +type PoolGetter[K any] interface { + Get() *K +} + +// Pool is a combination interface of PoolGetter and PoolReleaser +type Pool[K any] interface { + PoolGetter[K] + PoolReleaser[K] +} + // TypedPool is a type-safe version of sync.Pool type TypedPool[K any] struct { p sync.Pool diff --git a/pkg/util/system/dlopen_linux.go b/pkg/util/system/dlopen_linux.go new file mode 100644 index 0000000000000..bfc3e9e273a5e --- /dev/null +++ b/pkg/util/system/dlopen_linux.go @@ -0,0 +1,43 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build linux && cgo + +package system + +// #cgo LDFLAGS: -ldl +// #include +// #include +import "C" + +import ( + "fmt" + "unsafe" +) + +// CheckLibraryExists checks if a library is available on the system by trying it to +// open with dlopen. It returns an error if the library is not found. This is +// the most direct way to check for a library's presence on Linux, as there are +// multiple sources for paths for library searches, so it's better to use the +// same mechanism that the loader uses. 
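// A hedged usage sketch (illustrative only; "libpcap.so.1" is merely an example
// library name, not something this package requires):
//
//	if err := CheckLibraryExists("libpcap.so.1"); err != nil {
//		// the native dependency is missing; fall back or disable the feature
//	}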
+func CheckLibraryExists(libname string) error { + cname := C.CString(libname) + defer C.free(unsafe.Pointer(cname)) + + // Lazy: resolve undefined symbols as they are needed, avoid loading everything at once + handle := C.dlopen(cname, C.RTLD_LAZY) + if handle == nil { + e := C.dlerror() + var errstr string + if e != nil { + errstr = C.GoString(e) + } + + return fmt.Errorf("could not locate %s: %s", libname, errstr) + } + + defer C.dlclose(handle) + return nil +} diff --git a/pkg/util/system/dlopen_other.go b/pkg/util/system/dlopen_other.go new file mode 100644 index 0000000000000..51a4e1d97c116 --- /dev/null +++ b/pkg/util/system/dlopen_other.go @@ -0,0 +1,16 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build !linux || !cgo + +package system + +import "errors" + +// CheckLibraryExists checks if a library is available on the system by trying it to +// open with dlopen. It returns an error if the library is not found. +func CheckLibraryExists(_ string) error { + return errors.New("CheckLibrary is not supported on this platform") +} diff --git a/pkg/util/system/go.mod b/pkg/util/system/go.mod index 69183e1155599..2accf6274b589 100644 --- a/pkg/util/system/go.mod +++ b/pkg/util/system/go.mod @@ -17,10 +17,10 @@ require ( github.com/DataDog/datadog-agent/pkg/util/pointer v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/testutil v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/winutil v0.59.1 - github.com/shirou/gopsutil/v4 v4.24.11 + github.com/shirou/gopsutil/v4 v4.24.12 github.com/stretchr/testify v1.10.0 go.uber.org/atomic v1.11.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 ) require ( @@ -31,9 +31,9 @@ require ( github.com/ebitengine/purego v0.8.1 // indirect github.com/go-ole/go-ole v1.3.0 // indirect github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect diff --git a/pkg/util/system/go.sum b/pkg/util/system/go.sum index 5eac83e6a0bd2..e046d69bf9607 100644 --- a/pkg/util/system/go.sum +++ b/pkg/util/system/go.sum @@ -7,7 +7,6 @@ github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hectane/go-acl v0.0.0-20230122075934-ca0b05cb1adb h1:PGufWXXDq9yaev6xX1YQauaO1MV90e6Mpoq1I7Lz/VM= @@ -16,16 +15,16 @@ github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= @@ -40,8 +39,8 @@ golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/util/system/socket/go.mod b/pkg/util/system/socket/go.mod index adca0da3a1292..3e819eb44b268 100644 --- a/pkg/util/system/socket/go.mod +++ b/pkg/util/system/socket/go.mod @@ -4,4 +4,4 @@ go 1.22.0 require github.com/Microsoft/go-winio 
v0.6.2 -require golang.org/x/sys v0.28.0 // indirect +require golang.org/x/sys v0.29.0 // indirect diff --git a/pkg/util/system/socket/go.sum b/pkg/util/system/socket/go.sum index d723dbfa45f9e..49ae43c226a64 100644 --- a/pkg/util/system/socket/go.sum +++ b/pkg/util/system/socket/go.sum @@ -1,4 +1,4 @@ github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= diff --git a/pkg/util/static_tags.go b/pkg/util/tags/static_tags.go similarity index 98% rename from pkg/util/static_tags.go rename to pkg/util/tags/static_tags.go index 84589ed7e747c..681da68b43073 100644 --- a/pkg/util/static_tags.go +++ b/pkg/util/tags/static_tags.go @@ -3,7 +3,8 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2022-present Datadog, Inc. -package util +// Package tags provides utilities for working with tags. +package tags import ( "context" diff --git a/pkg/util/static_tags_test.go b/pkg/util/tags/static_tags_test.go similarity index 99% rename from pkg/util/static_tags_test.go rename to pkg/util/tags/static_tags_test.go index e8fa2b33b004e..c418a032db6e7 100644 --- a/pkg/util/static_tags_test.go +++ b/pkg/util/tags/static_tags_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2022-present Datadog, Inc. -package util +package tags import ( "context" diff --git a/pkg/util/trivy/cache.go b/pkg/util/trivy/cache.go index b1c9da8c53659..7062457d140cf 100644 --- a/pkg/util/trivy/cache.go +++ b/pkg/util/trivy/cache.go @@ -20,9 +20,9 @@ import ( workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" "github.com/DataDog/datadog-agent/pkg/sbom/telemetry" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" + "github.com/DataDog/datadog-agent/pkg/util/option" - "github.com/aquasecurity/trivy/pkg/fanal/cache" + "github.com/aquasecurity/trivy/pkg/cache" "github.com/aquasecurity/trivy/pkg/fanal/types" "github.com/hashicorp/golang-lru/v2/simplelru" ) @@ -44,7 +44,7 @@ func defaultCacheDir() string { // NewCustomBoltCache returns a BoltDB cache using an LRU algorithm with a // maximum disk size and garbage collection of unused images with its custom cleaner. -func NewCustomBoltCache(wmeta optional.Option[workloadmeta.Component], cacheDir string, maxDiskSize int) (CacheWithCleaner, error) { +func NewCustomBoltCache(wmeta option.Option[workloadmeta.Component], cacheDir string, maxDiskSize int) (CacheWithCleaner, error) { if cacheDir == "" { cacheDir = defaultCacheDir() } @@ -108,7 +108,7 @@ type ScannerCache struct { cache *persistentCache cachedKeysForEntity map[string][]string - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] } // clean removes entries of deleted images from the cache. 
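The optional→option rename in pkg/util/trivy/cache.go above only touches the import path, the type spelling, and the constructor name (optional.NewOption becomes option.New, as the updated test below also shows). A hedged sketch against this package's own constructor; the helper name, cache directory, and size limit are hypothetical:

    import (
        workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def"
        "github.com/DataDog/datadog-agent/pkg/util/option"
    )

    // newSBOMCache is a hypothetical helper showing the new option spelling.
    func newSBOMCache(store workloadmeta.Component) (CacheWithCleaner, error) {
        // option.New wraps the component exactly as optional.NewOption used to.
        return NewCustomBoltCache(option.New[workloadmeta.Component](store), "/var/cache/sbom", 1<<30)
    }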
diff --git a/pkg/util/trivy/cache_test.go b/pkg/util/trivy/cache_test.go index 8d969bc85d0e2..cfc65e1d7e343 100644 --- a/pkg/util/trivy/cache_test.go +++ b/pkg/util/trivy/cache_test.go @@ -22,8 +22,7 @@ import ( workloadmetafxmock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/fx-mock" workloadmetamock "github.com/DataDog/datadog-agent/comp/core/workloadmeta/mock" "github.com/DataDog/datadog-agent/pkg/util/fxutil" - "github.com/DataDog/datadog-agent/pkg/util/optional" - + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/aquasecurity/trivy/pkg/fanal/types" "github.com/stretchr/testify/require" "go.uber.org/fx" @@ -266,7 +265,7 @@ func TestCustomBoltCache_GarbageCollector(t *testing.T) { workloadmetaStore.Reset([]workloadmeta.Entity{image1, image2, image3}, workloadmeta.SourceAll) - cache, err := NewCustomBoltCache(optional.NewOption[workloadmeta.Component](workloadmetaStore), t.TempDir(), defaultDiskSize) + cache, err := NewCustomBoltCache(option.New[workloadmeta.Component](workloadmetaStore), t.TempDir(), defaultDiskSize) require.NoError(t, err) defer func() { require.NoError(t, cache.Close()) @@ -391,7 +390,7 @@ func newTestBlobInfo() types.BlobInfo { type cacheDeps struct { fx.In - WMeta optional.Option[workloadmeta.Component] + WMeta option.Option[workloadmeta.Component] } func createCacheDeps(t *testing.T) cacheDeps { diff --git a/pkg/util/trivy/containerd.go b/pkg/util/trivy/containerd.go index d3ed7ddf3f84c..47d504b897048 100644 --- a/pkg/util/trivy/containerd.go +++ b/pkg/util/trivy/containerd.go @@ -17,10 +17,14 @@ import ( "strings" "time" + ftypes "github.com/aquasecurity/trivy/pkg/fanal/types" "github.com/containerd/containerd" "github.com/containerd/containerd/content" "github.com/containerd/containerd/images/archive" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" + "github.com/containerd/errdefs" refdocker "github.com/distribution/reference" api "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" @@ -31,6 +35,9 @@ import ( "github.com/samber/lo" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/sbom" + cutil "github.com/DataDog/datadog-agent/pkg/util/containerd" + "github.com/DataDog/datadog-agent/pkg/util/log" ) // ContainerdCollector defines the conttainerd collector name @@ -181,3 +188,144 @@ func inspect(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, }, }, history, ref, nil } + +const ( + cleanupTimeout = 30 * time.Second +) + +type fakeContainerdContainer struct { + *fakeContainer + *image +} + +func (c *fakeContainerdContainer) LayerByDiffID(hash string) (ftypes.LayerPath, error) { + return c.fakeContainer.LayerByDiffID(hash) +} + +func (c *fakeContainerdContainer) LayerByDigest(hash string) (ftypes.LayerPath, error) { + return c.fakeContainer.LayerByDigest(hash) +} + +func (c *fakeContainerdContainer) Layers() (layers []ftypes.LayerPath) { + return c.fakeContainer.Layers() +} + +// ContainerdAccessor is a function that should return a containerd client +type ContainerdAccessor func() (cutil.ContainerdItf, error) + +// ScanContainerdImageFromSnapshotter scans containerd image directly from the snapshotter +func (c *Collector) ScanContainerdImageFromSnapshotter(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, img containerd.Image, client cutil.ContainerdItf, scanOptions sbom.ScanOptions) (sbom.Report, error) { + fanalImage, 
cleanup, err := convertContainerdImage(ctx, client.RawClient(), imgMeta, img) + if cleanup != nil { + defer cleanup() + } + if err != nil { + return nil, err + } + + // Computing duration of containerd lease + deadline, _ := ctx.Deadline() + expiration := deadline.Sub(time.Now().Add(cleanupTimeout)) + clClient := client.RawClient() + imageID := imgMeta.ID + + mounts, err := client.Mounts(ctx, expiration, imgMeta.Namespace, img) + if err != nil { + return nil, fmt.Errorf("unable to get mounts for image %s, err: %w", imgMeta.ID, err) + } + + layers := extractLayersFromOverlayFSMounts(mounts) + if len(layers) == 0 { + return nil, fmt.Errorf("unable to extract layers from overlayfs mounts %+v for image %s", mounts, imgMeta.ID) + } + + ctx = namespaces.WithNamespace(ctx, imgMeta.Namespace) + // Adding a lease to cleanup dandling snaphots at expiration + ctx, done, err := clClient.WithLease(ctx, + leases.WithID(imageID), + leases.WithExpiration(expiration), + leases.WithLabels(map[string]string{ + "containerd.io/gc.ref.snapshot." + containerd.DefaultSnapshotter: imageID, + }), + ) + if err != nil && !errdefs.IsAlreadyExists(err) { + return nil, fmt.Errorf("unable to get a lease, err: %w", err) + } + + report, err := c.scanOverlayFS(ctx, layers, &fakeContainerdContainer{ + image: fanalImage, + fakeContainer: &fakeContainer{ + layerPaths: layers, + imgMeta: imgMeta, + layerIDs: fanalImage.inspect.RootFS.Layers, + }, + }, imgMeta, scanOptions) + + if err := done(ctx); err != nil { + log.Warnf("Unable to cancel containerd lease with id: %s, err: %v", imageID, err) + } + + return report, err +} + +// ScanContainerdImage scans containerd image by exporting it and scanning the tarball +func (c *Collector) ScanContainerdImage(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, img containerd.Image, client cutil.ContainerdItf, scanOptions sbom.ScanOptions) (sbom.Report, error) { + fanalImage, cleanup, err := convertContainerdImage(ctx, client.RawClient(), imgMeta, img) + if cleanup != nil { + defer cleanup() + } + if err != nil { + return nil, fmt.Errorf("unable to convert containerd image, err: %w", err) + } + + return c.scanImage(ctx, fanalImage, imgMeta, scanOptions) +} + +// ScanContainerdImageFromFilesystem scans containerd image from file-system +func (c *Collector) ScanContainerdImageFromFilesystem(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, img containerd.Image, client cutil.ContainerdItf, scanOptions sbom.ScanOptions) (sbom.Report, error) { + imagePath, err := os.MkdirTemp("", "containerd-image-*") + if err != nil { + return nil, fmt.Errorf("unable to create temp dir, err: %w", err) + } + defer func() { + err := os.RemoveAll(imagePath) + if err != nil { + log.Errorf("Unable to remove temp dir: %s, err: %v", imagePath, err) + } + }() + + // Computing duration of containerd lease + deadline, _ := ctx.Deadline() + expiration := deadline.Sub(time.Now().Add(cleanupTimeout)) + + cleanUp, err := client.MountImage(ctx, expiration, imgMeta.Namespace, img, imagePath) + if err != nil { + return nil, fmt.Errorf("unable to mount containerd image, err: %w", err) + } + + defer func() { + cleanUpContext, cleanUpContextCancel := context.WithTimeout(context.Background(), cleanupTimeout) + err := cleanUp(cleanUpContext) + cleanUpContextCancel() + if err != nil { + log.Errorf("Unable to clean up mounted image, err: %v", err) + } + }() + + return c.scanFilesystem(ctx, imagePath, imgMeta, scanOptions) +} + +func extractLayersFromOverlayFSMounts(mounts []mount.Mount) []string { + 
var layers []string + for _, mount := range mounts { + for _, opt := range mount.Options { + for _, prefix := range []string{"upperdir=", "lowerdir="} { + trimmedOpt := strings.TrimPrefix(opt, prefix) + if trimmedOpt != opt { + layers = append(layers, strings.Split(trimmedOpt, ":")...) + } + } + } + } + return layers +} diff --git a/pkg/util/trivy/crio.go b/pkg/util/trivy/crio.go new file mode 100644 index 0000000000000..610e1739a435a --- /dev/null +++ b/pkg/util/trivy/crio.go @@ -0,0 +1,102 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. + +//go:build trivy && crio + +// Package trivy holds the scan components +package trivy + +import ( + "context" + "fmt" + "path/filepath" + + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/sbom" + "github.com/DataDog/datadog-agent/pkg/util/crio" + ftypes "github.com/aquasecurity/trivy/pkg/fanal/types" + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +type fakeCRIOContainer struct { + *fakeContainer +} + +func (c *fakeCRIOContainer) ID() (string, error) { + return c.imgMeta.ID, nil +} + +func (c *fakeCRIOContainer) ConfigFile() (*v1.ConfigFile, error) { + configFile := &v1.ConfigFile{ + Architecture: c.imgMeta.Architecture, + OS: c.imgMeta.OS, + } + configFile.RootFS.DiffIDs = make([]v1.Hash, len(c.layerIDs)) + for i, diffID := range c.layerIDs { + configFile.RootFS.DiffIDs[i], _ = v1.NewHash(diffID) + } + + for _, layer := range c.imgMeta.Layers { + configFile.History = append(configFile.History, v1.History{ + Author: layer.History.Author, + Created: v1.Time{Time: *layer.History.Created}, + CreatedBy: layer.History.CreatedBy, + Comment: layer.History.Comment, + EmptyLayer: layer.History.EmptyLayer, + }) + + } + return configFile, nil +} + +func (c *fakeCRIOContainer) LayerByDiffID(hash string) (ftypes.LayerPath, error) { + return c.fakeContainer.LayerByDiffID(hash) +} + +func (c *fakeCRIOContainer) LayerByDigest(hash string) (ftypes.LayerPath, error) { + return c.fakeContainer.LayerByDigest(hash) +} + +func (c *fakeCRIOContainer) Layers() (layers []ftypes.LayerPath) { + return c.fakeContainer.Layers() +} + +func (c *fakeCRIOContainer) Name() string { + return c.imgMeta.Name +} + +func (c *fakeCRIOContainer) RepoTags() []string { + return c.imgMeta.RepoTags +} + +func (c *fakeCRIOContainer) RepoDigests() []string { + return c.imgMeta.RepoDigests +} + +// ScanCRIOImageFromOverlayFS scans the CRI-O image layers using OverlayFS. 
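// Illustrative aside (an assumption about the usual containers/storage overlay
// layout, where each layer directory ends in .../<layer-id>/diff): the diff IDs
// handed to the scanner below are rebuilt from those directory names, roughly:
//
//	dir    := "/var/lib/containers/storage/overlay/ab12…/diff" // hypothetical path
//	diffID := "sha256:" + filepath.Base(filepath.Dir(dir))     // "sha256:ab12…"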
+func (c *Collector) ScanCRIOImageFromOverlayFS(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, client crio.Client, scanOptions sbom.ScanOptions) (sbom.Report, error) { + lowerDirs, err := client.GetCRIOImageLayers(imgMeta) + if err != nil { + return nil, fmt.Errorf("failed to retrieve layer directories: %w", err) + } + + diffIDs := make([]string, 0, len(lowerDirs)) + for _, dir := range lowerDirs { + diffIDs = append(diffIDs, "sha256:"+filepath.Base(filepath.Dir(dir))) + } + + report, err := c.scanOverlayFS(ctx, lowerDirs, &fakeCRIOContainer{ + fakeContainer: &fakeContainer{ + imgMeta: imgMeta, + layerPaths: lowerDirs, + layerIDs: diffIDs, + }, + }, imgMeta, scanOptions) + if err != nil { + return nil, err + } + + return report, nil +} diff --git a/pkg/util/trivy/docker.go b/pkg/util/trivy/docker.go index f4a255c24e00e..bbd22bd8a1e1c 100644 --- a/pkg/util/trivy/docker.go +++ b/pkg/util/trivy/docker.go @@ -11,8 +11,13 @@ import ( "context" "fmt" "os" + "strings" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/config/env" + "github.com/DataDog/datadog-agent/pkg/sbom" + containersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" + ftypes "github.com/aquasecurity/trivy/pkg/fanal/types" "github.com/docker/docker/client" ) @@ -52,9 +57,84 @@ func convertDockerImage(ctx context.Context, client client.ImageAPIClient, imgMe _ = os.Remove(f.Name()) } - return &image{ + img := &image{ opener: imageOpener(ctx, DockerCollector, imageID, f, client.ImageSave), inspect: inspect, history: configHistory(history), - }, cleanup, nil + } + + return img, cleanup, nil +} + +type fakeDockerContainer struct { + *image + *fakeContainer +} + +func (c *fakeDockerContainer) LayerByDiffID(hash string) (ftypes.LayerPath, error) { + return c.fakeContainer.LayerByDiffID(hash) +} + +func (c *fakeDockerContainer) LayerByDigest(hash string) (ftypes.LayerPath, error) { + return c.fakeContainer.LayerByDigest(hash) +} + +func (c *fakeDockerContainer) Layers() (layers []ftypes.LayerPath) { + return c.fakeContainer.Layers() +} + +// ScanDockerImageFromGraphDriver scans a docker image directly from the graph driver +func (c *Collector) ScanDockerImageFromGraphDriver(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, client client.ImageAPIClient, scanOptions sbom.ScanOptions) (sbom.Report, error) { + fanalImage, cleanup, err := convertDockerImage(ctx, client, imgMeta) + if cleanup != nil { + defer cleanup() + } + + if err != nil { + return nil, fmt.Errorf("unable to convert docker image, err: %w", err) + } + + if fanalImage.inspect.GraphDriver.Name == "overlay2" { + var layers []string + if layerDirs, ok := fanalImage.inspect.GraphDriver.Data["LowerDir"]; ok { + layers = append(layers, strings.Split(layerDirs, ":")...) + } + + if layerDirs, ok := fanalImage.inspect.GraphDriver.Data["UpperDir"]; ok { + layers = append(layers, strings.Split(layerDirs, ":")...) 
+ } + + if env.IsContainerized() { + for i, layer := range layers { + layers[i] = containersimage.SanitizeHostPath(layer) + } + } + + fakeContainer := &fakeDockerContainer{ + image: fanalImage, + fakeContainer: &fakeContainer{ + layerIDs: fanalImage.inspect.RootFS.Layers, + layerPaths: layers, + imgMeta: imgMeta, + }, + } + + return c.scanOverlayFS(ctx, layers, fakeContainer, imgMeta, scanOptions) + } + + return nil, fmt.Errorf("unsupported graph driver: %s", fanalImage.inspect.GraphDriver.Name) +} + +// ScanDockerImage scans a docker image by exporting it and scanning the tarball +func (c *Collector) ScanDockerImage(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, client client.ImageAPIClient, scanOptions sbom.ScanOptions) (sbom.Report, error) { + fanalImage, cleanup, err := convertDockerImage(ctx, client, imgMeta) + if cleanup != nil { + defer cleanup() + } + + if err != nil { + return nil, fmt.Errorf("unable to convert docker image, err: %w", err) + } + + return c.scanImage(ctx, fanalImage, imgMeta, scanOptions) } diff --git a/pkg/util/trivy/image.go b/pkg/util/trivy/image.go index 93fa12e3bd5ab..adf2e650004b6 100644 --- a/pkg/util/trivy/image.go +++ b/pkg/util/trivy/image.go @@ -299,3 +299,7 @@ func (img *image) Name() string { func (img *image) ID() (string, error) { return fimage.ID(img) } + +func (img *image) Layers() ([]v1.Layer, error) { + return nil, nil +} diff --git a/pkg/util/trivy/overlayfs.go b/pkg/util/trivy/overlayfs.go index f1f8e243983cc..34c08105884bb 100644 --- a/pkg/util/trivy/overlayfs.go +++ b/pkg/util/trivy/overlayfs.go @@ -3,240 +3,109 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build linux +//go:build trivy // Package trivy implement a simple overlayfs like filesystem to be able to // scan through layered filesystems. package trivy import ( + "context" "errors" "fmt" - "io/fs" - "os" - "path" - "sort" - "syscall" - "golang.org/x/sys/unix" -) - -// whiteoutCharDev is defined as zero and is not const only for testing as it -// is not allowed to mknod a 0/0 char dev in userns. -var whiteoutCharDev uint64 // = 0 - -var whiteout *fs.DirEntry - -type filesystem struct { - layers []string -} - -type file struct { - *os.File - ofs filesystem - fi fs.FileInfo - name string -} + workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" + "github.com/DataDog/datadog-agent/pkg/sbom" + "github.com/DataDog/datadog-agent/pkg/util/log" -// NewFS returns a fs.ReadDirFS consisting of merging the given layer paths. -func NewFS(layers []string) interface { - fs.FS - fs.ReadDirFS - fs.StatFS -} { - return &filesystem{layers[:]} -} - -// Open implements fs.StatFS. -func (ofs filesystem) Stat(name string) (fs.FileInfo, error) { - name = path.Join("/", name)[1:] - if name == "" { - name = "." - } - _, fi, err := ofs.stat(name) - return fi, err -} + "github.com/aquasecurity/trivy/pkg/fanal/applier" + local "github.com/aquasecurity/trivy/pkg/fanal/artifact/container" + ftypes "github.com/aquasecurity/trivy/pkg/fanal/types" + v1 "github.com/google/go-containerregistry/pkg/v1" +) -// Open implements fs.FS. 
-func (ofs filesystem) Open(name string) (fs.File, error) { - name = path.Join("/", name)[1:] - layerIndex, fi, err := ofs.stat(name) - if err != nil { - err.(*os.PathError).Op = "open" - return nil, err - } - f, err := os.Open(ofs.path(layerIndex, name)) - if err != nil { - return nil, &os.PathError{Op: "open", Path: name, Err: err} +type fakeContainer struct { + layerIDs []string + imgMeta *workloadmeta.ContainerImageMetadata + layerPaths []string +} + +func (c *fakeContainer) LayerByDiffID(hash string) (ftypes.LayerPath, error) { + for i, layer := range c.layerIDs { + diffID, _ := v1.NewHash(layer) + if diffID.String() == hash { + return ftypes.LayerPath{ + DiffID: diffID.String(), + Path: c.layerPaths[i], + Digest: c.imgMeta.Layers[i].Digest, + }, nil + } } - return &file{File: f, ofs: ofs, fi: fi, name: name}, nil + return ftypes.LayerPath{}, errors.New("not found") } -func (ofs filesystem) path(layerIndex int, name string) string { - if !fs.ValidPath(name) { - panic(fmt.Errorf("unexpected invalid path name %q", name)) +func (c *fakeContainer) LayerByDigest(hash string) (ftypes.LayerPath, error) { + for i, layer := range c.layerIDs { + diffID, _ := v1.NewHash(layer) + if hash == c.imgMeta.Layers[i].Digest { + return ftypes.LayerPath{ + DiffID: diffID.String(), + Path: c.layerPaths[i], + Digest: c.imgMeta.Layers[i].Digest, + }, nil + } } - return path.Join(ofs.layers[layerIndex], name) + return ftypes.LayerPath{}, errors.New("not found") } -func (ofs filesystem) stat(name string) (int, fs.FileInfo, error) { - var errf error - for layerIndex := range ofs.layers { - fi, err := os.Stat(ofs.path(layerIndex, name)) - if errors.Is(err, syscall.ENOENT) || errors.Is(err, syscall.ENOTDIR) { - // When path does not exist, overlayfs does not verify that a - // whiteout file has been created as one of the parent dir in the - // current layer. Meaning you can open file from lower dirs even - // if a whiteout or opaque directory has been created on an upper - // layer. - continue - } - if err != nil { - errf = err - break - } - if isWhiteout(fi) { - break - } - return layerIndex, fi, nil +func (c *fakeContainer) Layers() (layers []ftypes.LayerPath) { + for i, layer := range c.layerIDs { + diffID, _ := v1.NewHash(layer) + layers = append(layers, ftypes.LayerPath{ + DiffID: diffID.String(), + Path: c.layerPaths[i], + Digest: c.imgMeta.Layers[i].Digest, + }) } - if errf == nil { - errf = syscall.ENOENT - } - return 0, nil, &os.PathError{Op: "stat", Path: name, Err: errf} -} -// ReadDir implements fs.ReadDirFS. -func (ofs filesystem) ReadDir(name string) ([]fs.DirEntry, error) { - return ofs.readDirN(name, -1) + return layers } -func (ofs filesystem) readDirN(name string, n int) ([]fs.DirEntry, error) { - name = path.Join("/", name)[1:] - if name == "" { - name = "." 
- } - - var entriesMap map[string]*fs.DirEntry - var err error - var ok bool - for layerIndex := range ofs.layers { - if ok, err = ofs.readDirLayer(layerIndex, name, n, &entriesMap); ok { - break - } - } - if err == nil && entriesMap == nil { - err = syscall.ENOENT - } +func (c *Collector) scanOverlayFS(ctx context.Context, layers []string, ctr ftypes.Container, imgMeta *workloadmeta.ContainerImageMetadata, scanOptions sbom.ScanOptions) (sbom.Report, error) { + cache, err := c.getCache() if err != nil { - return []fs.DirEntry{}, &os.PathError{Op: "readdirent", Path: name, Err: err} + return nil, err } - entries := make([]fs.DirEntry, 0, len(entriesMap)) - for _, entry := range entriesMap { - if entry != whiteout { - entries = append(entries, *entry) - } + if cache == nil { + return nil, errors.New("failed to get cache for scan") } - sort.Slice(entries, func(i, j int) bool { - return entries[i].Name() < entries[j].Name() - }) - if n > 0 && len(entries) > n { - entries = entries[:n] - } - return entries, nil -} -func (ofs filesystem) readDirLayer(layerIndex int, name string, n int, entriesMap *map[string]*fs.DirEntry) (bool, error) { - fullname := ofs.path(layerIndex, name) - - di, err := os.Stat(fullname) - if errors.Is(err, syscall.ENOENT) || errors.Is(err, syscall.ENOTDIR) { - return false, nil - } + containerArtifact, err := local.NewArtifact(ctr, cache, NewFSWalker(), getDefaultArtifactOption(scanOptions)) if err != nil { - return true, err - } - if isWhiteout(di) { - return true, syscall.ENOENT - } - if !di.IsDir() { - return true, syscall.ENOTDIR + return nil, err } - d, err := os.Open(fullname) - if err != nil { - return true, err - } + log.Debugf("Generating SBOM for image %s using overlayfs %+v", imgMeta.ID, layers) - entries, err := d.ReadDir(n) + trivyReport, err := c.scan(ctx, containerArtifact, applier.NewApplier(cache), imgMeta, cache, false) if err != nil { - return true, err - } - if *entriesMap == nil { - *entriesMap = make(map[string]*fs.DirEntry) - } - for entryIndex, entry := range entries { - entryName := entry.Name() - if _, exists := (*entriesMap)[entryName]; !exists { - entryPtr := &entries[entryIndex] - if entry.Type().IsRegular() { - (*entriesMap)[entryName] = entryPtr - } else { - ei, err := entry.Info() - if err != nil { - return true, err - } - if isWhiteout(ei) { - (*entriesMap)[entryName] = whiteout - } else { - (*entriesMap)[entryName] = entryPtr - } - } + if imgMeta != nil { + return nil, fmt.Errorf("unable to marshal report to sbom format for image %s, err: %w", imgMeta.ID, err) } + return nil, fmt.Errorf("unable to marshal report to sbom format, err: %w", err) } - return isOpaqueDir(d), nil -} - -// ReadDir implements fs.ReadDirFile. -func (f *file) ReadDir(n int) ([]fs.DirEntry, error) { - if !f.fi.IsDir() { - return nil, &os.PathError{Op: "readdirent", Path: f.name, Err: syscall.ENOTDIR} + log.Debugf("Found OS: %+v", trivyReport.Metadata.OS) + pkgCount := 0 + for _, results := range trivyReport.Results { + pkgCount += len(results.Packages) } - return f.ofs.readDirN(f.name, n) -} - -// Read implements fs.File. -func (f *file) Read(b []byte) (int, error) { - return f.File.Read(b) -} + log.Debugf("Found %d packages", pkgCount) -// Stat implements fs.File. -func (f *file) Stat() (fs.FileInfo, error) { - return f.fi, nil -} - -// Close implements fs.File. 
-func (f *file) Close() error { - return f.File.Close() -} - -var _ fs.ReadDirFile = &file{} - -func isWhiteout(fm fs.FileInfo) bool { - return fm.Mode()&fs.ModeCharDevice != 0 && uint64(fm.Sys().(*syscall.Stat_t).Rdev) == whiteoutCharDev -} - -func isOpaqueDir(d *os.File) bool { - var data [1]byte - var sz int - var err error - for { - sz, err = unix.Fgetxattr(int(d.Fd()), "trusted.overlay.opaque", data[:]) - if err != unix.EINTR { - break - } - } - return sz == 1 && data[0] == 'y' + return &Report{ + Report: trivyReport, + id: imgMeta.ID, + marshaler: c.marshaler, + }, nil } diff --git a/pkg/util/trivy/trivy.go b/pkg/util/trivy/trivy.go index 63163ff3ae307..894aca311ae9a 100644 --- a/pkg/util/trivy/trivy.go +++ b/pkg/util/trivy/trivy.go @@ -12,25 +12,17 @@ import ( "context" "errors" "fmt" - "io/fs" - "os" - "path/filepath" "runtime" "sort" - "strings" "sync" - "github.com/containerd/containerd/mount" + "golang.org/x/xerrors" "github.com/DataDog/datadog-agent/comp/core/config" workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/config/env" "github.com/DataDog/datadog-agent/pkg/sbom" - containersimage "github.com/DataDog/datadog-agent/pkg/util/containers/image" - "github.com/DataDog/datadog-agent/pkg/util/crio" "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/DataDog/datadog-agent/pkg/util/optional" - + "github.com/DataDog/datadog-agent/pkg/util/option" "github.com/aquasecurity/trivy-db/pkg/db" "github.com/aquasecurity/trivy/pkg/fanal/analyzer" "github.com/aquasecurity/trivy/pkg/fanal/applier" @@ -38,14 +30,13 @@ import ( image2 "github.com/aquasecurity/trivy/pkg/fanal/artifact/image" local2 "github.com/aquasecurity/trivy/pkg/fanal/artifact/local" ftypes "github.com/aquasecurity/trivy/pkg/fanal/types" + "github.com/aquasecurity/trivy/pkg/fanal/walker" "github.com/aquasecurity/trivy/pkg/sbom/cyclonedx" "github.com/aquasecurity/trivy/pkg/scanner" "github.com/aquasecurity/trivy/pkg/scanner/langpkg" - "github.com/aquasecurity/trivy/pkg/scanner/local" "github.com/aquasecurity/trivy/pkg/scanner/ospkg" "github.com/aquasecurity/trivy/pkg/types" "github.com/aquasecurity/trivy/pkg/vulnerability" - "github.com/docker/docker/client" // This is required to load sqlite based RPM databases _ "modernc.org/sqlite" @@ -77,12 +68,12 @@ type Collector struct { langScanner langpkg.Scanner vulnClient vulnerability.Client marshaler cyclonedx.Marshaler - wmeta optional.Option[workloadmeta.Component] + wmeta option.Option[workloadmeta.Component] } var globalCollector *Collector -func getDefaultArtifactOption(root string, opts sbom.ScanOptions) artifact.Option { +func getDefaultArtifactOption(opts sbom.ScanOptions) artifact.Option { parallel := 1 if opts.Fast { parallel = runtime.NumCPU() @@ -95,18 +86,11 @@ func getDefaultArtifactOption(root string, opts sbom.ScanOptions) artifact.Optio Parallel: parallel, SBOMSources: []string{}, DisabledHandlers: DefaultDisabledHandlers(), - WalkOption: artifact.WalkOption{ - ErrorCallback: func(_ string, err error) error { - if errors.Is(err, fs.ErrPermission) || errors.Is(err, os.ErrNotExist) { - return nil - } - return err - }, - }, + WalkerOption: walker.Option{}, } if len(opts.Analyzers) == 1 && opts.Analyzers[0] == OSAnalyzers { - option.OnlyDirs = []string{ + option.WalkerOption.OnlyDirs = []string{ "/etc/*", "/lib/apk/db/*", "/usr/lib/*", @@ -114,14 +98,6 @@ func getDefaultArtifactOption(root string, opts sbom.ScanOptions) artifact.Optio "/var/lib/dpkg/**", "/var/lib/rpm/*", } - if root != "" { 
- // OnlyDirs is handled differently for image than for filesystem. - // This needs to be fixed properly but in the meantime, use absolute - // paths for fs and relative paths for images. - for i := range option.OnlyDirs { - option.OnlyDirs[i] = filepath.Join(root, option.OnlyDirs[i]) - } - } } return option @@ -164,6 +140,7 @@ func DefaultDisabledCollectors(enabledAnalyzers []string) []analyzer.Type { analyzer.TypeSBOM, analyzer.TypeUbuntuESM, analyzer.TypeLicenseFile, + analyzer.TypeRpmArchive, ) return disabledAnalyzers } @@ -174,7 +151,7 @@ func DefaultDisabledHandlers() []ftypes.HandlerType { } // NewCollector returns a new collector -func NewCollector(cfg config.Component, wmeta optional.Option[workloadmeta.Component]) (*Collector, error) { +func NewCollector(cfg config.Component, wmeta option.Option[workloadmeta.Component]) (*Collector, error) { return &Collector{ config: collectorConfig{ clearCacheOnClose: cfg.GetBool("sbom.clear_cache_on_exit"), @@ -190,7 +167,7 @@ func NewCollector(cfg config.Component, wmeta optional.Option[workloadmeta.Compo } // GetGlobalCollector gets the global collector -func GetGlobalCollector(cfg config.Component, wmeta optional.Option[workloadmeta.Component]) (*Collector, error) { +func GetGlobalCollector(cfg config.Component, wmeta option.Option[workloadmeta.Component]) (*Collector, error) { if globalCollector != nil { return globalCollector, nil } @@ -246,86 +223,13 @@ func (c *Collector) getCache() (CacheWithCleaner, error) { return c.persistentCache, nil } -// ScanDockerImageFromGraphDriver scans a docker image directly from the graph driver -func (c *Collector) ScanDockerImageFromGraphDriver(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, client client.ImageAPIClient, scanOptions sbom.ScanOptions) (sbom.Report, error) { - fanalImage, cleanup, err := convertDockerImage(ctx, client, imgMeta) - if cleanup != nil { - defer cleanup() - } - - if err != nil { - return nil, fmt.Errorf("unable to convert docker image, err: %w", err) - } - - if fanalImage.inspect.GraphDriver.Name == "overlay2" { - var layers []string - if layerDirs, ok := fanalImage.inspect.GraphDriver.Data["LowerDir"]; ok { - layers = append(layers, strings.Split(layerDirs, ":")...) - } - - if layerDirs, ok := fanalImage.inspect.GraphDriver.Data["UpperDir"]; ok { - layers = append(layers, strings.Split(layerDirs, ":")...) 
- } - - if env.IsContainerized() { - for i, layer := range layers { - layers[i] = containersimage.SanitizeHostPath(layer) - } - } - - return c.scanOverlayFS(ctx, layers, imgMeta, scanOptions) - } - - return nil, fmt.Errorf("unsupported graph driver: %s", fanalImage.inspect.GraphDriver.Name) -} - -// ScanDockerImage scans a docker image by exporting it and scanning the tarball -func (c *Collector) ScanDockerImage(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, client client.ImageAPIClient, scanOptions sbom.ScanOptions) (sbom.Report, error) { - fanalImage, cleanup, err := convertDockerImage(ctx, client, imgMeta) - if cleanup != nil { - defer cleanup() - } - - if err != nil { - return nil, fmt.Errorf("unable to convert docker image, err: %w", err) - } - - return c.scanImage(ctx, fanalImage, imgMeta, scanOptions) -} - -func (c *Collector) scanOverlayFS(ctx context.Context, layers []string, imgMeta *workloadmeta.ContainerImageMetadata, scanOptions sbom.ScanOptions) (sbom.Report, error) { - log.Debugf("Generating SBOM for image %s using overlayfs %+v", imgMeta.ID, layers) - overlayFsReader := NewFS(layers) - report, err := c.scanFilesystem(ctx, overlayFsReader, "/", imgMeta, scanOptions) - if err != nil { - return nil, err - } - - return report, nil -} - -// ScanCRIOImageFromOverlayFS scans the CRI-O image layers using OverlayFS. -func (c *Collector) ScanCRIOImageFromOverlayFS(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, client crio.Client, scanOptions sbom.ScanOptions) (sbom.Report, error) { - lowerDirs, err := client.GetCRIOImageLayers(imgMeta) - if err != nil { - return nil, fmt.Errorf("failed to retrieve layer directories: %w", err) - } - - report, err := c.scanOverlayFS(ctx, lowerDirs, imgMeta, scanOptions) - if err != nil { - return nil, err - } - - return report, nil -} - // scanFilesystem scans the specified directory and logs detailed scan steps. -func (c *Collector) scanFilesystem(ctx context.Context, fsys fs.FS, path string, imgMeta *workloadmeta.ContainerImageMetadata, scanOptions sbom.ScanOptions) (sbom.Report, error) { +func (c *Collector) scanFilesystem(ctx context.Context, path string, imgMeta *workloadmeta.ContainerImageMetadata, scanOptions sbom.ScanOptions) (sbom.Report, error) { // For filesystem scans, it is required to walk the filesystem to get the persistentCache key so caching does not add any value. 
// TODO: Cache directly the trivy report for container images cache := newMemoryCache() - fsArtifact, err := local2.NewArtifact(fsys, path, cache, getDefaultArtifactOption(".", scanOptions)) + fsArtifact, err := local2.NewArtifact(path, cache, NewFSWalker(), getDefaultArtifactOption(scanOptions)) if err != nil { return nil, fmt.Errorf("unable to create artifact from fs, err: %w", err) } @@ -353,8 +257,60 @@ func (c *Collector) scanFilesystem(ctx context.Context, fsys fs.FS, path string, } // ScanFilesystem scans file-system -func (c *Collector) ScanFilesystem(ctx context.Context, fsys fs.FS, path string, scanOptions sbom.ScanOptions) (sbom.Report, error) { - return c.scanFilesystem(ctx, fsys, path, nil, scanOptions) +func (c *Collector) ScanFilesystem(ctx context.Context, path string, scanOptions sbom.ScanOptions) (sbom.Report, error) { + return c.scanFilesystem(ctx, path, nil, scanOptions) +} + +type driver struct { + applier applier.Applier +} + +func (d *driver) Scan(_ context.Context, target, artifactKey string, blobKeys []string, _ types.ScanOptions) ( + results types.Results, osFound ftypes.OS, err error) { + + detail, err := d.applier.ApplyLayers(artifactKey, blobKeys) + switch { + case errors.Is(err, analyzer.ErrUnknownOS): + log.Debug("OS is not detected.") + + // Packages may contain OS-independent binary information even though OS is not detected. + if len(detail.Packages) != 0 { + detail.OS = ftypes.OS{Family: "none"} + } + + // If OS is not detected and repositories are detected, we'll try to use repositories as OS. + if detail.Repository != nil { + log.Debug("Package repository %s, version %s", string(detail.Repository.Family), detail.Repository.Release) + log.Debug("Assuming OS family %s, version %s", string(detail.Repository.Family), detail.Repository.Release) + detail.OS = ftypes.OS{ + Family: detail.Repository.Family, + Name: detail.Repository.Release, + } + } + case errors.Is(err, analyzer.ErrNoPkgsDetected): + log.Warn("No OS package is detected. Make sure you haven't deleted any files that contain information about the installed packages.") + log.Warn(`e.g. 
files under "/lib/apk/db/", "/var/lib/dpkg/" and "/var/lib/rpm"`) + case err != nil: + return nil, ftypes.OS{}, xerrors.Errorf("failed to apply layers: %w", err) + } + + scanTarget := types.ScanTarget{ + Name: target, + OS: detail.OS, + Repository: detail.Repository, + Packages: detail.Packages, + } + + result := types.Result{ + Target: fmt.Sprintf("%s (%s %s)", target, detail.OS.Family, detail.OS.Name), + Class: types.ClassOSPkg, + Type: scanTarget.OS.Family, + } + + sort.Sort(scanTarget.Packages) + result.Packages = scanTarget.Packages + + return []types.Result{result}, detail.OS, nil } func (c *Collector) scan(ctx context.Context, artifact artifact.Artifact, applier applier.Applier, imgMeta *workloadmeta.ContainerImageMetadata, cache CacheWithCleaner, useCache bool) (*types.Report, error) { @@ -368,11 +324,15 @@ func (c *Collector) scan(ctx context.Context, artifact artifact.Artifact, applie cache.setKeysForEntity(imgMeta.EntityID.ID, append(artifactReference.BlobIDs, artifactReference.ID)) } - s := scanner.NewScanner(local.NewScanner(applier, c.osScanner, c.langScanner, c.vulnClient), artifact) + s := scanner.NewScanner(&driver{applier: applier}, artifact) + trivyReport, err := s.ScanArtifact(ctx, types.ScanOptions{ - VulnType: []string{}, ScanRemovedPackages: false, - ListAllPackages: true, + PkgTypes: []types.PkgType{types.PkgTypeOS}, + PkgRelationships: []ftypes.Relationship{ + ftypes.RelationshipUnknown, + }, + Scanners: types.Scanners{types.VulnerabilityScanner}, }) if err != nil { return nil, err @@ -387,7 +347,7 @@ func (c *Collector) scanImage(ctx context.Context, fanalImage ftypes.Image, imgM return nil, err } - imageArtifact, err := image2.NewArtifact(fanalImage, cache, getDefaultArtifactOption("", scanOptions)) + imageArtifact, err := image2.NewArtifact(fanalImage, cache, getDefaultArtifactOption(scanOptions)) if err != nil { return nil, fmt.Errorf("unable to create artifact from image, err: %w", err) } @@ -403,18 +363,3 @@ func (c *Collector) scanImage(ctx context.Context, fanalImage ftypes.Image, imgM marshaler: c.marshaler, }, nil } - -func extractLayersFromOverlayFSMounts(mounts []mount.Mount) []string { - var layers []string - for _, mount := range mounts { - for _, opt := range mount.Options { - for _, prefix := range []string{"upperdir=", "lowerdir="} { - trimmedOpt := strings.TrimPrefix(opt, prefix) - if trimmedOpt != opt { - layers = append(layers, strings.Split(trimmedOpt, ":")...) - } - } - } - } - return layers -} diff --git a/pkg/util/trivy/trivy_containerd.go b/pkg/util/trivy/trivy_containerd.go deleted file mode 100644 index bd036354c514d..0000000000000 --- a/pkg/util/trivy/trivy_containerd.go +++ /dev/null @@ -1,119 +0,0 @@ -// Unless explicitly stated otherwise all files in this repository are licensed -// under the Apache License Version 2.0. -// This product includes software developed at Datadog (https://www.datadoghq.com/). -// Copyright 2016-present Datadog, Inc. 
- -//go:build trivy && containerd - -// Package trivy holds the scan components -package trivy - -import ( - "context" - "fmt" - "os" - "time" - - workloadmeta "github.com/DataDog/datadog-agent/comp/core/workloadmeta/def" - "github.com/DataDog/datadog-agent/pkg/sbom" - cutil "github.com/DataDog/datadog-agent/pkg/util/containerd" - "github.com/DataDog/datadog-agent/pkg/util/log" - "github.com/containerd/containerd" - "github.com/containerd/containerd/leases" - "github.com/containerd/containerd/namespaces" - "github.com/containerd/errdefs" -) - -const ( - cleanupTimeout = 30 * time.Second -) - -// ContainerdAccessor is a function that should return a containerd client -type ContainerdAccessor func() (cutil.ContainerdItf, error) - -// ScanContainerdImageFromSnapshotter scans containerd image directly from the snapshotter -func (c *Collector) ScanContainerdImageFromSnapshotter(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, img containerd.Image, client cutil.ContainerdItf, scanOptions sbom.ScanOptions) (sbom.Report, error) { - // Computing duration of containerd lease - deadline, _ := ctx.Deadline() - expiration := deadline.Sub(time.Now().Add(cleanupTimeout)) - clClient := client.RawClient() - imageID := imgMeta.ID - - mounts, err := client.Mounts(ctx, expiration, imgMeta.Namespace, img) - if err != nil { - return nil, fmt.Errorf("unable to get mounts for image %s, err: %w", imgMeta.ID, err) - } - - layers := extractLayersFromOverlayFSMounts(mounts) - if len(layers) == 0 { - return nil, fmt.Errorf("unable to extract layers from overlayfs mounts %+v for image %s", mounts, imgMeta.ID) - } - - ctx = namespaces.WithNamespace(ctx, imgMeta.Namespace) - // Adding a lease to cleanup dandling snaphots at expiration - ctx, done, err := clClient.WithLease(ctx, - leases.WithID(imageID), - leases.WithExpiration(expiration), - leases.WithLabels(map[string]string{ - "containerd.io/gc.ref.snapshot." 
+ containerd.DefaultSnapshotter: imageID, - }), - ) - if err != nil && !errdefs.IsAlreadyExists(err) { - return nil, fmt.Errorf("unable to get a lease, err: %w", err) - } - - report, err := c.scanOverlayFS(ctx, layers, imgMeta, scanOptions) - - if err := done(ctx); err != nil { - log.Warnf("Unable to cancel containerd lease with id: %s, err: %v", imageID, err) - } - - return report, err -} - -// ScanContainerdImage scans containerd image by exporting it and scanning the tarball -func (c *Collector) ScanContainerdImage(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, img containerd.Image, client cutil.ContainerdItf, scanOptions sbom.ScanOptions) (sbom.Report, error) { - fanalImage, cleanup, err := convertContainerdImage(ctx, client.RawClient(), imgMeta, img) - if cleanup != nil { - defer cleanup() - } - if err != nil { - return nil, fmt.Errorf("unable to convert containerd image, err: %w", err) - } - - return c.scanImage(ctx, fanalImage, imgMeta, scanOptions) -} - -// ScanContainerdImageFromFilesystem scans containerd image from file-system -func (c *Collector) ScanContainerdImageFromFilesystem(ctx context.Context, imgMeta *workloadmeta.ContainerImageMetadata, img containerd.Image, client cutil.ContainerdItf, scanOptions sbom.ScanOptions) (sbom.Report, error) { - imagePath, err := os.MkdirTemp("", "containerd-image-*") - if err != nil { - return nil, fmt.Errorf("unable to create temp dir, err: %w", err) - } - defer func() { - err := os.RemoveAll(imagePath) - if err != nil { - log.Errorf("Unable to remove temp dir: %s, err: %v", imagePath, err) - } - }() - - // Computing duration of containerd lease - deadline, _ := ctx.Deadline() - expiration := deadline.Sub(time.Now().Add(cleanupTimeout)) - - cleanUp, err := client.MountImage(ctx, expiration, imgMeta.Namespace, img, imagePath) - if err != nil { - return nil, fmt.Errorf("unable to mount containerd image, err: %w", err) - } - - defer func() { - cleanUpContext, cleanUpContextCancel := context.WithTimeout(context.Background(), cleanupTimeout) - err := cleanUp(cleanUpContext) - cleanUpContextCancel() - if err != nil { - log.Errorf("Unable to clean up mounted image, err: %v", err) - } - }() - - return c.scanFilesystem(ctx, os.DirFS("/"), imagePath, imgMeta, scanOptions) -} diff --git a/pkg/util/trivy/trivy_test.go b/pkg/util/trivy/trivy_test.go index 20bd729cde6c5..c27dc1e1553fe 100644 --- a/pkg/util/trivy/trivy_test.go +++ b/pkg/util/trivy/trivy_test.go @@ -3,7 +3,7 @@ // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. -//go:build trivy +//go:build trivy && containerd // Package trivy holds the scan components package trivy diff --git a/pkg/util/trivy/walker.go b/pkg/util/trivy/walker.go new file mode 100644 index 0000000000000..e71a24c86c1b9 --- /dev/null +++ b/pkg/util/trivy/walker.go @@ -0,0 +1,149 @@ +// Unless explicitly stated otherwise all files in this repository are licensed +// under the Apache License Version 2.0. +// This product includes software developed at Datadog (https://www.datadoghq.com/). +// Copyright 2016-present Datadog, Inc. 
+ +//go:build trivy + +// Package trivy holds the scan components +package trivy + +import ( + "errors" + "io/fs" + "os" + "path/filepath" + "strings" + + "golang.org/x/xerrors" + + "github.com/aquasecurity/trivy/pkg/fanal/utils" + "github.com/aquasecurity/trivy/pkg/fanal/walker" + xio "github.com/aquasecurity/trivy/pkg/x/io" +) + +var defaultSkipDirs = []string{ + "**/.git", + "proc", + "sys", + "dev", +} + +// FSWalker is the filesystem walker used for SBOM generation +type FSWalker struct { + walker *walker.FS +} + +// NewFSWalker returns a new filesystem walker +func NewFSWalker() *FSWalker { + return &FSWalker{ + walker: walker.NewFS(), + } +} + +// Walk walks the filesystem rooted at root, calling fn for each unfiltered file. +func (w *FSWalker) Walk(root string, opt walker.Option, fn walker.WalkFunc) error { + buildPaths := func(paths []string) []string { + buildPaths := make([]string, len(paths)) + for i, path := range paths { + buildPaths[i] = root + path + } + return buildPaths + } + opt.SkipFiles = w.walker.BuildSkipPaths(root, buildPaths(opt.SkipFiles)) + opt.SkipDirs = w.walker.BuildSkipPaths(root, buildPaths(opt.SkipDirs)) + opt.SkipDirs = append(opt.SkipDirs, defaultSkipDirs...) + opt.OnlyDirs = w.walker.BuildSkipPaths(root, buildPaths(opt.OnlyDirs)) + + walkDirFunc := w.WalkDirFunc(root, fn, opt) + walkDirFunc = w.onError(walkDirFunc) + + // Walk the filesystem + if err := fs.WalkDir(os.DirFS(root), ".", walkDirFunc); err != nil { + return xerrors.Errorf("walk dir error: %w", err) + } + + return nil +} + +// WalkDirFunc is the type of the function called by [WalkDir] to visit +// each file or directory. +func (w *FSWalker) WalkDirFunc(root string, fn walker.WalkFunc, opt walker.Option) fs.WalkDirFunc { + return func(filePath string, d fs.DirEntry, err error) error { + if err != nil { + if errors.Is(err, fs.ErrPermission) || errors.Is(err, os.ErrNotExist) { + return nil + } + return err + } + + if !strings.HasPrefix(filePath, "/") { + filePath = root + "/" + filePath + } + + relPath, err := filepath.Rel(root, filePath) + if err != nil { + return xerrors.Errorf("filepath rel (%s): %w", relPath, err) + } + relPath = filepath.ToSlash(relPath) + + // Skip unnecessary files + switch { + case d.IsDir(): + if utils.SkipPath(relPath, opt.SkipDirs) { + return filepath.SkipDir + } + if utils.OnlyPath(relPath, opt.OnlyDirs) { + return filepath.SkipDir + } + return nil + case !opt.AllFiles && !d.Type().IsRegular(): + return nil + case utils.SkipPath(relPath, opt.SkipFiles): + return nil + case utils.OnlyPath(relPath, opt.OnlyDirs): + return nil + } + + info, err := d.Info() + if err != nil { + return xerrors.Errorf("file info error: %w", err) + } + + if err = fn(relPath, info, fileOpener(filePath)); err != nil { + return xerrors.Errorf("failed to analyze file: %w", err) + } + + return nil + } +} + +func (w *FSWalker) onError(wrapped fs.WalkDirFunc) fs.WalkDirFunc { + return func(filePath string, d fs.DirEntry, err error) error { + err = wrapped(filePath, d, err) + switch { + // Unwrap fs.SkipDir error + case errors.Is(err, fs.SkipDir): + return fs.SkipDir + // Ignore permission errors + case os.IsPermission(err): + return nil + case err != nil: + // halt traversal on any other error + return xerrors.Errorf("unknown error with %s: %w", filePath, err) + } + return nil + } +} + +// fileOpener returns a function opening a file. 
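// A hedged usage sketch for the walker above (standalone; the root path is
// hypothetical, visit is a placeholder of type walker.WalkFunc, and
// walker.Option is left at its zero value, so only defaultSkipDirs applies):
//
//	w := NewFSWalker()
//	// visit receives the root-relative path, its os.FileInfo, and an opener
//	// for the file contents.
//	err := w.Walk("/host/root", walker.Option{}, visit)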
+func fileOpener(filePath string) func() (xio.ReadSeekCloserAt, error) { + return func() (xio.ReadSeekCloserAt, error) { + f, err := os.Open(filePath) + if err != nil { + return nil, err + } + + return f, nil + } +} diff --git a/pkg/util/uuid/go.mod b/pkg/util/uuid/go.mod index 0f8d47fc3bb8c..b2fb15c4c93fc 100644 --- a/pkg/util/uuid/go.mod +++ b/pkg/util/uuid/go.mod @@ -11,8 +11,8 @@ replace ( require ( github.com/DataDog/datadog-agent/pkg/util/cache v0.56.0-rc.3 github.com/DataDog/datadog-agent/pkg/util/log v0.56.0-rc.3 - github.com/shirou/gopsutil/v4 v4.24.11 - golang.org/x/sys v0.28.0 + github.com/shirou/gopsutil/v4 v4.24.12 + golang.org/x/sys v0.29.0 ) require ( @@ -21,9 +21,9 @@ require ( github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect github.com/ebitengine/purego v0.8.1 // indirect github.com/go-ole/go-ole v1.3.0 // indirect - github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect + github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect github.com/patrickmn/go-cache v2.1.0+incompatible // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect diff --git a/pkg/util/uuid/go.sum b/pkg/util/uuid/go.sum index 7399205747094..abc053d6e6b21 100644 --- a/pkg/util/uuid/go.sum +++ b/pkg/util/uuid/go.sum @@ -7,25 +7,24 @@ github.com/ebitengine/purego v0.8.1/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= -github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI= +github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat 
v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/shirou/gopsutil/v4 v4.24.11 h1:WaU9xqGFKvFfsUv94SXcUPD7rCkU0vr/asVdQOBZNj8= -github.com/shirou/gopsutil/v4 v4.24.11/go.mod h1:s4D/wg+ag4rG0WO7AiTj2BeYCRhym0vM7DHbZRxnIT8= +github.com/shirou/gopsutil/v4 v4.24.12 h1:qvePBOk20e0IKA1QXrIIU+jmk+zEiYVVx06WjBRlZo4= +github.com/shirou/gopsutil/v4 v4.24.12/go.mod h1:DCtMPAad2XceTeIAbGyVfycbYQNBGk2P8cvDi7/VN9o= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= @@ -39,8 +38,8 @@ go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0 golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/util/winutil/go.mod b/pkg/util/winutil/go.mod index 172db988efae7..4c0f5af1ed70d 100644 --- a/pkg/util/winutil/go.mod +++ b/pkg/util/winutil/go.mod @@ -12,7 +12,7 @@ require ( github.com/fsnotify/fsnotify v1.8.0 github.com/stretchr/testify v1.10.0 go.uber.org/atomic v1.11.0 - golang.org/x/sys v0.28.0 + golang.org/x/sys v0.29.0 ) require ( diff --git a/pkg/util/winutil/go.sum b/pkg/util/winutil/go.sum index 788135f619bbb..00e94596df21d 100644 --- a/pkg/util/winutil/go.sum +++ b/pkg/util/winutil/go.sum @@ -16,8 +16,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/pkg/util/winutil/users.go b/pkg/util/winutil/users.go index be17fe6a7cdb2..171017a32cbac 100644 --- a/pkg/util/winutil/users.go +++ b/pkg/util/winutil/users.go @@ -43,25 +43,32 @@ func GetSidFromUser() (*windows.SID, error) { } // IsUserAnAdmin returns true is a user is a member of the Administrator's group -// TODO: Microsoft does not recommend using this function, instead CheckTokenMembership should be used. // // https://learn.microsoft.com/en-us/windows/win32/api/shlobj_core/nf-shlobj_core-isuseranadmin // //revive:disable-next-line:var-naming Name is intended to match the Windows API name func IsUserAnAdmin() (bool, error) { - shell32 := windows.NewLazySystemDLL("Shell32.dll") - defer windows.FreeLibrary(windows.Handle(shell32.Handle())) - - isUserAnAdminProc := shell32.NewProc("IsUserAnAdmin") - ret, _, winError := isUserAnAdminProc.Call() - - if winError != windows.NTE_OP_OK { - return false, fmt.Errorf("IsUserAnAdmin returns error code %d", winError) + var administratorsGroup *windows.SID + err := windows.AllocateAndInitializeSid(&windows.SECURITY_NT_AUTHORITY, + 2, + windows.SECURITY_BUILTIN_DOMAIN_RID, + windows.DOMAIN_ALIAS_RID_ADMINS, + 0, 0, 0, 0, 0, 0, + &administratorsGroup) + if err != nil { + return false, fmt.Errorf("could not get local system SID: %w", err) } - if ret == 0 { - return false, nil + defer windows.FreeSid(administratorsGroup) + + // call CheckTokenMembership to determine if the current user is a member of the administrators group + var isAdmin bool + err = CheckTokenMembership(0, administratorsGroup, &isAdmin) + if err != nil { + return false, fmt.Errorf("could not check token membership: %w", err) } - return true, nil + + return isAdmin, nil + } // GetLocalSystemSID returns the SID of the Local System account diff --git a/pkg/util/winutil/winsec.go b/pkg/util/winutil/winsec.go index e48762e7efe81..391b986e36fcf 100644 --- a/pkg/util/winutil/winsec.go +++ b/pkg/util/winutil/winsec.go @@ -21,6 +21,7 @@ var ( procGetAclInformation = advapi32.NewProc("GetAclInformation") procGetNamedSecurityInfo = advapi32.NewProc("GetNamedSecurityInfoW") procGetAce = advapi32.NewProc("GetAce") + procCheckTokenMembership = advapi32.NewProc("CheckTokenMembership") //revive:enable:var-naming ) @@ -133,3 +134,23 @@ func GetAce(acl *ACL, index uint32, ace **ACCESS_ALLOWED_ACE) error { } return nil } + +// CheckTokenMembership calls Windows 'CheckTokenMembership' function to determine +// whether a specified security identifier (SID) is enabled in the access token +// +// https://learn.microsoft.com/en-us/windows/win32/api/securitybaseapi/nf-securitybaseapi-checktokenmembership +// +//revive:disable-next-line:var-naming Name is intended to match the Windows API name +func CheckTokenMembership(token windows.Token, sid *windows.SID, isMember *bool) error { + var isMemberInt int32 + ret, _, _ := procCheckTokenMembership.Call( + uintptr(token), + uintptr(unsafe.Pointer(sid)), + uintptr(unsafe.Pointer(&isMemberInt)), + ) + if int(ret) == 0 { + return windows.GetLastError() + } + *isMember = isMemberInt != 0 + return nil +} diff --git a/release.json b/release.json index d9ef7eba6f38d..b6cd397296bf6 100644 --- a/release.json +++ b/release.json @@ -1,14 +1,13 @@ { "base_branch": "main", - "current_milestone": "7.62.0", + "current_milestone": "7.64.0", "last_stable": { "6": "6.53.0", - "7": "7.60.0" + "7": "7.61.0" }, "nightly": { - "INTEGRATIONS_CORE_VERSION": "master", - 
"OMNIBUS_SOFTWARE_VERSION": "0059a287d5543305c01a098740ba328aef7fa8ff", - "OMNIBUS_RUBY_VERSION": "650e39bb0b7c8d57ddabe21eb0588b368986aede", + "OMNIBUS_SOFTWARE_VERSION": "dafdaa1231032f6a2e30ca39573306b55db4b962", + "OMNIBUS_RUBY_VERSION": "49ba11883cdf5692a39095d1a036a1ef59a25210", "JMXFETCH_VERSION": "0.49.6", "JMXFETCH_HASH": "f06bdac1f8ec41daf9b9839ac883f1865a068b04810ea82197b8a6afb9369cb9", "MACOS_BUILD_VERSION": "master", @@ -22,12 +21,12 @@ "WINDOWS_APMINJECT_COMMENT": "The WINDOWS_APMINJECT entries below should NOT be added to the release targets", "WINDOWS_APMINJECT_MODULE": "release-signed", "WINDOWS_APMINJECT_VERSION": "1.1.3", - "WINDOWS_APMINJECT_SHASUM": "5fdd62a84e640204386b9c28dc2e3ac5d9b8adde6427cb9f5914619f94d7b5bd" + "WINDOWS_APMINJECT_SHASUM": "5fdd62a84e640204386b9c28dc2e3ac5d9b8adde6427cb9f5914619f94d7b5bd", + "INTEGRATIONS_CORE_VERSION": "6782bb7cf5da2ce9e0ab77d8420ac849d31cb8a6" }, "nightly-a7": { - "INTEGRATIONS_CORE_VERSION": "master", - "OMNIBUS_SOFTWARE_VERSION": "0059a287d5543305c01a098740ba328aef7fa8ff", - "OMNIBUS_RUBY_VERSION": "650e39bb0b7c8d57ddabe21eb0588b368986aede", + "OMNIBUS_SOFTWARE_VERSION": "dafdaa1231032f6a2e30ca39573306b55db4b962", + "OMNIBUS_RUBY_VERSION": "49ba11883cdf5692a39095d1a036a1ef59a25210", "JMXFETCH_VERSION": "0.49.6", "JMXFETCH_HASH": "f06bdac1f8ec41daf9b9839ac883f1865a068b04810ea82197b8a6afb9369cb9", "MACOS_BUILD_VERSION": "master", @@ -41,7 +40,8 @@ "WINDOWS_APMINJECT_COMMENT": "The WINDOWS_APMINJECT entries below should NOT be added to the release targets", "WINDOWS_APMINJECT_MODULE": "release-signed", "WINDOWS_APMINJECT_VERSION": "1.1.3", - "WINDOWS_APMINJECT_SHASUM": "5fdd62a84e640204386b9c28dc2e3ac5d9b8adde6427cb9f5914619f94d7b5bd" + "WINDOWS_APMINJECT_SHASUM": "5fdd62a84e640204386b9c28dc2e3ac5d9b8adde6427cb9f5914619f94d7b5bd", + "INTEGRATIONS_CORE_VERSION": "6782bb7cf5da2ce9e0ab77d8420ac849d31cb8a6" }, "release-a6": { "INTEGRATIONS_CORE_VERSION": "7.56.0-rc.2", @@ -49,7 +49,7 @@ "OMNIBUS_RUBY_VERSION": "7.56.0-rc.1", "JMXFETCH_VERSION": "0.49.6", "JMXFETCH_HASH": "f06bdac1f8ec41daf9b9839ac883f1865a068b04810ea82197b8a6afb9369cb9", - "SECURITY_AGENT_POLICIES_VERSION": "v0.62.0", + "SECURITY_AGENT_POLICIES_VERSION": "v0.63.0", "MACOS_BUILD_VERSION": "6.56.0-rc.3", "WINDOWS_DDNPM_DRIVER": "release-signed", "WINDOWS_DDNPM_VERSION": "2.7.1", @@ -64,7 +64,7 @@ "OMNIBUS_RUBY_VERSION": "7.56.0-rc.1", "JMXFETCH_VERSION": "0.49.6", "JMXFETCH_HASH": "f06bdac1f8ec41daf9b9839ac883f1865a068b04810ea82197b8a6afb9369cb9", - "SECURITY_AGENT_POLICIES_VERSION": "v0.62.0", + "SECURITY_AGENT_POLICIES_VERSION": "v0.63.0", "MACOS_BUILD_VERSION": "7.56.0-rc.3", "WINDOWS_DDNPM_DRIVER": "release-signed", "WINDOWS_DDNPM_VERSION": "2.7.1", diff --git a/releasenotes-dca/notes/add-resource-metadata-tags-to-orchestrator-ad98dafedded7e55.yaml b/releasenotes-dca/notes/add-resource-metadata-tags-to-orchestrator-ad98dafedded7e55.yaml new file mode 100644 index 0000000000000..94ccd1dcc7f13 --- /dev/null +++ b/releasenotes-dca/notes/add-resource-metadata-tags-to-orchestrator-ad98dafedded7e55.yaml @@ -0,0 +1,5 @@ +enhancements: + - | + Added support for `kubernetesResourcesLabelsAsTags` and `kubernetesResourcesAnnotationsAsTags` in the + orchestrator check. Kubernetes resources processed by the orchestrator check can now include labels + and annotations as tags, improving consistency with existing tagging configurations. 
diff --git a/releasenotes-dca/notes/delete_admission_webhooks-5c89ccd6c0d6ff8b.yaml b/releasenotes-dca/notes/delete_admission_webhooks-5c89ccd6c0d6ff8b.yaml new file mode 100644 index 0000000000000..61dc3b2cad986 --- /dev/null +++ b/releasenotes-dca/notes/delete_admission_webhooks-5c89ccd6c0d6ff8b.yaml @@ -0,0 +1,14 @@ +# Each section from every release note are combined when the +# CHANGELOG-DCA.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + The Cluster Agent is now able to delete `ValidatingAdmissionWebhook` and `MutatingAdmissionWebhook` + depending on the `admission_controller.validation.enabled` and `admission_controller.mutation.enabled` settings. + Note that `admission_controller.enabled` must be set to `true` to allow the Cluster Agent to + interact with the Kubernetes Admission Controller. diff --git a/releasenotes-dca/notes/fix-dca-configs-dispatched-telemetry-bd0f506a68144643.yaml b/releasenotes-dca/notes/fix-dca-configs-dispatched-telemetry-bd0f506a68144643.yaml new file mode 100644 index 0000000000000..4cc0de27d6068 --- /dev/null +++ b/releasenotes-dca/notes/fix-dca-configs-dispatched-telemetry-bd0f506a68144643.yaml @@ -0,0 +1,14 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fixes an issue with the + ``datadog.cluster_agent.cluster_checks.configs_dispatched`` metric emitted + by the Cluster Agent telemetry. The metric values could become inaccurate + after the Cluster Agent loses and then regains leader status. diff --git a/releasenotes-dca/notes/ksm-add-collector-mapping-apiservices-crds-828e899f4ed551f0.yaml b/releasenotes-dca/notes/ksm-add-collector-mapping-apiservices-crds-828e899f4ed551f0.yaml new file mode 100644 index 0000000000000..fef9828b7b745 --- /dev/null +++ b/releasenotes-dca/notes/ksm-add-collector-mapping-apiservices-crds-828e899f4ed551f0.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Add mapping for apiservices and customresourcedefinitions to KSM check to + prevent errors on startup with discovering resources. diff --git a/releasenotes/notes/KSM-metrics-collection-for-custom-resources-cd39f52667850b60.yaml b/releasenotes/notes/KSM-metrics-collection-for-custom-resources-cd39f52667850b60.yaml new file mode 100644 index 0000000000000..e7e92b9a39b37 --- /dev/null +++ b/releasenotes/notes/KSM-metrics-collection-for-custom-resources-cd39f52667850b60.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. 
So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + Add the capability in ``kubernetes_state_core`` check to collect metrics for Kubernetes custom resources diff --git a/releasenotes/notes/[agent]-add-kube_cronjob-tag-to-job-duration-add9616702d61668.yaml b/releasenotes/notes/[agent]-add-kube_cronjob-tag-to-job-duration-add9616702d61668.yaml new file mode 100644 index 0000000000000..0f329291a8183 --- /dev/null +++ b/releasenotes/notes/[agent]-add-kube_cronjob-tag-to-job-duration-add9616702d61668.yaml @@ -0,0 +1,14 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Adds a kube_cronjob tag to kubernetes_state.job.duration metric. +fixes: + - | + Fixes some existing metric transformer unit tests by correcting their assertions. diff --git a/releasenotes/notes/[agent]increase-default-ecs_metadata_timeout-1dbdc9e7e1118531.yaml b/releasenotes/notes/[agent]increase-default-ecs_metadata_timeout-1dbdc9e7e1118531.yaml new file mode 100644 index 0000000000000..6bbdcf7bce5dc --- /dev/null +++ b/releasenotes/notes/[agent]increase-default-ecs_metadata_timeout-1dbdc9e7e1118531.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Increase the Agent's default ecs_metadata_timeout from 500ms to 1000ms to avoid timeouts. diff --git a/releasenotes/notes/add-b761993d62be2cf6.yaml b/releasenotes/notes/add-b761993d62be2cf6.yaml new file mode 100644 index 0000000000000..388f948c773dd --- /dev/null +++ b/releasenotes/notes/add-b761993d62be2cf6.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add `7-ltsc2019` and `7-ltsc2022` tags to the `datadog/agent` images for Windows. diff --git a/releasenotes/notes/add-containerd-image-size-cache-4b89bfa37dc423f0.yaml b/releasenotes/notes/add-containerd-image-size-cache-4b89bfa37dc423f0.yaml new file mode 100644 index 0000000000000..108cd2f688ab8 --- /dev/null +++ b/releasenotes/notes/add-containerd-image-size-cache-4b89bfa37dc423f0.yaml @@ -0,0 +1,5 @@ +--- +enhancements: + - | + Enhanced the Containerd Check to use a cache for container image sizes, + reducing redundant API calls and improving performance. diff --git a/releasenotes/notes/add-obfuscation-cache-max-size-411301253b92e191.yaml b/releasenotes/notes/add-obfuscation-cache-max-size-411301253b92e191.yaml new file mode 100644 index 0000000000000..01d13db1ac15d --- /dev/null +++ b/releasenotes/notes/add-obfuscation-cache-max-size-411301253b92e191.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. 
So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Add ``apm_config.obfuscation.cache.max_size`` to set the maximum size of the + cache in bytes. diff --git a/releasenotes/notes/add-tcp-diagnosis-check-9a88a7fae42e9e56.yaml b/releasenotes/notes/add-tcp-diagnosis-check-9a88a7fae42e9e56.yaml new file mode 100644 index 0000000000000..6f3bcbf87008b --- /dev/null +++ b/releasenotes/notes/add-tcp-diagnosis-check-9a88a7fae42e9e56.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Add TCP diagnosis check for logs_config.force_use_tcp. diff --git a/releasenotes/notes/add-tls-enhanced-tags-6ff09ae7fc0ff7a1.yaml b/releasenotes/notes/add-tls-enhanced-tags-6ff09ae7fc0ff7a1.yaml new file mode 100644 index 0000000000000..14ed9c4ee8c35 --- /dev/null +++ b/releasenotes/notes/add-tls-enhanced-tags-6ff09ae7fc0ff7a1.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + The Agent will now tag TLS enhanced metrics like `tls_version` and `tls_cipher`. + This will allow you to filter and aggregate metrics based on the TLS version and cipher used in the connection. + The tags will be added in CNM and USM. diff --git a/releasenotes/notes/agent-flare-dmesg-d1de3cbb876c05d8.yaml b/releasenotes/notes/agent-flare-dmesg-d1de3cbb876c05d8.yaml new file mode 100644 index 0000000000000..99f496848765f --- /dev/null +++ b/releasenotes/notes/agent-flare-dmesg-d1de3cbb876c05d8.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Added the Linux kernel's dmesg logs into the Agent flare. This information will appear in ``system-probe/dmesg.log``. \ No newline at end of file diff --git a/releasenotes/notes/agent-tel-default-prom-reg-05cac59d8cfae3cc.yaml b/releasenotes/notes/agent-tel-default-prom-reg-05cac59d8cfae3cc.yaml new file mode 100644 index 0000000000000..cd0c4b432d75d --- /dev/null +++ b/releasenotes/notes/agent-tel-default-prom-reg-05cac59d8cfae3cc.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. 
This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Begin collecting metrics from all internal Prometheus registries. Previously, + the default registry was ignored, resulting in the omission of the `point.sent` + and `point.dropped` metrics. This change ensures that all metrics are collected. diff --git a/releasenotes/notes/agent-tel-extend-histogr-6e2da94e63edcaf8.yaml b/releasenotes/notes/agent-tel-extend-histogr-6e2da94e63edcaf8.yaml new file mode 100644 index 0000000000000..7b6280295dbba --- /dev/null +++ b/releasenotes/notes/agent-tel-extend-histogr-6e2da94e63edcaf8.yaml @@ -0,0 +1,14 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Extended Agent telemetry histogram details, specifically: + - Added previously omitted and implicit `+Inf` bucket value to histogram payload. + - Added p75, p95, and p99 histogram values (expressed as the upper-bound for the + matching bucket). diff --git a/releasenotes/notes/agent-tel-gzip-bba8a51c1aa3ba2f.yaml b/releasenotes/notes/agent-tel-gzip-bba8a51c1aa3ba2f.yaml new file mode 100644 index 0000000000000..ca28c715ecaa1 --- /dev/null +++ b/releasenotes/notes/agent-tel-gzip-bba8a51c1aa3ba2f.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Use HTTP zstd compression for the Agent telemetry payloads. diff --git a/releasenotes/notes/apm-otel-span-type-fix-b3d5ff36f4224bf5.yaml b/releasenotes/notes/apm-otel-span-type-fix-b3d5ff36f4224bf5.yaml new file mode 100644 index 0000000000000..455f5eaeed0e0 --- /dev/null +++ b/releasenotes/notes/apm-otel-span-type-fix-b3d5ff36f4224bf5.yaml @@ -0,0 +1,22 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Datadog span.Type and span.Resource attributes are set correctly for OTel spans + processed via OTel Agent and Datadog Exporter when client span type is a database + span.Type. + + span.Type logic update is limited to ReceiveResourceSpansV2 logic, set using + `"enable_receive_resource_spans_v2"` in `DD_APM_FEATURES` + + span.Resource logic update is limited to OperationAndResourceNameV2 logic, set + using `"enable_operation_and_resource_name_logic_v2"` in `DD_APM_FEATURES` + + Users should set a `span.type` attribute on their telemetry if they wish to + override the default span type. 
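A minimal sketch of setting an explicit ``span.type`` attribute from application code, assuming the standard OpenTelemetry Go SDK; the tracer name, span name, and the ``db`` value are illustrative only.

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

func queryUsers(ctx context.Context) {
	// Tracer and span names are illustrative.
	tracer := otel.Tracer("example/db")
	ctx, span := tracer.Start(ctx, "users.select")
	defer span.End()

	// An explicit `span.type` attribute overrides the span type that would
	// otherwise be inferred by the OTel Agent / Datadog Exporter.
	span.SetAttributes(attribute.String("span.type", "db"))

	_ = ctx // run the database query with ctx ...
}

func main() {
	// With no tracer provider configured this uses the SDK's no-op default.
	queryUsers(context.Background())
}
```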
diff --git a/releasenotes/notes/bump-go-to-1.23.5-6d47c9973fd01f4e.yaml b/releasenotes/notes/bump-go-to-1.23.5-6d47c9973fd01f4e.yaml new file mode 100644 index 0000000000000..e7864b20d6263 --- /dev/null +++ b/releasenotes/notes/bump-go-to-1.23.5-6d47c9973fd01f4e.yaml @@ -0,0 +1,4 @@ +--- +enhancements: +- | + Agents are now built with Go ``1.23.5``. diff --git a/releasenotes/notes/change-receive-resource-spans-v2-to-opt-out-e3fb3c8ead6138c0.yaml b/releasenotes/notes/change-receive-resource-spans-v2-to-opt-out-e3fb3c8ead6138c0.yaml new file mode 100644 index 0000000000000..92832e5a6124d --- /dev/null +++ b/releasenotes/notes/change-receive-resource-spans-v2-to-opt-out-e3fb3c8ead6138c0.yaml @@ -0,0 +1,5 @@ + +--- +enhancements: + - | + Added a new feature flag `disable_receive_resource_spans_v2` in DD_APM_FEATURES that replaces `enable_receive_resource_spans_v2` - the refactored implementation of ReceiveResourceSpans for OTLP is now opt-out instead of opt-in. diff --git a/releasenotes/notes/cspm-host-benchmarks-almalinux-9-d14c98a43196e779.yaml b/releasenotes/notes/cspm-host-benchmarks-almalinux-9-d14c98a43196e779.yaml new file mode 100644 index 0000000000000..88ec238b24425 --- /dev/null +++ b/releasenotes/notes/cspm-host-benchmarks-almalinux-9-d14c98a43196e779.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + Add support of CIS AlmaLinux 9 Benchmark in CSPM. diff --git a/releasenotes/notes/cws-fix-containers-billing-19580e6f968dd79a.yaml b/releasenotes/notes/cws-fix-containers-billing-19580e6f968dd79a.yaml new file mode 100644 index 0000000000000..443fb87b65443 --- /dev/null +++ b/releasenotes/notes/cws-fix-containers-billing-19580e6f968dd79a.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix an issue where the remote workloadmeta was not receiving some unset + events for ECS containers, causing incorrect billing in CWS, CSPM, CSM Pro, CSM + Enterprise, and DevSecOps Enterprise Containers. 
diff --git a/releasenotes/notes/ddprocmon-servicestatus-flare-1769b34ffbfa8b2f.yaml b/releasenotes/notes/ddprocmon-servicestatus-flare-1769b34ffbfa8b2f.yaml new file mode 100644 index 0000000000000..b59aeb0c39fc7 --- /dev/null +++ b/releasenotes/notes/ddprocmon-servicestatus-flare-1769b34ffbfa8b2f.yaml @@ -0,0 +1,7 @@ +--- +enhancements: + - | + Include Datadog Process Monitor (``ddprocmon``) service status in flare on Windows +fixes: + - | + Agent flare service status search for ``datadog`` services is now case insensitive on Windows diff --git a/releasenotes/notes/enable-http2-for-log-agent-proxy-2fc186ff7688473c.yaml b/releasenotes/notes/enable-http2-for-log-agent-proxy-2fc186ff7688473c.yaml new file mode 100644 index 0000000000000..4373893a7b705 --- /dev/null +++ b/releasenotes/notes/enable-http2-for-log-agent-proxy-2fc186ff7688473c.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +features: + - | + Log Agent now officially supports http2 transport to proxy. \ No newline at end of file diff --git a/releasenotes/notes/enable-ssi-language-default-version-9bc955d06c045ae6.yaml b/releasenotes/notes/enable-ssi-language-default-version-9bc955d06c045ae6.yaml new file mode 100644 index 0000000000000..0532e84da68f7 --- /dev/null +++ b/releasenotes/notes/enable-ssi-language-default-version-9bc955d06c045ae6.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Users can now enable a pod with SSI using a default language library version + and no longer need to pin to a specific version. diff --git a/releasenotes/notes/fix-integration-blank-config-b9faf78986eeb737.yaml b/releasenotes/notes/fix-integration-blank-config-b9faf78986eeb737.yaml new file mode 100644 index 0000000000000..46a19d7564cad --- /dev/null +++ b/releasenotes/notes/fix-integration-blank-config-b9faf78986eeb737.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Bypass sending blank logs configs to the integrations launcher to + prevent the launcher from sending JSON parse error logs. diff --git a/releasenotes/notes/fix-symdb-endpoint-respect-proxy-c23fd25816471145.yaml b/releasenotes/notes/fix-symdb-endpoint-respect-proxy-c23fd25816471145.yaml new file mode 100644 index 0000000000000..4ba75ed881893 --- /dev/null +++ b/releasenotes/notes/fix-symdb-endpoint-respect-proxy-c23fd25816471145.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. 
So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Respect proxy config in symdb endpoint. diff --git a/releasenotes/notes/fix-us-user-admin-86b7a987ea599402.yaml b/releasenotes/notes/fix-us-user-admin-86b7a987ea599402.yaml new file mode 100644 index 0000000000000..8ed2b8b8476f4 --- /dev/null +++ b/releasenotes/notes/fix-us-user-admin-86b7a987ea599402.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix IsUserAnAdmin call on Windows to use correct API. diff --git a/releasenotes/notes/fix_rpm_persisting_integrations-81c2b6333b12edb6.yaml b/releasenotes/notes/fix_rpm_persisting_integrations-81c2b6333b12edb6.yaml new file mode 100644 index 0000000000000..59b0d9e7fecb6 --- /dev/null +++ b/releasenotes/notes/fix_rpm_persisting_integrations-81c2b6333b12edb6.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixed a bug that occurs when reinstalling marketplace/extra integrations for a RPM package after an Agent upgrade. diff --git a/releasenotes/notes/metrics-compression-default-zstd-c786c2d28eb51b1f.yaml b/releasenotes/notes/metrics-compression-default-zstd-c786c2d28eb51b1f.yaml index 7d9a8653d8f0d..aec503206f6a6 100644 --- a/releasenotes/notes/metrics-compression-default-zstd-c786c2d28eb51b1f.yaml +++ b/releasenotes/notes/metrics-compression-default-zstd-c786c2d28eb51b1f.yaml @@ -6,8 +6,11 @@ # # Each section note must be formatted as reStructuredText. --- -enhancements: +upgrade: - | Metric payloads are compressed using `zstd` compression by default. - This can be reverted to the previous compression kind by adding - ``serializer_compressor_kind: zlib`` to the configuration. + This may be breaking for users who are sending metrics to systems other + than Datadog, such as Vector prior to version 0.40 or proxy servers that + need to decode requests. The setting can be reverted to the previous + compression kind by adding ``serializer_compressor_kind: zlib`` to the + configuration. diff --git a/releasenotes/notes/missing-otlp-span-mappings-c95651d044c6cf43.yaml b/releasenotes/notes/missing-otlp-span-mappings-c95651d044c6cf43.yaml new file mode 100644 index 0000000000000..222220fda3a9d --- /dev/null +++ b/releasenotes/notes/missing-otlp-span-mappings-c95651d044c6cf43.yaml @@ -0,0 +1,12 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + When `apm.features.enable_receive_resource_spans_v2` is set, trace agent OTLPReceiver now maps HTTP attributes from OTLP conventions to DD conventions. 
+ See the full list of attributes here: https://docs.datadoghq.com/opentelemetry/schema_semantics/semantic_mapping/?tab=datadogexporter#http diff --git a/releasenotes/notes/msi-lanmanserver-6b8f1bf993efb5c3.yaml b/releasenotes/notes/msi-lanmanserver-6b8f1bf993efb5c3.yaml new file mode 100644 index 0000000000000..bf8fc9c03cedd --- /dev/null +++ b/releasenotes/notes/msi-lanmanserver-6b8f1bf993efb5c3.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Windows installer will not abort if the LanmanServer (Server) service is not running (regression introduced in 7.47.0). diff --git a/releasenotes/notes/network-path-windows-udp-support-afb397f49d5a3299.yaml b/releasenotes/notes/network-path-windows-udp-support-afb397f49d5a3299.yaml new file mode 100644 index 0000000000000..6f1b4fda42c13 --- /dev/null +++ b/releasenotes/notes/network-path-windows-udp-support-afb397f49d5a3299.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Adds initial Windows support for UDP probes in Network Path. diff --git a/releasenotes/notes/obfuscate-cache-item-cost-fix-5d3c9b57564d5f3b.yaml b/releasenotes/notes/obfuscate-cache-item-cost-fix-5d3c9b57564d5f3b.yaml new file mode 100644 index 0000000000000..a68f714626606 --- /dev/null +++ b/releasenotes/notes/obfuscate-cache-item-cost-fix-5d3c9b57564d5f3b.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Obfuscation Cache Size Calculation: + Resolved an issue where the cache item size was underestimated by not accounting for the Go struct overhead (including struct fields and headers for strings and slices). + This fix ensures a more accurate calculation of cache item memory usage, leading to better memory efficiency and preventing over-allocation of NumCounters in cache configurations. diff --git a/releasenotes/notes/oracle-obfuscator-lazy-init-ce7c237bb52ee72f.yaml b/releasenotes/notes/oracle-obfuscator-lazy-init-ce7c237bb52ee72f.yaml new file mode 100644 index 0000000000000..b63e1588ac5ce --- /dev/null +++ b/releasenotes/notes/oracle-obfuscator-lazy-init-ce7c237bb52ee72f.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + Updated Oracle check to lazily initialize the obfuscator. This should + improve performance each time the Oracle check runs and collects SQL + statements. 
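A minimal sketch of the lazy-initialization pattern the note describes, using ``sync.Once``; the ``oracleCheck`` and ``obfuscator`` types are hypothetical stand-ins, not the actual check implementation.

```go
package main

import (
	"fmt"
	"sync"
)

// obfuscator is a hypothetical stand-in for the real SQL obfuscator.
type obfuscator struct{}

func newObfuscator() *obfuscator {
	fmt.Println("obfuscator initialized")
	return &obfuscator{}
}

// oracleCheck is a hypothetical stand-in for the Oracle check.
type oracleCheck struct {
	obfOnce sync.Once
	obf     *obfuscator
}

// getObfuscator builds the obfuscator on first use rather than when the check
// is constructed, so runs that never collect SQL statements skip the cost.
func (c *oracleCheck) getObfuscator() *obfuscator {
	c.obfOnce.Do(func() { c.obf = newObfuscator() })
	return c.obf
}

func main() {
	c := &oracleCheck{}
	_ = c.getObfuscator() // initialized here, on first use
	_ = c.getObfuscator() // reuses the same instance
}
```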
diff --git a/releasenotes/notes/panic-on-tailer-close-ee1ce3603e006428.yaml b/releasenotes/notes/panic-on-tailer-close-ee1ce3603e006428.yaml new file mode 100644 index 0000000000000..d532e9845bf09 --- /dev/null +++ b/releasenotes/notes/panic-on-tailer-close-ee1ce3603e006428.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +fixes: + - | + Fix potential panic in journald and Windows event tailers during system shutdown \ No newline at end of file diff --git a/releasenotes/notes/password-contains-semicolon-error-5c63154508759fa0.yaml b/releasenotes/notes/password-contains-semicolon-error-5c63154508759fa0.yaml new file mode 100644 index 0000000000000..0cdac9713d27e --- /dev/null +++ b/releasenotes/notes/password-contains-semicolon-error-5c63154508759fa0.yaml @@ -0,0 +1,5 @@ +--- +enhancements: + - | + The Windows Agent MSI now shows the user an error message + if the provided password contains a semicolon. diff --git a/releasenotes/notes/persisting-integrations-windows-ca3193bd7a791198.yaml b/releasenotes/notes/persisting-integrations-windows-ca3193bd7a791198.yaml new file mode 100644 index 0000000000000..406b464d0b1af --- /dev/null +++ b/releasenotes/notes/persisting-integrations-windows-ca3193bd7a791198.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix the removal of non-core integrations during Agent upgrades on Windows platforms. + To enable persisting non-core integration during install, set INSTALL_PYTHON_THIRD_PARTY_DEPS="1" + property during the installation of the MSI. diff --git a/releasenotes/notes/python3128-777a5378ff8576e8.yaml b/releasenotes/notes/python3128-777a5378ff8576e8.yaml new file mode 100644 index 0000000000000..e9d45a895c884 --- /dev/null +++ b/releasenotes/notes/python3128-777a5378ff8576e8.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +upgrade: + - | + Bump the Python version to 3.12.8 diff --git a/releasenotes/notes/sql-normalization-6f499718a85da054.yaml b/releasenotes/notes/sql-normalization-6f499718a85da054.yaml new file mode 100644 index 0000000000000..2078fa02cb870 --- /dev/null +++ b/releasenotes/notes/sql-normalization-6f499718a85da054.yaml @@ -0,0 +1,11 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + APM: Introduce ``sql_obfuscation_mode`` parameter. The value ``obfuscate_and_normalize`` is recommended for DBM customers to enhance APM/DBM correlation. 
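The ``obfuscate_and_normalize`` mode maps onto the shared obfuscator used by the trace agent; below is a rough sketch of exercising it directly, assuming ``pkg/obfuscate`` exposes ``NewObfuscator``, ``SQLConfig.ObfuscationMode``, and ``ObfuscateSQLString`` roughly as shown (treat the exact field and constant names, and the ``postgresql`` DBMS value, as assumptions).

```go
package main

import (
	"fmt"
	"log"

	"github.com/DataDog/datadog-agent/pkg/obfuscate"
)

func main() {
	// Assumed API shape: SQLConfig.ObfuscationMode selects the combined
	// obfuscate-and-normalize behavior recommended for DBM correlation.
	o := obfuscate.NewObfuscator(obfuscate.Config{
		SQL: obfuscate.SQLConfig{
			DBMS:            "postgresql",
			ObfuscationMode: obfuscate.ObfuscateAndNormalize,
		},
	})

	oq, err := o.ObfuscateSQLString("SELECT * FROM users WHERE id = 42 -- lookup")
	if err != nil {
		log.Fatal(err)
	}
	// Literals are replaced and the statement is normalized, so APM and DBM
	// see the same query signature.
	fmt.Println(oq.Query)
}
```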
diff --git a/releasenotes/notes/traceagent-spanevents-0c207e5103d1926d.yaml b/releasenotes/notes/traceagent-spanevents-0c207e5103d1926d.yaml new file mode 100644 index 0000000000000..05b1817532987 --- /dev/null +++ b/releasenotes/notes/traceagent-spanevents-0c207e5103d1926d.yaml @@ -0,0 +1,13 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +enhancements: + - | + APM: Adds span events as a top level payload field. Span events received this way will be altered according to rules defined by DD_APM_REPLACE_TAGS. Credit card obfuscation will also be applied to span event attributes. + - | + APM: If apm_config.obfuscation.remove_stack_traces is enabled the trace agent will now also remove the value at span tag `exception.stacktrace` replacing it with a "?". diff --git a/releasenotes/notes/update-programdata-permissions-6271744e48907d71.yaml b/releasenotes/notes/update-programdata-permissions-6271744e48907d71.yaml new file mode 100644 index 0000000000000..a0d7157e3b8bb --- /dev/null +++ b/releasenotes/notes/update-programdata-permissions-6271744e48907d71.yaml @@ -0,0 +1,18 @@ +# Each section from every release note are combined when the +# CHANGELOG.rst is rendered. So the text needs to be worded so that +# it does not depend on any information only available in another +# section. This may mean repeating some details, but each section +# must be readable independently of the other. +# +# Each section note must be formatted as reStructuredText. +--- +security: + - | + Removes Datadog user's full control of the Datadog data directory on Windows. +upgrade: + - | + Removes Datadog user's full control of the Datadog data directory on Windows. + If you are using custom configured values for log files, confd_path, run_path, or additional_checksd + that are within the Datadog ProgramData folder, then you will have to explicitly give the Datadog user + write permissions to the folders and files configured. + diff --git a/releasenotes/notes/windows_system_probe_namedpipe_hardening-cfc08b47465f1f2b.yaml b/releasenotes/notes/windows_system_probe_namedpipe_hardening-cfc08b47465f1f2b.yaml new file mode 100644 index 0000000000000..8b3e631fd8f90 --- /dev/null +++ b/releasenotes/notes/windows_system_probe_namedpipe_hardening-cfc08b47465f1f2b.yaml @@ -0,0 +1,4 @@ +security: + - | + On Windows, the named pipe \\.\pipe\dd_system_probe from system probe is now restricted to + Local System, Administrators, and the ddagentuser. Any other custom users are not supported. 
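A sketch of how a named pipe DACL of this kind can be expressed when the pipe is created, assuming the ``github.com/Microsoft/go-winio`` package; the SDDL string is illustrative (Local System and Administrators only) and omits the installation-specific ``ddagentuser`` SID.

```go
//go:build windows

package main

import (
	"log"

	"github.com/Microsoft/go-winio"
)

func main() {
	// Illustrative SDDL: full access for Local System (SY) and the builtin
	// Administrators group (BA) only. A real descriptor would also carry an
	// ACE for the installation-specific ddagentuser SID, omitted here.
	sddl := "D:PAI(A;;GA;;;SY)(A;;GA;;;BA)"

	l, err := winio.ListenPipe(`\\.\pipe\dd_system_probe`, &winio.PipeConfig{
		SecurityDescriptor: sddl,
	})
	if err != nil {
		log.Fatalf("listen pipe: %v", err)
	}
	defer l.Close()

	// Clients whose token is not covered by the DACL are rejected by the OS
	// before a connection is ever handed to Accept.
	conn, err := l.Accept()
	if err != nil {
		log.Fatalf("accept: %v", err)
	}
	conn.Close()
}
```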
diff --git a/tasks/__init__.py b/tasks/__init__.py index 0dffed23dacaf..f6ddfc8f28f30 100644 --- a/tasks/__init__.py +++ b/tasks/__init__.py @@ -64,6 +64,7 @@ vim, vscode, winbuild, + windows_dev_env, worktree, ) from tasks.build_tags import audit_tag_impact, print_default_build_tags @@ -214,6 +215,7 @@ ns.add_collection(invoke_unit_tests) ns.add_collection(debug) ns.add_collection(winbuild) +ns.add_collection(windows_dev_env) ns.add_collection(worktree) ns.configure( { diff --git a/tasks/agent.py b/tasks/agent.py index 84d1134310965..66da958ed93f2 100644 --- a/tasks/agent.py +++ b/tasks/agent.py @@ -82,7 +82,6 @@ "systemd", "tcp_queue_length", "uptime", - "winproc", "jetson", "telemetry", "orchestrator_pod", @@ -90,6 +89,7 @@ "cisco_sdwan", "network_path", "service_discovery", + "gpu", ] WINDOWS_CORECHECKS = [ @@ -98,6 +98,7 @@ "windows_registry", "winkmem", "wincrashdetect", + "winproc", "win32_event_log", ] diff --git a/tasks/build_tags.py b/tasks/build_tags.py index 9f58d3e45c60a..665a54518fde8 100644 --- a/tasks/build_tags.py +++ b/tasks/build_tags.py @@ -20,6 +20,10 @@ "clusterchecks", "consul", "containerd", + # Disables dynamic plugins in containerd v1, which removes the import to std "plugin" package on Linux amd64, + # which makes the agent significantly smaller. + # This can be removed when we start using containerd v2.1 or later. + "no_dynamic_plugins", "cri", "crio", "docker", @@ -33,9 +37,11 @@ "kubelet", "linux_bpf", "netcgo", # Force the use of the CGO resolver. This will also have the effect of making the binary non-static + "netgo", "npm", "oracle", "orchestrator", + 'osusergo', "otlp", "pcap", # used by system-probe to compile packet filters using google/gopacket/pcap, which requires cgo to link libpcap "podman", @@ -58,6 +64,7 @@ AGENT_TAGS = { "consul", "containerd", + "no_dynamic_plugins", "cri", "crio", "datadog.no_waf", @@ -85,6 +92,7 @@ AGENT_HEROKU_TAGS = AGENT_TAGS.difference( { "containerd", + "no_dynamic_plugins", "cri", "crio", "docker", @@ -109,7 +117,7 @@ CLUSTER_AGENT_CLOUDFOUNDRY_TAGS = {"clusterchecks"} # DOGSTATSD_TAGS lists the tags needed when building dogstatsd -DOGSTATSD_TAGS = {"containerd", "docker", "kubelet", "podman", "zlib", "zstd"} +DOGSTATSD_TAGS = {"containerd", "no_dynamic_plugins", "docker", "kubelet", "podman", "zlib", "zstd"} # IOT_AGENT_TAGS lists the tags needed when building the IoT agent IOT_AGENT_TAGS = {"jetson", "otlp", "systemd", "zlib", "zstd"} @@ -124,6 +132,7 @@ PROCESS_AGENT_HEROKU_TAGS = PROCESS_AGENT_TAGS.difference( { "containerd", + "no_dynamic_plugins", "cri", "crio", "docker", @@ -143,6 +152,7 @@ "datadog.no_waf", "docker", "containerd", + "no_dynamic_plugins", "kubeapiserver", "kubelet", "podman", @@ -157,6 +167,7 @@ # SYSTEM_PROBE_TAGS lists the tags necessary to build system-probe SYSTEM_PROBE_TAGS = { "datadog.no_waf", + "no_dynamic_plugins", "ec2", "linux_bpf", "netcgo", @@ -168,12 +179,23 @@ } # TRACE_AGENT_TAGS lists the tags that have to be added when the trace-agent -TRACE_AGENT_TAGS = {"docker", "containerd", "datadog.no_waf", "kubeapiserver", "kubelet", "otlp", "netcgo", "podman"} +TRACE_AGENT_TAGS = { + "docker", + "containerd", + "no_dynamic_plugins", + "datadog.no_waf", + "kubeapiserver", + "kubelet", + "otlp", + "netcgo", + "podman", +} # TRACE_AGENT_HEROKU_TAGS lists the tags necessary to build the trace-agent for Heroku TRACE_AGENT_HEROKU_TAGS = TRACE_AGENT_TAGS.difference( { "containerd", + "no_dynamic_plugins", "docker", "kubeapiserver", "kubelet", @@ -181,6 +203,8 @@ } ) +CWS_INSTRUMENTATION_TAGS = 
{"netgo", "osusergo"} + # AGENT_TEST_TAGS lists the tags that have to be added to run tests AGENT_TEST_TAGS = AGENT_TAGS.union({"clusterchecks"}) @@ -194,7 +218,7 @@ WINDOWS_EXCLUDE_TAGS = {"linux_bpf"} # List of tags to always remove when building on Darwin/macOS -DARWIN_EXCLUDED_TAGS = {"docker", "containerd", "cri", "crio"} +DARWIN_EXCLUDED_TAGS = {"docker", "containerd", "no_dynamic_plugins", "cri", "crio"} # Unit test build tags UNIT_TEST_TAGS = {"test"} @@ -217,6 +241,7 @@ "system-probe": SYSTEM_PROBE_TAGS, "system-probe-unit-tests": SYSTEM_PROBE_TAGS.union(UNIT_TEST_TAGS).difference(UNIT_TEST_EXCLUDE_TAGS), "trace-agent": TRACE_AGENT_TAGS, + "cws-instrumentation": CWS_INSTRUMENTATION_TAGS, # Test setups "test": AGENT_TEST_TAGS.union(UNIT_TEST_TAGS).difference(UNIT_TEST_EXCLUDE_TAGS), "lint": AGENT_TEST_TAGS.union(PROCESS_AGENT_TAGS).union(UNIT_TEST_TAGS).difference(UNIT_TEST_EXCLUDE_TAGS), diff --git a/tasks/collector.py b/tasks/collector.py index 74f3a439bd9fd..3fbe83c48a73b 100644 --- a/tasks/collector.py +++ b/tasks/collector.py @@ -14,14 +14,23 @@ from tasks.go import tidy from tasks.libs.ciproviders.github_api import GithubAPI from tasks.libs.common.color import Color, color_message -from tasks.libs.common.git import check_uncommitted_changes, get_git_config, revert_git_config, set_git_config +from tasks.libs.common.constants import ( + GITHUB_REPO_NAME, +) +from tasks.libs.common.git import ( + check_clean_branch_state, + check_uncommitted_changes, + get_git_config, + revert_git_config, + set_git_config, +) LICENSE_HEADER = """// Unless explicitly stated otherwise all files in this repository are licensed // under the Apache License Version 2.0. // This product includes software developed at Datadog (https://www.datadoghq.com/). // Copyright 2016-present Datadog, Inc. 
""" -OCB_VERSION = "0.115.0" +OCB_VERSION = "0.118.0" MANDATORY_COMPONENTS = { "extensions": [ @@ -89,6 +98,10 @@ def find_matching_components(manifest, components_to_match: dict, present: bool) def versions_equal(version1, version2): + idx = version1.find("/") + if idx != -1: + # version may be in the format of "v1.xx.0/v0.yyy.0" + version1 = version1[idx + 1 :] # strip leading 'v' if present if version1.startswith("v"): version1 = version1[1:] @@ -416,7 +429,7 @@ def get_version(self): def fetch_latest_release(self): gh = GithubAPI(self.repo) - self.version = gh.latest_release() + self.version = gh.latest_release_tag() return self.version def fetch_module_versions(self): @@ -516,12 +529,20 @@ def pull_request(ctx): ctx.run('git add .') if check_uncommitted_changes(ctx): branch_name = f"update-otel-collector-dependencies-{OCB_VERSION}" + gh = GithubAPI(repository=GITHUB_REPO_NAME) ctx.run(f'git switch -c {branch_name}') ctx.run( f'git commit -m "Update OTel Collector dependencies to {OCB_VERSION} and generate OTel Agent" --no-verify' ) + try: + # don't check if local branch exists; we just created it + check_clean_branch_state(ctx, gh, branch_name) + except Exit as e: + # local branch already exists, so skip error if this is thrown + if "already exists locally" not in str(e): + print(e) + return ctx.run(f'git push -u origin {branch_name} --no-verify') # skip pre-commit hook if installed locally - gh = GithubAPI() gh.create_pr( pr_title=f"Update OTel Collector dependencies to v{OCB_VERSION}", pr_body=f"This PR updates the dependencies of the OTel Collector to v{OCB_VERSION} and generates the OTel Agent code.", diff --git a/tasks/components.py b/tasks/components.py index fc5b04806ce04..0d0c1315179ba 100644 --- a/tasks/components.py +++ b/tasks/components.py @@ -214,7 +214,7 @@ def check_component_contents_and_file_hiearchy(comp): # Definition file `component.go` (v1) or `def/component.go` (v2) must not contain a mock definition for mock_definition in mock_definitions: if any(line.startswith(mock_definition) for line in def_content): - return f"** {comp.def_file} defines '{mock_definition}' which should be in separate implementation. See docs/components/defining-components.md" + return f"** {comp.def_file} defines '{mock_definition}' which should be in separate implementation. See https://datadoghq.dev/datadog-agent/components/creating-components/" # Allowlist of components that do not use an implementation folder if comp.path in components_missing_implementation_folder: @@ -223,7 +223,7 @@ def check_component_contents_and_file_hiearchy(comp): # Implementation folder or folders must exist impl_folders = locate_implementation_folders(comp) if len(impl_folders) == 0: - return f"** {comp.name} is missing the implementation folder in {comp.path}. See docs/components/defining-components.md" + return f"** {comp.name} is missing the implementation folder in {comp.path}. 
See https://datadoghq.dev/datadog-agent/components/creating-components/" if comp.version == 2: # Implementation source files should use correct package name, and shouldn't import fx (except tests) diff --git a/tasks/cws_instrumentation.py b/tasks/cws_instrumentation.py index 748c6b64784c3..2726b9b753c25 100644 --- a/tasks/cws_instrumentation.py +++ b/tasks/cws_instrumentation.py @@ -7,7 +7,7 @@ from invoke import task from invoke.exceptions import Exit -from tasks.build_tags import get_default_build_tags +from tasks.build_tags import add_fips_tags, get_default_build_tags from tasks.libs.common.git import get_commit_sha, get_current_branch from tasks.libs.common.utils import ( REPO_PATH, @@ -32,6 +32,7 @@ def build( major_version='7', go_mod="readonly", static=False, + fips_mode=False, no_strip_binary=False, ): """ @@ -52,11 +53,8 @@ def build( } ldflags += ' '.join([f"-X '{main + key}={value}'" for key, value in ld_vars.items()]) - build_tags += get_default_build_tags( - build="cws-instrumentation" - ) # TODO/FIXME: Arch not passed to preserve build tags. Should this be fixed? - build_tags.append("netgo") - build_tags.append("osusergo") + build_tags += get_default_build_tags(build="cws-instrumentation") + build_tags = add_fips_tags(build_tags, fips_mode) race_opt = "-race" if race else "" build_type = "-a" if rebuild else "" diff --git a/tasks/devcontainer.py b/tasks/devcontainer.py index c8c738df05112..2086c47cbd56e 100644 --- a/tasks/devcontainer.py +++ b/tasks/devcontainer.py @@ -77,6 +77,8 @@ def setup( "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined", + "-w", + "/workspaces/datadog-agent", "--name", "datadog_agent_devcontainer", ] diff --git a/tasks/diff.py b/tasks/diff.py index 305a4997c7247..37f63e5d57f20 100644 --- a/tasks/diff.py +++ b/tasks/diff.py @@ -3,8 +3,10 @@ """ import datetime +import json import os import tempfile +from datetime import timedelta from invoke import task from invoke.exceptions import Exit @@ -12,9 +14,10 @@ from tasks.build_tags import get_default_build_tags from tasks.flavor import AgentFlavor from tasks.go import GOARCH_MAPPING, GOOS_MAPPING -from tasks.libs.common.color import color_message +from tasks.libs.common.color import Color, color_message from tasks.libs.common.datadog_api import create_count, send_metrics from tasks.libs.common.git import check_uncommitted_changes, get_commit_sha, get_current_branch +from tasks.libs.common.worktree import agent_context from tasks.release import _get_release_json_value BINARIES: dict[str, dict] = { @@ -230,3 +233,71 @@ def patch_summary(diff): elif line.startswith("-"): remove_count += 1 return add_count, remove_count + + +def _list_tasks_rec(collection, prefix='', res=None): + res = res or {} + + if isinstance(collection, dict): + newpref = prefix + collection['name'] + + for task in collection['tasks']: + res[newpref + '.' + task['name']] = task['help'] + + for subtask in collection['collections']: + _list_tasks_rec(subtask, newpref + '.', res) + + return res + + +def _list_invoke_tasks(ctx) -> dict[str, str]: + """Returns a dictionary of invoke tasks and their descriptions.""" + + tasks = json.loads(ctx.run('invoke --list -F json', hide=True).stdout) + + # Remove 'tasks.' prefix + return {name.removeprefix(tasks['name'] + '.'): desc for name, desc in _list_tasks_rec(tasks).items()} + + +@task +def invoke_tasks(ctx, diff_date: str | None = None): + """Shows the added / removed invoke tasks since diff_date with their description. 
+ + Args: + diff_date: The date to compare the tasks to ('YYYY-MM-DD' format). Will be the last 30 days if not provided. + """ + + if not diff_date: + diff_date = (datetime.datetime.now() - timedelta(days=30)).strftime('%Y-%m-%d') + else: + try: + datetime.datetime.strptime(diff_date, '%Y-%m-%d') + except ValueError as e: + raise Exit('Invalid date format. Please use the format "YYYY-MM-DD".') from e + + old_commit = ctx.run(f"git rev-list -n 1 --before='{diff_date} 23:59' HEAD", hide=True).stdout.strip() + assert old_commit, f"No commit found before {diff_date}" + + with agent_context(ctx, commit=old_commit): + old_tasks = _list_invoke_tasks(ctx) + current_tasks = _list_invoke_tasks(ctx) + + all_tasks = set(old_tasks.keys()).union(current_tasks.keys()) + removed_tasks = {task for task in all_tasks if task not in current_tasks} + added_tasks = {task for task in all_tasks if task not in old_tasks} + + if removed_tasks: + print(f'* {color_message("Removed tasks", Color.BOLD)}:') + print('\n'.join(sorted(f'- {name}' for name in removed_tasks))) + else: + print('No task removed') + + if added_tasks: + print(f'\n* {color_message("Added tasks", Color.BOLD)}:') + for name, description in sorted((name, current_tasks[name]) for name in added_tasks): + line = '+ ' + name + if description: + line += ': ' + description + print(line) + else: + print('No task added') diff --git a/tasks/ebpf.py b/tasks/ebpf.py index 711639cebd073..647b64801dfb6 100644 --- a/tasks/ebpf.py +++ b/tasks/ebpf.py @@ -887,11 +887,11 @@ def _try_delete_github_comment(msg: str): has_any_changes = False for group, rows in itertools.groupby(summarized_complexity_changes, key=lambda x: x[0].split("/")[0]): + rows = list(rows) # Convert the iterator to a list, so we can iterate over it multiple times + if not any(row[-1] for row in rows): continue - rows = list(rows) # Convert the iterator to a list, so we can iterate over it multiple times - def _build_table(orig_rows): # Format rows to make it more compact, remove the changes marker and remove the object name changed_rows = [[row[0].split("/")[1]] + row[1:-1] for row in orig_rows] diff --git a/tasks/github_tasks.py b/tasks/github_tasks.py index bf2c546c8ecb4..50696bb3d197d 100644 --- a/tasks/github_tasks.py +++ b/tasks/github_tasks.py @@ -1,5 +1,6 @@ from __future__ import annotations +import json import os import re import time @@ -16,6 +17,7 @@ follow_workflow_run, print_failed_jobs_logs, print_workflow_conclusion, + trigger_buildenv_workflow, trigger_macos_workflow, ) from tasks.libs.common.color import Color, color_message @@ -23,9 +25,11 @@ from tasks.libs.common.datadog_api import create_gauge, send_event, send_metrics from tasks.libs.common.git import get_default_branch from tasks.libs.common.utils import get_git_pretty_ref +from tasks.libs.notify.pipeline_status import send_slack_message from tasks.libs.owners.linter import codeowner_has_orphans, directory_has_packages_without_owner from tasks.libs.owners.parsing import read_owners from tasks.libs.pipeline.notifications import GITHUB_SLACK_MAP +from tasks.libs.releasing.version import current_version from tasks.release import _get_release_json_value ALL_TEAMS = '@datadog/agent-all' @@ -110,6 +114,56 @@ def trigger_macos( raise Exit(message=f"Macos {workflow_type} workflow {conclusion}", code=1) +def _update_windows_runner_version(new_version=None, buildenv_ref="master"): + if new_version is None: + raise Exit(message="Buildenv workflow need the 'new_version' field value to be not None") + + run = trigger_buildenv_workflow( 
+ workflow_name="runner-bump.yml", + github_action_ref=buildenv_ref, + new_version=new_version, + ) + # We are only waiting 0.5min between each status check because buildenv is much faster than macOS builds + workflow_conclusion, workflow_url = follow_workflow_run(run, "DataDog/buildenv", 0.5) + + if workflow_conclusion != "success": + if workflow_conclusion == "failure": + print_failed_jobs_logs(run) + return workflow_conclusion + + print_workflow_conclusion(workflow_conclusion, workflow_url) + + download_with_retry(download_artifacts, run, ".", 3, 5, "DataDog/buildenv") + + with open("PR_URL_ARTIFACT") as f: + PR_URL = f.read().strip() + + if not PR_URL: + raise Exit(message="Failed to fetch artifact from the workflow. (Empty artifact)") + + message = f":robobits: A new windows-runner bump PR to {new_version} has been generated. Please take a look :frog-review:\n:pr: {PR_URL} :ty:" + + send_slack_message("ci-infra-support", message) + return workflow_conclusion + + +@task +def update_windows_runner_version( + ctx, + new_version=None, + buildenv_ref="master", +): + """ + Trigger a workflow on the buildenv repository to bump windows gitlab runner + """ + if new_version is None: + new_version = str(current_version(ctx, "7")) + + conclusion = _update_windows_runner_version(new_version, buildenv_ref) + if conclusion != "success": + raise Exit(message=f"Buildenv workflow {conclusion}", code=1) + + @task def lint_codeowner(_, owners_file=".github/CODEOWNERS"): """ @@ -586,3 +640,32 @@ def check_qa_labels(_, labels: str): if len(qa_labels) > 1: raise Exit(f"More than one QA label set.\n{docs}", code=1) print("QA label set correctly") + + +@task +def print_pr_state(_, id): + """Print the PR merge state if the PR is stuck within the merge queue.""" + + from tasks.libs.ciproviders.github_api import GithubAPI + + query = """ +query { + repository (owner: "DataDog", name: "datadog-agent") { + pullRequest(number: ID) { + reviewDecision + state + statusCheckRollup { + state + } + mergeable + mergeStateStatus + locked + } + } +} +""".replace("ID", id) # Use replace to avoid formatting issues with curly braces + + gh = GithubAPI() + res = gh.graphql(query) + + print(json.dumps(res, indent=2)) diff --git a/tasks/go.py b/tasks/go.py index 5bb3fcad7d656..587264a4cd6f9 100644 --- a/tasks/go.py +++ b/tasks/go.py @@ -347,6 +347,14 @@ def generate_protobuf(ctx): switches = patch[1] if patch[1] else '' ctx.run(f"git apply {switches} --unsafe-paths --directory='{pbgo_dir}/{pkg}' {patch_file}") + # Check the generated files were properly committed + updates = ctx.run("git status -suno").stdout.strip() + if updates: + raise Exit( + "Generated files were not properly committed. 
Please run `inv generate-protobuf` and commit the changes.", + code=1, + ) + @task def reset(ctx): @@ -487,7 +495,7 @@ def tidy(ctx): @task def check_go_version(ctx): go_version_output = ctx.run('go version') - # result is like "go version go1.23.3 linux/amd64" + # result is like "go version go1.23.5 linux/amd64" running_go_version = go_version_output.stdout.split(' ')[2] with open(".go-version") as f: diff --git a/tasks/install_tasks.py b/tasks/install_tasks.py index 880fda4146e52..2cf7dbc34b0c3 100644 --- a/tasks/install_tasks.py +++ b/tasks/install_tasks.py @@ -26,7 +26,7 @@ TOOL_LIST_PROTO = [ 'github.com/favadi/protoc-go-inject-tag', - 'github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway', + 'github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-grpc-gateway', 'github.com/golang/protobuf/protoc-gen-go', 'github.com/golang/mock/mockgen', 'github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto', diff --git a/tasks/installer.py b/tasks/installer.py index 381c2dc104db8..d88065820f7ab 100644 --- a/tasks/installer.py +++ b/tasks/installer.py @@ -10,7 +10,6 @@ from invoke.exceptions import Exit from tasks.build_tags import filter_incompatible_tags, get_build_tags, get_default_build_tags -from tasks.libs.common.git import get_commit_sha from tasks.libs.common.utils import REPO_PATH, bin_name, get_build_flags from tasks.libs.releasing.version import get_version @@ -107,10 +106,6 @@ def build_linux_script( with open(INSTALL_SCRIPT_TEMPLATE) as f: install_script = f.read() - # default version on pipelines, using the commit sha instead - if version == "nightly-a7": - version = get_commit_sha(ctx) - archs = ['amd64', 'arm64'] for arch in archs: build_downloader(ctx, flavor=flavor, version=version, os='linux', arch=arch) @@ -120,8 +115,10 @@ def build_linux_script( commit_sha = ctx.run('git rev-parse HEAD', hide=True).stdout.strip() install_script = install_script.replace('INSTALLER_COMMIT', commit_sha) - - with open(os.path.join(DIR_BIN, f'install-{flavor}.sh'), 'w') as f: + filename = f'install-{flavor}.sh' + if flavor == "default": + filename = 'install.sh' + with open(os.path.join(DIR_BIN, filename), 'w') as f: f.write(install_script) diff --git a/tasks/issue.py b/tasks/issue.py index 92e4d1ee1cb98..c9f3dea146f7f 100644 --- a/tasks/issue.py +++ b/tasks/issue.py @@ -3,10 +3,10 @@ from invoke import task -from tasks.libs.ciproviders.github_api import GithubAPI +from tasks.libs.ciproviders.github_api import GithubAPI, ask_review_actor from tasks.libs.issue.assign import assign_with_model, assign_with_rules from tasks.libs.issue.model.actions import fetch_data_and_train_model -from tasks.libs.pipeline.notifications import GITHUB_SLACK_MAP, GITHUB_SLACK_REVIEW_MAP +from tasks.libs.pipeline.notifications import GITHUB_SLACK_MAP, GITHUB_SLACK_REVIEW_MAP, HELP_SLACK_CHANNEL @task @@ -26,7 +26,7 @@ def assign_owner(_, issue_id, dry_run=False): from slack_sdk import WebClient client = WebClient(os.environ['SLACK_API_TOKEN']) - channel = next((chan for team, chan in GITHUB_SLACK_MAP.items() if owner.lower() in team), '#agent-devx-help') + channel = next((chan for team, chan in GITHUB_SLACK_MAP.items() if owner.lower() in team), HELP_SLACK_CHANNEL) message = f':githubstatus_partial_outage: *New Community Issue*\n{issue.title} <{issue.html_url}|{gh.repo.name}#{issue_id}>\n' if channel == '#agent-ask-anything': message += "The CI bot failed to assign this issue to a team.\nPlease assign it manually." 
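For illustration, the team-to-channel routing used by assign_owner above (and by ask_reviews further down) is a next()-with-default lookup over the Slack map. The following minimal, self-contained sketch uses made-up team names rather than the real map (which lives in tasks/libs/pipeline/github_slack_map.yaml) and only shows the fallback behaviour:

# Hypothetical data, for illustration only.
GITHUB_SLACK_MAP = {
    "@datadog/agent-metrics-logs": "#agent-metrics-logs",
    "@datadog/container-platform": "#container-platform",
}
HELP_SLACK_CHANNEL = "#agent-devx-help"


def channel_for(owner: str) -> str:
    # First team whose name contains the owner wins; otherwise fall back to the help channel.
    return next(
        (chan for team, chan in GITHUB_SLACK_MAP.items() if owner.lower() in team),
        HELP_SLACK_CHANNEL,
    )


print(channel_for("Container-Platform"))  # "#container-platform"
print(channel_for("unknown-team"))        # "#agent-devx-help"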
@@ -43,60 +43,29 @@ def generate_model(_):
     fetch_data_and_train_model()
 
 
-WAVES = [
-    "wave",
-    "waveboi",
-    "wastelands-wave",
-    "wave_hello",
-    "wave-hokusai",
-    "wave_moomin",
-    "wave2",
-    "wave3",
-    "wallee-wave",
-    "vaporeon_wave",
-    "turtle-wave",
-    "softwave",
-    "shiba-wave",
-    "minion-wave",
-    "meow_wave_comfy",
-    "mario-wave",
-    "link-wave",
-    "kirby_wave",
-    "frog-wave",
-    "fox_wave",
-    "duckwave",
-    "cyr-wave",
-    "cozy-wave",
-    "cat-wave",
-    "capy-wave",
-    "bufo-wave",
-    "bongo-wave",
-    "blobwave",
-    "birb-wave",
-    "arnaud-wave",
-]
-
-
 @task
 def ask_reviews(_, pr_id):
     gh = GithubAPI()
     pr = gh.repo.get_pull(int(pr_id))
     if any(label.name == 'ask-review' for label in pr.get_labels()):
+        actor = ask_review_actor(pr)
         reviewers = [f"@datadog/{team.slug}" for team in pr.requested_teams]
 
         from slack_sdk import WebClient
 
         client = WebClient(os.environ['SLACK_API_TOKEN'])
+        emojis = client.emoji_list()
+        waves = [emoji for emoji in emojis.data['emoji'] if 'wave' in emoji and 'microwave' not in emoji]
         for reviewer in reviewers:
             channel = next(
                 (chan for team, chan in GITHUB_SLACK_REVIEW_MAP.items() if team.casefold() == reviewer.casefold()),
-                '#agent-devx-help',
+                HELP_SLACK_CHANNEL,
             )
-            message = f'Hello :{random.choice(WAVES)}:! Can you please review <{pr.html_url}/s|{pr.title}>?\n Thanks in advance!'
-            if channel == '#agent-devx-help':
-                message = f'Hello :{random.choice(WAVES)}:!\nA review channel is missing for {reviewer}, can you please ask them to update `github_slack_review_map.yaml` and transfer them this review <{pr.html_url}/s|{pr.title}>?\n Thanks in advance!'
+            message = f'Hello :{random.choice(waves)}:!\n*{actor}* is asking for a review of PR <{pr.html_url}/s|{pr.title}>.\nCould you please have a look?\nThanks in advance!'
+            if channel == HELP_SLACK_CHANNEL:
+                message = f'Hello :{random.choice(waves)}:!\nA review channel is missing for {reviewer}, can you please ask them to update `github_slack_review_map.yaml` and transfer them this review <{pr.html_url}/s|{pr.title}>?\n Thanks in advance!'
             try:
                 client.chat_postMessage(channel=channel, text=message)
             except Exception as e:
-                message = f"An error occurred while sending a review message for PR <{pr.html_url}/s|{pr.title}> to channel {channel}. Error: {e}"
+                message = f"An error occurred while sending a review message from {actor} for PR <{pr.html_url}/s|{pr.title}> to channel {channel}. 
Error: {e}"
                 client.chat_postMessage(channel='#agent-devx-ops', text=message)
diff --git a/tasks/kmt.py b/tasks/kmt.py
index b284e6d56c91b..5a8cdf588200d 100644
--- a/tasks/kmt.py
+++ b/tasks/kmt.py
@@ -628,13 +628,13 @@ def ninja_build_dependencies(ctx: Context, nw: NinjaWriter, kmt_paths: KMTPaths,
         variables={"mode": "-m744"},
     )
 
-    verifier_files = glob("pkg/ebpf/verifier/**/*.go")
+    verifier_files = glob("pkg/ebpf/verifier/*")
     nw.build(
         rule="gobin",
         pool="gobuild",
-        inputs=["./pkg/ebpf/verifier/calculator/main.go"],
+        inputs=[os.path.abspath("./pkg/ebpf/verifier/calculator/main.go")],
         outputs=[os.fspath(kmt_paths.dependencies / "verifier-calculator")],
-        implicit=verifier_files,
+        implicit=[os.path.abspath(f) for f in verifier_files],
         variables={
             "go": go_path,
             "chdir": "true",
@@ -874,11 +874,10 @@ def build_run_config(run: str | None, packages: list[str]):
     c: dict[str, Any] = {}
 
     if len(packages) == 0:
-        return {"filters": {"*": {"exclude": False}}}
+        packages = ["*"]
 
     for p in packages:
-        if p[:2] == "./":
-            p = p[2:]
+        p = p.removeprefix("./")
         if run is not None:
             c["filters"] = {p: {"run-only": [run]}}
         else:
@@ -1202,16 +1201,10 @@ def test(
         info(f"[+] Preparing {component} for {arch}")
         _prepare(ctx, stack, component, arch, packages=packages, verbose=verbose, domains=domains)
 
-    if run is not None and packages is None:
-        raise Exit("Package must be provided when specifying test")
-
     pkgs = []
     if packages is not None:
         pkgs = [os.path.relpath(os.path.realpath(p)) for p in go_package_dirs(packages.split(","), [NPM_TAG, BPF_TAG])]
 
-    if run is not None and len(pkgs) > 1:
-        raise Exit("Only a single package can be specified when running specific tests")
-
     paths = KMTPaths(stack, Arch.local())  # Arch is not relevant to the test result paths, which is what we want now
     shutil.rmtree(paths.test_results, ignore_errors=True)  # Reset test-results folder
 
diff --git a/tasks/libs/ciproviders/github_actions_tools.py b/tasks/libs/ciproviders/github_actions_tools.py
index 9399302271b5d..e5b54f1208a35 100644
--- a/tasks/libs/ciproviders/github_actions_tools.py
+++ b/tasks/libs/ciproviders/github_actions_tools.py
@@ -14,6 +14,45 @@
 from tasks.libs.common.git import get_default_branch
 
 
+def trigger_buildenv_workflow(workflow_name="runner-bump.yml", github_action_ref="master", new_version=None):
+    """
+    Trigger a workflow to bump windows gitlab runner
+    """
+    inputs = {}
+    if new_version is not None:
+        inputs["new-version"] = new_version
+
+    print(
+        "Creating workflow on buildenv on commit {} with args:\n{}".format(  # noqa: FS002
+            github_action_ref, "\n".join([f" - {k}: {inputs[k]}" for k in inputs])
+        )
+    )
+
+    # Hack: get current time to only fetch workflows that started after now
+    now = datetime.utcnow()
+
+    gh = GithubAPI('DataDog/buildenv')
+    result = gh.trigger_workflow(workflow_name, github_action_ref, inputs)
+
+    if not result:
+        print(f"Couldn't trigger workflow run. result={result}")
+        raise Exit(code=1)
+
+    # Since we can't get the workflow run id from a `create_dispatch` api call we are fetching the first running workflow after `now`. 
+ recent_runs = gh.workflow_run_for_ref_after_date(workflow_name, github_action_ref, now) + MAX_RETRY = 10 + while not recent_runs and MAX_RETRY > 0: + MAX_RETRY -= 1 + sleep(3) + recent_runs = gh.workflow_run_for_ref_after_date(workflow_name, github_action_ref, now) + + if not recent_runs: + print("Couldn't get the run workflow") + raise Exit(code=1) + + return recent_runs[0] + + def trigger_macos_workflow( workflow_name="macos.yaml", github_action_ref="master", @@ -130,7 +169,7 @@ def trigger_macos_workflow( raise Exit(code=1) -def follow_workflow_run(run): +def follow_workflow_run(run, repository="DataDog/datadog-agent-macos-build", interval=5): """ Follow the workflow run until completion and return its conclusion. """ @@ -141,13 +180,11 @@ def follow_workflow_run(run): minutes = 0 failures = 0 - # Wait time (in minutes) between two queries of the workflow status - interval = 5 MAX_FAILURES = 5 while True: # Do not fail outright for temporary failures try: - github = GithubAPI('DataDog/datadog-agent-macos-build') + github = GithubAPI(repository) run = github.workflow_run(run.id) except GithubException as e: failures += 1 @@ -241,7 +278,7 @@ def parse_log_file(log_file): return lines[line_number:] -def download_artifacts(run, destination="."): +def download_artifacts(run, destination=".", repository="DataDog/datadog-agent-macos-build"): """ Download all artifacts for a given job in the specified location. """ @@ -255,7 +292,7 @@ def download_artifacts(run, destination="."): # Create temp directory to store the artifact zips with tempfile.TemporaryDirectory() as tmpdir: - workflow = GithubAPI('DataDog/datadog-agent-macos-build') + workflow = GithubAPI(repository) for artifact in run_artifacts: # Download artifact print("Downloading artifact: ", artifact) @@ -281,14 +318,25 @@ def download_logs(run, destination="."): zip_ref.extractall(destination) -def download_with_retry(download_function, run, destination=".", retry_count=3, retry_interval=10): +def download_with_retry( + download_function, + run, + destination=".", + retry_count=3, + retry_interval=10, + repository=None, +): import requests retry = retry_count while retry > 0: try: - download_function(run, destination) + if repository: + # download_artifacts with repository argument + download_function(run, destination, repository) + else: + download_function(run, destination) print(color_message(f"Download successful for run {run.id} to {destination}", "blue")) return except (requests.exceptions.RequestException, ConnectionError): diff --git a/tasks/libs/ciproviders/github_api.py b/tasks/libs/ciproviders/github_api.py index 54a3ba49a43f1..52f9055606c7e 100644 --- a/tasks/libs/ciproviders/github_api.py +++ b/tasks/libs/ciproviders/github_api.py @@ -53,6 +53,22 @@ def repo(self): return self._repository + def graphql(self, query): + """ + Perform a GraphQL query against the Github API. + """ + + headers = {"Authorization": "Bearer " + self._auth.token, "Content-Type": "application/json"} + res = requests.post( + "https://api.github.com/graphql", + headers=headers, + json={"query": query}, + ) + if res.status_code == 200: + return res.json() + else: + raise RuntimeError(f"Failed to query Github: {res.text}") + def get_branch(self, branch_name): """ Gets info on a given branch in the given Github repository. 
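For context, the new GithubAPI.graphql helper above simply POSTs a query string to the GitHub GraphQL endpoint and returns the decoded JSON (print_pr_state in tasks/github_tasks.py is its first consumer). A minimal usage sketch, assuming GithubAPI can authenticate the same way it does for the REST calls, might look like this; the query itself is only an example:

from tasks.libs.ciproviders.github_api import GithubAPI

gh = GithubAPI()
res = gh.graphql(
    """
query {
  repository(owner: "DataDog", name: "datadog-agent") {
    defaultBranchRef { name }
  }
}
"""
)
# The helper returns the parsed JSON body, so the payload sits under "data".
print(res["data"]["repository"]["defaultBranchRef"]["name"])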
@@ -260,8 +276,13 @@ def get_pulls(self, milestone=None, labels=None): issues = self._repository.get_issues(milestone=m, state='all', labels=labels) return [i.as_pull_request() for i in issues if i.pull_request is not None] - def get_pr_for_branch(self, branch_name): - return self._repository.get_pulls(state="open", head=f'DataDog:{branch_name}') + def get_pr_for_branch(self, head_branch_name=None, base_branch_name=None): + query_params = {"state": "open"} + if head_branch_name: + query_params["head"] = f'DataDog:{head_branch_name}' + if base_branch_name: + query_params["base"] = base_branch_name + return self._repository.get_pulls(**query_params) def get_tags(self, pattern=""): """ @@ -338,6 +359,10 @@ def latest_release(self, major_version=7) -> str: release = self._repository.get_latest_release() return release.title + def latest_release_tag(self) -> str: + release = self._repository.get_latest_release() + return release.tag_name + def get_releases(self): return self._repository.get_releases() @@ -519,14 +544,37 @@ def get_token_from_app(app_id_env='GITHUB_APP_ID', pkey_env='GITHUB_KEY_B64'): auth_token = integration.get_access_token(install_id) print(auth_token.token) - def create_label(self, name, color, description=""): + def create_label(self, name, color, description="", exist_ok=False): """ Creates a label in the given GitHub repository. """ - return self._repository.create_label(name, color, description) - def create_milestone(self, title): - self._repository.create_milestone(title) + try: + return self._repository.create_label(name, color, description) + except GithubException as e: + if not ( + e.status == 422 + and len(e.data["errors"]) == 1 + and e.data["errors"][0]["code"] == "already_exists" + and exist_ok + ): + raise e + + def create_milestone(self, title, exist_ok=False): + """ + Creates a milestone in the given GitHub repository. + """ + + try: + return self._repository.create_milestone(title) + except GithubException as e: + if not ( + e.status == 422 + and len(e.data["errors"]) == 1 + and e.data["errors"][0]["code"] == "already_exists" + and exist_ok + ): + raise e def create_release(self, tag, message, draft=True): return self._repository.create_git_release( @@ -546,7 +594,7 @@ def get_codereview_complexity(self, pr_id: int) -> str: # - hard PRs are merged in more than 1 week # More details about criteria definition: https://datadoghq.atlassian.net/wiki/spaces/agent/pages/4271079846/Code+Review+Experience+Improvement#Complexity-label criteria = { - 'easy': {'files': 4, 'lines': 150, 'comments': 1}, + 'easy': {'files': 4, 'lines': 150, 'comments': 2}, 'hard': {'files': 12, 'lines': 650, 'comments': 9}, } if ( @@ -654,3 +702,9 @@ def create_release_pr(title, base_branch, target_branch, version, changelog_pr=F print(color_message(f"Done creating new PR. 
Link: {updated_pr.html_url}", "bold")) return updated_pr.html_url + + +def ask_review_actor(pr): + for event in pr.get_issue_events(): + if event.event == "labeled" and event.label.name == "ask-review": + return event.actor.name or event.actor.login diff --git a/tasks/libs/ciproviders/gitlab_api.py b/tasks/libs/ciproviders/gitlab_api.py index f1c0b4784d2f6..3c17b813164f4 100644 --- a/tasks/libs/ciproviders/gitlab_api.py +++ b/tasks/libs/ciproviders/gitlab_api.py @@ -1048,7 +1048,6 @@ def get_preset_contexts(required_tests): ("RUN_E2E_TESTS", ["auto"]), ("RUN_KMT_TESTS", ["on"]), ("RUN_UNIT_TESTS", ["on"]), - ("TESTING_CLEANUP", ["true"]), ] release_contexts = [ ("BUCKET_BRANCH", ["stable"]), @@ -1060,7 +1059,6 @@ def get_preset_contexts(required_tests): ("RUN_E2E_TESTS", ["auto"]), ("RUN_KMT_TESTS", ["on"]), ("RUN_UNIT_TESTS", ["on"]), - ("TESTING_CLEANUP", ["true"]), ] mq_contexts = [ ("BUCKET_BRANCH", ["dev"]), @@ -1071,7 +1069,6 @@ def get_preset_contexts(required_tests): ("RUN_E2E_TESTS", ["auto"]), ("RUN_KMT_TESTS", ["off"]), ("RUN_UNIT_TESTS", ["off"]), - ("TESTING_CLEANUP", ["false"]), ] conductor_contexts = [ ("BUCKET_BRANCH", ["nightly"]), # ["dev", "nightly", "beta", "stable", "oldnightly"] diff --git a/tasks/libs/common/datadog_api.py b/tasks/libs/common/datadog_api.py index 7b8b9df7f6a8a..5825c4f16dbd9 100644 --- a/tasks/libs/common/datadog_api.py +++ b/tasks/libs/common/datadog_api.py @@ -1,4 +1,5 @@ import sys +from datetime import datetime, timedelta from invoke.exceptions import Exit @@ -100,3 +101,22 @@ def send_event(title: str, text: str, tags: list[str] = None): raise Exit(code=1) return response + + +def get_ci_pipeline_events(query, days): + """ + Fetch pipeline events using Datadog CI Visibility API + """ + from datadog_api_client import ApiClient, Configuration + from datadog_api_client.v2.api.ci_visibility_pipelines_api import CIVisibilityPipelinesApi + + configuration = Configuration() + with ApiClient(configuration) as api_client: + api_instance = CIVisibilityPipelinesApi(api_client) + response = api_instance.list_ci_app_pipeline_events( + filter_query=query, + filter_from=(datetime.now() - timedelta(days=days)), + filter_to=datetime.now(), + page_limit=5, + ) + return response diff --git a/tasks/libs/common/git.py b/tasks/libs/common/git.py index b1dc1b1cd7683..bf6383b103c7f 100644 --- a/tasks/libs/common/git.py +++ b/tasks/libs/common/git.py @@ -161,7 +161,7 @@ def check_base_branch(branch, release_version): Checks if the given branch is either the default branch or the release branch associated with the given release version. 
""" - return branch == get_default_branch() or branch == release_version.branch() + return branch == get_default_branch() or branch == release_version def try_git_command(ctx, git_command, non_interactive_retries=2, non_interactive_delay=5): @@ -273,7 +273,7 @@ def get_last_release_tag(ctx, repo, pattern): code=1, ) - release_pattern = re.compile(r'.*7\.[0-9]+\.[0-9]+(-rc.*|-devel.*)?$') + release_pattern = re.compile(r'^.*7\.[0-9]+\.[0-9]+(-rc.*|-devel.*)?(\^{})?$') tags_without_suffix = [ line for line in tags.splitlines() if not line.endswith("^{}") and release_pattern.match(line) ] diff --git a/tasks/libs/common/omnibus.py b/tasks/libs/common/omnibus.py index 1887cffb97133..30559b3f68e3b 100644 --- a/tasks/libs/common/omnibus.py +++ b/tasks/libs/common/omnibus.py @@ -73,10 +73,12 @@ def env_filter(item): 'STATS_', 'SMP_', 'SSH_', + 'TAGGER_', 'TARGET_', 'TEST_INFRA_', 'USE_', 'VAULT_', + 'VALIDATE_', 'XPC_', 'WINDOWS_', ] @@ -138,6 +140,8 @@ def env_filter(item): "MESSAGE", "NEW_CLUSTER", "NEW_CLUSTER_PR_SLACK_WORKFLOW_WEBHOOK", + "NOTIFICATIONS_SLACK_CHANNEL", + "NOTIFIER_IMAGE", "OLDPWD", "PCP_DIR", "PACKAGE_ARCH", @@ -155,7 +159,6 @@ def env_filter(item): "STATIC_BINARIES_DIR", "STATSD_URL", "SYSTEM_PROBE_BINARIES_DIR", - "TESTING_CLEANUP", "TIMEOUT", "TMPDIR", "TRACE_AGENT_URL", @@ -210,8 +213,10 @@ def omnibus_compute_cache_key(ctx): omnibus_last_changes = _last_omnibus_changes(ctx) h.update(str.encode(omnibus_last_changes)) h.update(str.encode(os.getenv('CI_JOB_IMAGE', 'local_build'))) - omnibus_ruby_commit = _get_omnibus_commits('OMNIBUS_RUBY_VERSION') - omnibus_software_commit = _get_omnibus_commits('OMNIBUS_SOFTWARE_VERSION') + # Omnibus ruby & software versions can be forced through the environment + # so we need to read it from there first, and fallback to release.json + omnibus_ruby_commit = os.getenv('OMNIBUS_RUBY_VERSION', _get_omnibus_commits('OMNIBUS_RUBY_VERSION')) + omnibus_software_commit = os.getenv('OMNIBUS_SOFTWARE_VERSION', _get_omnibus_commits('OMNIBUS_SOFTWARE_VERSION')) print(f'Omnibus ruby commit: {omnibus_ruby_commit}') print(f'Omnibus software commit: {omnibus_software_commit}') h.update(str.encode(omnibus_ruby_commit)) diff --git a/tasks/libs/common/utils.py b/tasks/libs/common/utils.py index 56e96ba23403f..ab5817a01d582 100644 --- a/tasks/libs/common/utils.py +++ b/tasks/libs/common/utils.py @@ -517,13 +517,16 @@ def gitlab_section(section_name, collapsed=False, echo=False): try: if in_ci: collapsed = '[collapsed=true]' if collapsed else '' - print(f"\033[0Ksection_start:{int(time.time())}:{section_id}{collapsed}\r\033[0K{section_name + '...'}") + print( + f"\033[0Ksection_start:{int(time.time())}:{section_id}{collapsed}\r\033[0K{section_name + '...'}", + flush=True, + ) elif echo: print(color_message(f"> {section_name}...", 'bold')) yield finally: if in_ci: - print(f"\033[0Ksection_end:{int(time.time())}:{section_id}\r\033[0K") + print(f"\033[0Ksection_end:{int(time.time())}:{section_id}\r\033[0K", flush=True) def retry_function(action_name_fmt, max_retries=2, retry_delay=1): diff --git a/tasks/libs/common/worktree.py b/tasks/libs/common/worktree.py index 17e5db4abfc97..cd04558c169ad 100644 --- a/tasks/libs/common/worktree.py +++ b/tasks/libs/common/worktree.py @@ -18,7 +18,7 @@ LOCAL_DIRECTORY = Path.cwd().resolve() -def init_env(ctx, branch: str | None = None): +def init_env(ctx, branch: str | None = None, commit: str | None = None): """Will prepare the environment for commands applying to a worktree. To be used before each worktree section. 
@@ -65,6 +65,12 @@ def init_env(ctx, branch: str | None = None): if not os.environ.get("AGENT_WORKTREE_NO_PULL"): ctx.run(f"git -C '{WORKTREE_DIRECTORY}' pull", hide=True) + if commit: + if not os.environ.get("AGENT_WORKTREE_NO_PULL"): + ctx.run(f"git -C '{WORKTREE_DIRECTORY}' fetch", hide=True) + + ctx.run(f"git -C '{WORKTREE_DIRECTORY}' checkout '{commit}'", hide=True) + def remove_env(ctx): """Will remove the environment for commands applying to a worktree.""" @@ -78,14 +84,14 @@ def is_worktree(): return Path.cwd().resolve() == WORKTREE_DIRECTORY.resolve() -def enter_env(ctx, branch: str | None, skip_checkout=False): +def enter_env(ctx, branch: str | None, skip_checkout=False, commit: str | None = None): """Enters the worktree environment.""" - if not branch: - assert skip_checkout, 'skip_checkout must be set to True if branch is None' + if not (branch or commit): + assert skip_checkout, 'skip_checkout must be set to True if branch and commit are None' if not skip_checkout: - init_env(ctx, branch) + init_env(ctx, branch, commit=commit) else: assert WORKTREE_DIRECTORY.is_dir(), "Worktree directory is not present and skip_checkout is set to True" @@ -104,12 +110,13 @@ def exit_env(): @contextmanager -def agent_context(ctx, branch: str | None, skip_checkout=False): +def agent_context(ctx, branch: str | None = None, skip_checkout=False, commit: str | None = None): """Applies code to the worktree environment if the branch is not None. Args: branch: The branch to switch to. If None, will enter the worktree environment without switching branch (ensures that skip_checkout is True). skip_checkout: If True, the branch will not be checked out (no pull will be performed too). + commit: The commit to checkout. Is used instead of branch if provided. Usage: > with agent_context(ctx, branch): @@ -123,7 +130,7 @@ def agent_context(ctx, branch: str | None, skip_checkout=False): try: # Enter - enter_env(ctx, branch, skip_checkout=skip_checkout) + enter_env(ctx, branch, skip_checkout=skip_checkout, commit=commit) yield finally: diff --git a/tasks/libs/issue/assign.py b/tasks/libs/issue/assign.py index 7c196070cbc7f..5adad877e3986 100644 --- a/tasks/libs/issue/assign.py +++ b/tasks/libs/issue/assign.py @@ -69,7 +69,7 @@ def guess_from_keywords(issue): def simple_match(word): pattern_matching = { "agent-apm": ['apm', 'java', 'dotnet', 'ruby', 'trace'], - "containers": [ + "container-integrations": [ 'container', 'pod', 'kubernetes', diff --git a/tasks/libs/notify/unit_tests.py b/tasks/libs/notify/unit_tests.py index 9ef803e1f0a5f..a7062c2937037 100644 --- a/tasks/libs/notify/unit_tests.py +++ b/tasks/libs/notify/unit_tests.py @@ -4,6 +4,8 @@ import re import tempfile +from tasks.libs.pipeline.notifications import HELP_SLACK_CHANNEL + def create_msg(pipeline_id, pipeline_url, job_list): msg = f""" @@ -19,7 +21,7 @@ def create_msg(pipeline_id, pipeline_url, job_list): msg += f" - {job}\n" msg += "\n" msg += "\n" - msg += "If you modified Go files and expected unit tests to run in these jobs, please double check the job logs. If you think tests should have been executed reach out to #agent-devx-help" + msg += f"If you modified Go files and expected unit tests to run in these jobs, please double check the job logs. 
If you think tests should have been executed reach out to {HELP_SLACK_CHANNEL}" return msg diff --git a/tasks/libs/notify/utils.py b/tasks/libs/notify/utils.py index 3777ff190d526..a816440b2330e 100644 --- a/tasks/libs/notify/utils.py +++ b/tasks/libs/notify/utils.py @@ -5,9 +5,11 @@ from typing import Any from urllib.parse import quote +from tasks.libs.pipeline.notifications import HELP_SLACK_CHANNEL + PROJECT_NAME = "DataDog/datadog-agent" CI_VISIBILITY_JOB_URL = 'https://app.datadoghq.com/ci/pipeline-executions?query=ci_level%3Ajob%20%40ci.pipeline.name%3ADataDog%2Fdatadog-agent%20%40git.branch%3Amain%20%40ci.job.name%3A{name}{extra_flags}&agg_m=count{extra_args}' -NOTIFICATION_DISCLAIMER = "If there is something wrong with the notification please contact #agent-devx-help" +NOTIFICATION_DISCLAIMER = f"If there is something wrong with the notification please contact {HELP_SLACK_CHANNEL}" CHANNEL_BROADCAST = '#agent-devx-ops' PIPELINES_CHANNEL = '#datadog-agent-pipelines' DEPLOY_PIPELINES_CHANNEL = '#datadog-agent-deploy-pipelines' @@ -53,11 +55,7 @@ def should_notify(pipeline_id): from tasks.libs.ciproviders.gitlab_api import get_pipeline pipeline = get_pipeline(PROJECT_NAME, pipeline_id) - return ( - pipeline.source != 'pipeline' - or pipeline.source == 'pipeline' - and all(var in os.environ for var in ['DDR', 'DDR_WORKFLOW_ID']) - ) + return pipeline.source != 'pipeline' or pipeline.source == 'pipeline' and 'DDR_WORKFLOW_ID' in os.environ def get_pipeline_type(pipeline): diff --git a/tasks/libs/package/size.py b/tasks/libs/package/size.py index 2323d40fdb56b..51fbe73f8d295 100644 --- a/tasks/libs/package/size.py +++ b/tasks/libs/package/size.py @@ -37,22 +37,22 @@ # The below template contains the relative increase threshold for each package type PACKAGE_SIZE_TEMPLATE = { 'amd64': { - 'datadog-agent': {'deb': int(140e6)}, - 'datadog-iot-agent': {'deb': int(10e6)}, - 'datadog-dogstatsd': {'deb': int(10e6)}, - 'datadog-heroku-agent': {'deb': int(70e6)}, + 'datadog-agent': {'deb': int(5e5)}, + 'datadog-iot-agent': {'deb': int(5e5)}, + 'datadog-dogstatsd': {'deb': int(5e5)}, + 'datadog-heroku-agent': {'deb': int(5e5)}, }, 'x86_64': { - 'datadog-agent': {'rpm': int(140e6), 'suse': int(140e6)}, - 'datadog-iot-agent': {'rpm': int(10e6), 'suse': int(10e6)}, - 'datadog-dogstatsd': {'rpm': int(10e6), 'suse': int(10e6)}, + 'datadog-agent': {'rpm': int(5e5), 'suse': int(5e5)}, + 'datadog-iot-agent': {'rpm': int(5e5), 'suse': int(5e5)}, + 'datadog-dogstatsd': {'rpm': int(5e5), 'suse': int(5e5)}, }, 'arm64': { - 'datadog-agent': {'deb': int(140e6)}, - 'datadog-iot-agent': {'deb': int(10e6)}, - 'datadog-dogstatsd': {'deb': int(10e6)}, + 'datadog-agent': {'deb': int(5e5)}, + 'datadog-iot-agent': {'deb': int(5e5)}, + 'datadog-dogstatsd': {'deb': int(5e5)}, }, - 'aarch64': {'datadog-agent': {'rpm': int(140e6)}, 'datadog-iot-agent': {'rpm': int(10e6)}}, + 'aarch64': {'datadog-agent': {'rpm': int(5e5)}, 'datadog-iot-agent': {'rpm': int(5e5)}}, } @@ -84,8 +84,12 @@ def file_size(path): def directory_size(ctx, path): # HACK: For uncompressed size, fall back to native Unix utilities - computing a directory size with Python + # NOTE: We use the -b (--bytes, equivalent to --apparent-size --block-size 1) option to make the computation + # consistent. Otherwise, each file's size is counted as the number of blocks it uses, which means a file's size + # depends on how it is written to disk. 
+ # See https://unix.stackexchange.com/questions/173947/du-s-apparent-size-vs-du-s # TODO: To make this work on other OSes, the complete directory walk would need to be implemented - return int(ctx.run(f"du -sB1 {path}", hide=True).stdout.split()[0]) + return int(ctx.run(f"du --apparent-size -sB1 {path}", hide=True).stdout.split()[0]) def compute_package_size_metrics( diff --git a/tasks/libs/package/utils.py b/tasks/libs/package/utils.py index 8f43eaa9b4e33..bcfe602bf7dba 100644 --- a/tasks/libs/package/utils.py +++ b/tasks/libs/package/utils.py @@ -148,7 +148,7 @@ def get_ancestor(ctx, package_sizes, on_main): """ ancestor = get_common_ancestor(ctx, "HEAD") if not on_main and ancestor not in package_sizes: - return min(package_sizes, key=lambda x: package_sizes[x]['timestamp']) + return max(package_sizes, key=lambda x: package_sizes[x]['timestamp']) return ancestor @@ -165,5 +165,7 @@ def display_message(ctx, ancestor, rows, decision): ## Decision {decision} + +{"Currently this PR is blocked, you can reach out to #agent-delivery-help to get support/ask for an exception." if "❌" in decision else ""} """ pr_commenter(ctx, title="Uncompressed package size comparison", body=message) diff --git a/tasks/libs/pipeline/github_jira_map.yaml b/tasks/libs/pipeline/github_jira_map.yaml index d4bf1aefe502e..8376b49f9c16d 100644 --- a/tasks/libs/pipeline/github_jira_map.yaml +++ b/tasks/libs/pipeline/github_jira_map.yaml @@ -1,12 +1,11 @@ # This file contains a mapping of DataDog Github teams to JIRA projects. # The DEFAULT_JIRA_PROJECT value is interpreted as "AGNTR". # Note that keys must be quoted because the '@' symbol is reserved in YAML. -'@datadog/agent-platform': APL +'@datadog/agent-platform': ACIX '@datadog/documentation': DOCS '@datadog/container-integrations': CONTINT '@datadog/container-platform': CONTP '@datadog/container-ecosystems': CECO -'@datadog/platform-integrations': PLINT '@datadog/agent-security': CWS '@datadog/agent-apm': APMSP '@datadog/network-device-monitoring': NDMII @@ -26,8 +25,8 @@ '@datadog/universal-service-monitoring': USMON '@datadog/windows-agent': WINA '@datadog/windows-kernel-integrations': WKINT -'@datadog/opentelemetry': OTEL -'@datadog/agent-e2e-testing': APL +'@datadog/opentelemetry': OTAGENT +'@datadog/agent-e2e-testing': ADXT '@datadog/software-integrity-and-trust': SINT '@datadog/single-machine-performance': SMP '@datadog/agent-integrations': AI diff --git a/tasks/libs/pipeline/github_slack_map.yaml b/tasks/libs/pipeline/github_slack_map.yaml index 0d673eddd714e..b47b9b1436ac2 100644 --- a/tasks/libs/pipeline/github_slack_map.yaml +++ b/tasks/libs/pipeline/github_slack_map.yaml @@ -8,7 +8,6 @@ '@datadog/container-integrations': '#container-integrations' '@datadog/container-platform': '#container-platform' '@datadog/container-ecosystems': '#container-ecosystems' -'@datadog/platform-integrations': '#platform-integrations-ops' '@datadog/agent-security': '#security-and-compliance-agent-ops' '@datadog/agent-apm': '#apm-agent' '@datadog/network-device-monitoring': '#network-device-monitoring' @@ -46,3 +45,4 @@ '@datadog/apm-onboarding': '#apm-onboarding' '@datadog/apm-ecosystems-performance': '#apm-benchmarking-platform' '@DataDog/container-ecosystems': '#container-ecosystems-ops' +'@datadog/injection-platform': '#injection-platform' diff --git a/tasks/libs/pipeline/github_slack_review_map.yaml b/tasks/libs/pipeline/github_slack_review_map.yaml index 9ae0b6931a898..a84715feeaec6 100644 --- a/tasks/libs/pipeline/github_slack_review_map.yaml +++ 
b/tasks/libs/pipeline/github_slack_review_map.yaml @@ -9,7 +9,6 @@ '@datadog/container-integrations': '#container-integrations' '@datadog/container-platform': '#container-platform' '@datadog/container-ecosystems': '#container-ecosystems' -'@datadog/platform-integrations': '#platform-integrations' '@datadog/agent-security': '#security-and-compliance-agent' '@datadog/agent-apm': '#apm-agent' '@datadog/network-device-monitoring': '#network-device-monitoring' @@ -46,3 +45,4 @@ '@datadog/agent-devx-loops': '#agent-devx-loops-reviews' '@datadog/apm-onboarding': '#apm-onboarding' '@datadog/apm-ecosystems-performance': '#apm-benchmarking-platform' +'@datadog/injection-platform': '#injection-platform' diff --git a/tasks/libs/pipeline/notifications.py b/tasks/libs/pipeline/notifications.py index 92e6af39bdc70..5193e219660a6 100644 --- a/tasks/libs/pipeline/notifications.py +++ b/tasks/libs/pipeline/notifications.py @@ -38,6 +38,7 @@ def load_and_validate( GITHUB_BASE_URL = "https://github.com" DEFAULT_SLACK_CHANNEL = "#agent-devx-ops" +HELP_SLACK_CHANNEL = "#agent-devx-help" DEFAULT_JIRA_PROJECT = "AGNTR" # Map keys in lowercase GITHUB_SLACK_MAP = load_and_validate("github_slack_map.yaml", "DEFAULT_SLACK_CHANNEL", DEFAULT_SLACK_CHANNEL) diff --git a/tasks/libs/releasing/documentation.py b/tasks/libs/releasing/documentation.py index 9834983792cbd..2e74131b36aff 100644 --- a/tasks/libs/releasing/documentation.py +++ b/tasks/libs/releasing/documentation.py @@ -6,6 +6,27 @@ CONFLUENCE_DOMAIN = "https://datadoghq.atlassian.net/wiki" SPACE_KEY = "agent" +NON_RELEASING_TEAMS = { + 'telemetry-and-analytics', + 'documentation', + 'software-integrity-and-trust', + 'single-machine-performance', + 'agent-all', + 'apm-core-reliability-and-performance', + 'debugger', + 'asm-go', + 'agent-e2e-testing', + 'serverless', + 'agent-platform', + 'agent-release-management', + 'container-ecosystems', + 'apm-trace-storage', + 'iglendd', # Not a team but he's in the codeowners file + 'sdlc-security', + 'data-jobs-monitoring', + 'serverless-aws', +} + def _stringify_config(config_dict): """ @@ -104,24 +125,8 @@ def release_manager(version, team): def get_releasing_teams(): - non_releasing_teams = { - 'telemetry-and-analytics', - 'documentation', - 'software-integrity-and-trust', - 'single-machine-performance', - 'agent-all', - 'apm-core-reliability-and-performance', - 'debugger', - 'asm-go', - 'agent-e2e-testing', - 'serverless', - 'agent-platform', - 'agent-release-management', - 'container-ecosystems', - 'apm-trace-storage', - } owners = set(list_owners()) - return sorted(owners - non_releasing_teams) + return sorted(owners - NON_RELEASING_TEAMS) def create_release_table(version, cutoff_date, teams): diff --git a/tasks/libs/types/copyright.py b/tasks/libs/types/copyright.py index 3d19e49ba4603..9212fbfbbe071 100755 --- a/tasks/libs/types/copyright.py +++ b/tasks/libs/types/copyright.py @@ -24,6 +24,8 @@ # These path patterns are excluded from checks PATH_EXCLUSION_REGEX = [ + # Git internal folder + '/.git/', # These are auto-generated files but without headers to indicate it '/vendor/', '/pkg/clusteragent/autoscaling/custommetrics/api/generated/', diff --git a/tasks/licenses.py b/tasks/licenses.py index 79add346ac03b..29bc9accc6efe 100644 --- a/tasks/licenses.py +++ b/tasks/licenses.py @@ -108,7 +108,7 @@ def _verify_unknown_licenses(licenses, licenses_filename): def is_valid_quote(copyright): stack = [] - quotes_to_check = ["'", '"'] + quotes_to_check = ['"'] for c in copyright: if c in quotes_to_check: if stack and 
stack[-1] == c: diff --git a/tasks/msi.py b/tasks/msi.py index e41885f83358b..e03c2c527d277 100644 --- a/tasks/msi.py +++ b/tasks/msi.py @@ -13,7 +13,7 @@ from invoke.exceptions import Exit, UnexpectedExit from tasks.libs.common.utils import download_to_tempfile, timed -from tasks.libs.releasing.version import get_version, load_release_versions +from tasks.libs.releasing.version import VERSION_RE, _create_version_from_match, get_version, load_release_versions # Windows only import try: @@ -44,6 +44,7 @@ "APPLICATIONDATADIRECTORY", "EXAMPLECONFSLOCATION", "checks.d", + "protected", "run", "logs", "ProgramMenuDatadog", @@ -80,6 +81,7 @@ def _get_env(ctx, major_version='7', release_version='nightly'): env['AGENT_FLAVOR'] = os.getenv("AGENT_FLAVOR", "") env['AGENT_INSTALLER_OUTPUT_DIR'] = BUILD_OUTPUT_DIR env['NUGET_PACKAGES_DIR'] = NUGET_PACKAGES_DIR + env['AGENT_PRODUCT_NAME_SUFFIX'] = "" return env @@ -272,13 +274,15 @@ def _build_msi(ctx, env, outdir, name, allowlist): def _msi_output_name(env): if _is_fips_mode(env): - return f"datadog-fips-agent-{env['PACKAGE_VERSION']}-1-x86_64" + return f"datadog-fips-agent-{env['AGENT_PRODUCT_NAME_SUFFIX']}{env['PACKAGE_VERSION']}-1-x86_64" else: - return f"datadog-agent-{env['PACKAGE_VERSION']}-1-x86_64" + return f"datadog-agent-{env['AGENT_PRODUCT_NAME_SUFFIX']}{env['PACKAGE_VERSION']}-1-x86_64" @task -def build(ctx, vstudio_root=None, arch="x64", major_version='7', release_version='nightly', debug=False): +def build( + ctx, vstudio_root=None, arch="x64", major_version='7', release_version='nightly', debug=False, build_upgrade=False +): """ Build the MSI installer for the agent """ @@ -320,12 +324,25 @@ def build(ctx, vstudio_root=None, arch="x64", major_version='7', release_version # And copy it to the final output path as a build artifact shutil.copy2(os.path.join(build_outdir, msi_name + '.msi'), OUTPUT_PATH) - # if the optional upgrade test helper exists then build that too - optional_name = "datadog-agent-7.43.0~rc.3+git.485.14b9337-1-x86_64" - if os.path.exists(os.path.join(build_outdir, optional_name + ".wxs")): + # Build the optional upgrade test helper + if build_upgrade: + print("Building optional upgrade test helper") + upgrade_env = env.copy() + version = _create_version_from_match(VERSION_RE.search(env['PACKAGE_VERSION'])) + next_version = version.next_version(bump_patch=True) + upgrade_env['PACKAGE_VERSION'] = upgrade_env['PACKAGE_VERSION'].replace(str(version), str(next_version)) + upgrade_env['AGENT_PRODUCT_NAME_SUFFIX'] = "upgrade-test-" + _build_wxs( + ctx, + upgrade_env, + build_outdir, + 'AgentCustomActions.CA.dll', + ) + msi_name = _msi_output_name(upgrade_env) + print(os.path.join(build_outdir, msi_name + ".wxs")) with timed("Building optional MSI"): - _build_msi(ctx, env, build_outdir, optional_name, DATADOG_AGENT_MSI_ALLOW_LIST) - shutil.copy2(os.path.join(build_outdir, optional_name + '.msi'), OUTPUT_PATH) + _build_msi(ctx, env, build_outdir, msi_name, DATADOG_AGENT_MSI_ALLOW_LIST) + shutil.copy2(os.path.join(build_outdir, msi_name + '.msi'), OUTPUT_PATH) @task diff --git a/tasks/new_e2e_tests.py b/tasks/new_e2e_tests.py index 74f027afd1582..55dcf6224c27c 100644 --- a/tasks/new_e2e_tests.py +++ b/tasks/new_e2e_tests.py @@ -11,6 +11,7 @@ import re import shutil import tempfile +from collections import defaultdict from pathlib import Path import yaml @@ -20,13 +21,28 @@ from tasks.flavor import AgentFlavor from tasks.gotest import process_test_result, test_flavor +from tasks.libs.common.color import Color from 
tasks.libs.common.git import get_commit_sha from tasks.libs.common.go import download_go_dependencies from tasks.libs.common.gomodules import get_default_modules from tasks.libs.common.utils import REPO_PATH, color_message, gitlab_section, running_in_ci +from tasks.testwasher import TestWasher from tasks.tools.e2e_stacks import destroy_remote_stack +class TestState: + """Describes the state of a test, if it has failed and if it is flaky.""" + + FAILED = True, False + FLAKY_FAILED = True, True + SUCCESS = False, False + FLAKY_SUCCESS = False, True + + @staticmethod + def get_human_readable_state(failing: bool, flaky: bool) -> str: + return f'{"Failing" if failing else "Successful"} / {"Flaky" if flaky else "Non-flaky"}' + + @task( iterable=['tags', 'targets', 'configparams'], help={ @@ -73,6 +89,7 @@ def run( """ Run E2E Tests based on test-infra-definitions infrastructure provisioning. """ + if shutil.which("pulumi") is None: raise Exit( "pulumi CLI not found, Pulumi needs to be installed on the system (see https://github.com/DataDog/test-infra-definitions/blob/main/README.md)", @@ -172,6 +189,7 @@ def run( print( f'To run this test locally, use: `{command}`. ' 'You can also add `E2E_DEV_MODE="true"` to run in dev mode which will leave the environment up after the tests.' + '\nYou can troubleshoot e2e test failures with this documentation: https://datadoghq.atlassian.net/wiki/x/7gIo0' ) if logs_post_processing: @@ -179,16 +197,10 @@ def run( post_processed_output = post_process_output( test_res[0].result_json_path, test_depth=logs_post_processing_test_depth ) - os.makedirs(logs_folder, exist_ok=True) write_result_to_log_files(post_processed_output, logs_folder) - try: - pretty_print_logs(post_processed_output) - except TooManyLogsError: - print( - color_message("WARNING", "yellow") - + f": Too many logs to print, skipping logs printing to avoid Gitlab collapse. You can find your logs properly organized in the job artifacts: https://gitlab.ddbuild.io/DataDog/datadog-agent/-/jobs/{os.getenv('CI_JOB_ID')}/artifacts/browse/e2e-output/logs/" - ) + + pretty_print_logs(test_res[0].result_json_path, post_processed_output) else: print( color_message("WARNING", "yellow") @@ -356,20 +368,62 @@ class TooManyLogsError(Exception): pass -def pretty_print_logs(logs_per_test, max_size=250000): +def pretty_print_test_logs(logs_per_test: list[tuple[str, str, str]], max_size): # Compute size in bytes of what we are about to print. If it exceeds max_size, we skip printing because it will make the Gitlab logs almost completely collapsed. # By default Gitlab has a limit of 500KB per job log, so we want to avoid printing too much. size = 0 - for _, tests in logs_per_test.items(): - for _, logs in tests.items(): - size += len("".join(logs).encode()) + for _, _, logs in logs_per_test: + size += len("".join(logs).encode()) if size > max_size and running_in_ci(): raise TooManyLogsError - for package, tests in logs_per_test.items(): - for test, logs in tests.items(): - with gitlab_section("Complete logs for " + package + "." + test, collapsed=True): - print("Complete logs for " + package + "." + test) - print("".join(logs)) + for package, test, logs in logs_per_test: + with gitlab_section("Complete logs for " + package + "." + test, collapsed=True): + print("".join(logs)) + + return size + + +def pretty_print_logs(result_json_path, logs_per_test, max_size=250000, flakes_file="flakes.yaml"): + """Pretty prints logs with a specific order. + + Print order: + 1. Failing and non flaky tests + 2. 
Failing and flaky tests + 3. Successful and non flaky tests + 4. Successful and flaky tests + """ + + result_json_name = result_json_path.split("/")[-1] + result_json_dir = result_json_path.removesuffix('/' + result_json_name) + washer = TestWasher(test_output_json_file=result_json_name, flakes_file_path=flakes_file) + failing_tests, marked_flaky_tests = washer.parse_test_results(result_json_dir) + all_known_flakes = washer.merge_known_flakes(marked_flaky_tests) + + try: + # (failing, flaky) -> [(package, test_name, logs)] + categorized_logs = defaultdict(list) + + # Split flaky / non flaky tests + for package, tests in logs_per_test.items(): + package_flaky = all_known_flakes.get(package, set()) + package_failing = failing_tests.get(package, set()) + for test_name, logs in tests.items(): + state = test_name in package_failing, test_name in package_flaky + categorized_logs[state].append((package, test_name, logs)) + + for failing, flaky in [TestState.FAILED, TestState.FLAKY_FAILED, TestState.SUCCESS, TestState.FLAKY_SUCCESS]: + logs_to_print = categorized_logs[failing, flaky] + if not logs_to_print: + continue + + print(f'* {color_message(TestState.get_human_readable_state(failing, flaky), Color.BOLD)} job logs:') + # Print till the size limit is reached + max_size -= pretty_print_test_logs(logs_to_print, max_size) + except TooManyLogsError: + print( + color_message("WARNING", "yellow") + + f": Too many logs to print, skipping logs printing to avoid Gitlab collapse. You can find your logs properly organized in the job artifacts: https://gitlab.ddbuild.io/DataDog/datadog-agent/-/jobs/{os.getenv('CI_JOB_ID')}/artifacts/browse/e2e-output/logs/" + ) @task diff --git a/tasks/notes.py b/tasks/notes.py index d58a36cf3ddf8..b18dc3cb59849 100644 --- a/tasks/notes.py +++ b/tasks/notes.py @@ -6,42 +6,53 @@ from tasks.libs.ciproviders.github_api import create_release_pr from tasks.libs.common.color import color_message -from tasks.libs.common.git import get_current_branch, try_git_command +from tasks.libs.common.git import get_current_branch, get_default_branch, try_git_command +from tasks.libs.common.worktree import agent_context from tasks.libs.releasing.notes import _add_dca_prelude, _add_prelude, update_changelog_generic +from tasks.libs.releasing.version import deduce_version @task -def add_prelude(ctx, version): - _add_prelude(ctx, version) +def add_prelude(ctx, release_branch): + version = deduce_version(ctx, release_branch, next_version=False) + + with agent_context(ctx, release_branch): + _add_prelude(ctx, version) @task -def add_dca_prelude(ctx, version): +def add_dca_prelude(ctx, release_branch): """ Release of the Cluster Agent should be pinned to a version of the Agent. 
""" - _add_dca_prelude(ctx, version) + version = deduce_version(ctx, release_branch, next_version=False) + + with agent_context(ctx, release_branch): + _add_dca_prelude(ctx, version) @task -def add_installscript_prelude(ctx, version): - res = ctx.run(f"reno --rel-notes-dir releasenotes-installscript new prelude-release-{version}") - new_releasenote = res.stdout.split(' ')[-1].strip() # get the new releasenote file path - - with open(new_releasenote, "w") as f: - f.write( - f"""prelude: - | - Released on: {date.today()}""" - ) +def add_installscript_prelude(ctx, release_branch): + version = deduce_version(ctx, release_branch, next_version=False) + + with agent_context(ctx, release_branch): + res = ctx.run(f"reno --rel-notes-dir releasenotes-installscript new prelude-release-{version}") + new_releasenote = res.stdout.split(' ')[-1].strip() # get the new releasenote file path + + with open(new_releasenote, "w") as f: + f.write( + f"""prelude: + | + Released on: {date.today()}""" + ) - ctx.run(f"git add {new_releasenote}") - print("\nCommit this with:") - print(f"git commit -m \"Add prelude for {version} release\"") + ctx.run(f"git add {new_releasenote}") + print("\nCommit this with:") + print(f"git commit -m \"Add prelude for {version} release\"") @task -def update_changelog(ctx, new_version=None, target="all", upstream="origin"): +def update_changelog(ctx, release_branch, target="all", upstream="origin"): """ Quick task to generate the new CHANGELOG using reno when releasing a minor version (linux/macOS only). @@ -51,16 +62,40 @@ def update_changelog(ctx, new_version=None, target="all", upstream="origin"): will be generated. """ - # Step 1 - generate the changelogs - - generate_agent = target in ["all", "agent"] - generate_cluster_agent = target in ["all", "cluster-agent"] + new_version = deduce_version(ctx, release_branch, next_version=False) + new_version_int = list(map(int, new_version.split("."))) + if len(new_version_int) != 3: + print(f"Error: invalid version: {new_version_int}") + raise Exit(code=1) - if new_version is not None: - new_version_int = list(map(int, new_version.split("."))) - if len(new_version_int) != 3: - print(f"Error: invalid version: {new_version_int}") - raise Exit(code=1) + # Launch this task from the main branch of the major version + branch = get_default_branch(major=new_version_int[0]) + + with agent_context(ctx, branch): + # Step 1: Add release changelog preludes + update_branch = f"changelog-update-{new_version}" + base_branch = get_current_branch(ctx) + print(color_message(f"Branching out to {update_branch}", "bold")) + ctx.run(f"git switch -c {update_branch}") + + print(color_message("Adding Agent release changelog prelude", "bold")) + _add_prelude(ctx, str(new_version)) + + print(color_message("Adding DCA release changelog prelude", "bold")) + _add_dca_prelude(ctx, str(new_version)) + + ok = try_git_command(ctx, f"git commit -m 'Add preludes for {new_version} release'") + if not ok: + raise Exit( + color_message( + f"Could not create commit. 
Please commit manually, push the {update_branch} branch and then open a PR against {release_branch}.", + "red", + ), + code=1, + ) + # Step 2 - generate the changelogs + generate_agent = target in ["all", "agent"] + generate_cluster_agent = target in ["all", "cluster-agent"] # let's avoid losing uncommitted change with 'git reset --hard' try: @@ -69,9 +104,6 @@ def update_changelog(ctx, new_version=None, target="all", upstream="origin"): print("Error: You have uncommitted change, please commit or stash before using update_changelog") return - # make sure we are up to date - ctx.run("git fetch") - # let's check that the tag for the new version is present try: ctx.run(f"git tag --list | grep {new_version}") @@ -79,116 +111,114 @@ def update_changelog(ctx, new_version=None, target="all", upstream="origin"): print(f"Missing '{new_version}' git tag: mandatory to use 'reno'") raise - if generate_agent: - update_changelog_generic(ctx, new_version, "releasenotes", "CHANGELOG.rst") - if generate_cluster_agent: - update_changelog_generic(ctx, new_version, "releasenotes-dca", "CHANGELOG-DCA.rst") - - # Step 2 - commit changes - - update_branch = f"changelog-update-{new_version}" - base_branch = get_current_branch(ctx) - - print(color_message(f"Branching out to {update_branch}", "bold")) - ctx.run(f"git checkout -b {update_branch}") - - print(color_message("Committing CHANGELOG.rst and CHANGELOG-DCA.rst", "bold")) - print( - color_message( - "If commit signing is enabled, you will have to make sure the commit gets properly signed.", "bold" - ) - ) - ctx.run("git add CHANGELOG.rst CHANGELOG-DCA.rst") - - commit_message = f"'Changelog updates for {new_version} release'" + if generate_agent: + update_changelog_generic(ctx, new_version, "releasenotes", "CHANGELOG.rst") + if generate_cluster_agent: + update_changelog_generic(ctx, new_version, "releasenotes-dca", "CHANGELOG-DCA.rst") - ok = try_git_command(ctx, f"git commit -m {commit_message}") - if not ok: - raise Exit( + # Step 3 - commit changes + print(color_message("Committing CHANGELOG.rst and CHANGELOG-DCA.rst", "bold")) + print( color_message( - f"Could not create commit. Please commit manually with:\ngit commit -m {commit_message}\n, push the {update_branch} branch and then open a PR.", - "red", - ), - code=1, + "If commit signing is enabled, you will have to make sure the commit gets properly signed.", "bold" + ) ) + ctx.run("git add CHANGELOG.rst CHANGELOG-DCA.rst") + + commit_message = f"'Changelog updates for {new_version} release'" + + ok = try_git_command(ctx, f"git commit -m {commit_message}") + if not ok: + raise Exit( + color_message( + f"Could not create commit. Please commit manually with:\ngit commit -m {commit_message}\n, push the {update_branch} branch and then open a PR.", + "red", + ), + code=1, + ) - # Step 3 - Push and create PR + # Step 4 - Push and create PR + print(color_message("Pushing new branch to the upstream repository", "bold")) + res = ctx.run(f"git push --set-upstream {upstream} {update_branch}", warn=True) + if res.exited is None or res.exited > 0: + raise Exit( + color_message( + f"Could not push branch {update_branch} to the upstream '{upstream}'. 
Please push it manually and then open a PR.", + "red", + ), + code=1, + ) - print(color_message("Pushing new branch to the upstream repository", "bold")) - res = ctx.run(f"git push --set-upstream {upstream} {update_branch}", warn=True) - if res.exited is None or res.exited > 0: - raise Exit( - color_message( - f"Could not push branch {update_branch} to the upstream '{upstream}'. Please push it manually and then open a PR.", - "red", - ), - code=1, + create_release_pr( + f"Changelog update for {new_version} release", base_branch, update_branch, new_version, changelog_pr=True ) - create_release_pr( - f"Changelog update for {new_version} release", base_branch, update_branch, new_version, changelog_pr=True - ) - @task -def update_installscript_changelog(ctx, new_version): +def update_installscript_changelog(ctx, release_branch): """ Quick task to generate the new CHANGELOG-INSTALLSCRIPT using reno when releasing a minor version (linux/macOS only). """ - new_version_int = list(map(int, new_version.split("."))) - if len(new_version_int) != 3: - print(f"Error: invalid version: {new_version_int}") - raise Exit(code=1) + new_version = deduce_version(ctx, release_branch, next_version=False) - # let's avoid losing uncommitted change with 'git reset --hard' - try: - ctx.run("git diff --exit-code HEAD", hide="both") - except Failure: - print("Error: You have uncommitted changes, please commit or stash before using update-installscript-changelog") - return - - # make sure we are up to date - ctx.run("git fetch") - - # let's check that the tag for the new version is present (needed by reno) - try: - ctx.run(f"git tag --list | grep installscript-{new_version}") - except Failure: - print(f"Missing 'installscript-{new_version}' git tag: mandatory to use 'reno'") - raise - - # generate the new changelog - ctx.run( - f"reno --rel-notes-dir releasenotes-installscript report --ignore-cache --version installscript-{new_version} --no-show-source > /tmp/new_changelog-installscript.rst" - ) - - # reseting git - ctx.run("git reset --hard HEAD") - - # mac's `sed` has a different syntax for the "-i" paramter - sed_i_arg = "-i" - if sys.platform == 'darwin': - sed_i_arg = "-i ''" - # remove the old header from the existing changelog - ctx.run(f"sed {sed_i_arg} -e '1,4d' CHANGELOG-INSTALLSCRIPT.rst") - - if sys.platform != 'darwin': - # sed on darwin doesn't support `-z`. On mac, you will need to manually update the following. 
- ctx.run( - "sed -z {0} -e 's/installscript-{1}\\n===={2}/{1}\\n{2}/' /tmp/new_changelog-installscript.rst".format( # noqa: FS002 - sed_i_arg, new_version, '=' * len(new_version) + with agent_context(ctx, release_branch): + new_version_int = list(map(int, new_version.split("."))) + + if len(new_version_int) != 3: + print(f"Error: invalid version: {new_version_int}") + raise Exit(code=1) + + # let's avoid losing uncommitted change with 'git reset --hard' + try: + ctx.run("git diff --exit-code HEAD", hide="both") + except Failure: + print( + "Error: You have uncommitted changes, please commit or stash before using update-installscript-changelog" ) + return + + # make sure we are up to date + ctx.run("git fetch") + + # let's check that the tag for the new version is present (needed by reno) + try: + ctx.run(f"git tag --list | grep installscript-{new_version}") + except Failure: + print(f"Missing 'installscript-{new_version}' git tag: mandatory to use 'reno'") + raise + + # generate the new changelog + ctx.run( + f"reno --rel-notes-dir releasenotes-installscript report --ignore-cache --version installscript-{new_version} --no-show-source > /tmp/new_changelog-installscript.rst" ) - # merging to CHANGELOG-INSTALLSCRIPT.rst - ctx.run( - "cat CHANGELOG-INSTALLSCRIPT.rst >> /tmp/new_changelog-installscript.rst && mv /tmp/new_changelog-installscript.rst CHANGELOG-INSTALLSCRIPT.rst" - ) + # reseting git + ctx.run("git reset --hard HEAD") + + # mac's `sed` has a different syntax for the "-i" paramter + sed_i_arg = "-i" + if sys.platform == 'darwin': + sed_i_arg = "-i ''" + # remove the old header from the existing changelog + ctx.run(f"sed {sed_i_arg} -e '1,4d' CHANGELOG-INSTALLSCRIPT.rst") + + if sys.platform != 'darwin': + # sed on darwin doesn't support `-z`. On mac, you will need to manually update the following. + ctx.run( + "sed -z {0} -e 's/installscript-{1}\\n===={2}/{1}\\n{2}/' /tmp/new_changelog-installscript.rst".format( # noqa: FS002 + sed_i_arg, new_version, '=' * len(new_version) + ) + ) + + # merging to CHANGELOG-INSTALLSCRIPT.rst + ctx.run( + "cat CHANGELOG-INSTALLSCRIPT.rst >> /tmp/new_changelog-installscript.rst && mv /tmp/new_changelog-installscript.rst CHANGELOG-INSTALLSCRIPT.rst" + ) - # commit new CHANGELOG-INSTALLSCRIPT - ctx.run("git add CHANGELOG-INSTALLSCRIPT.rst") + # commit new CHANGELOG-INSTALLSCRIPT + ctx.run("git add CHANGELOG-INSTALLSCRIPT.rst") - print("\nCommit this with:") - print(f"git commit -m \"[INSTALLSCRIPT] Update CHANGELOG-INSTALLSCRIPT for {new_version}\"") + print("\nCommit this with:") + print(f"git commit -m \"[INSTALLSCRIPT] Update CHANGELOG-INSTALLSCRIPT for {new_version}\"") diff --git a/tasks/owners.py b/tasks/owners.py index b2ecceffd581a..297900752c9c3 100644 --- a/tasks/owners.py +++ b/tasks/owners.py @@ -1,8 +1,9 @@ from collections import defaultdict from invoke import task +from invoke.exceptions import Exit -from tasks.libs.owners.parsing import read_owners, search_owners +from tasks.libs.owners.parsing import list_owners, read_owners, search_owners from tasks.libs.pipeline.notifications import GITHUB_SLACK_MAP @@ -16,6 +17,27 @@ def find_codeowners(_, path, owners_file=".github/CODEOWNERS"): print(", ".join(search_owners(path, owners_file))) +@task +def list_files(ctx, team, owners_file=".github/CODEOWNERS"): + """ + List all files owned by a particular team. 
+ """ + + valid_owners = list(list_owners(owners_file)) + + if team not in valid_owners: + raise Exit(f"unexpected owner '{team}'") + + code_owners = read_owners(owners_file) + result = ctx.run('git ls-files', hide=True) + files = result.stdout.splitlines() + + for file_name in files: + normalized_owners = [owner[1].casefold().replace("@datadog/", "") for owner in code_owners.of(file_name)] + if team in normalized_owners: + print(file_name) + + def make_partition(names: list[str], owners_file: str, get_channels: bool = False) -> dict[str, set[str]]: """ From a list of job / file names, will create a dictionary with the teams as keys and the names as values. diff --git a/tasks/package.py b/tasks/package.py index 3a831a24225fc..4d65532d43ef2 100644 --- a/tasks/package.py +++ b/tasks/package.py @@ -53,7 +53,13 @@ def check_size(ctx, filename: str = 'package_sizes.json', dry_run: bool = False) decision = "⚠️ Warning" else: decision = "✅ Passed" - display_message(ctx, ancestor, size_message, decision) + # Try to display the message on the PR when a PR exists + if os.environ.get("CI_COMMIT_BRANCH"): + try: + display_message(ctx, ancestor, size_message, decision) + # PR commenter asserts on the numbers of PR's, this will raise if there's no PR + except AssertionError as exc: + print(f"Got `{exc}` while trying to comment on PR, we'll assume that this is not a PR.") if "Failed" in decision: raise Exit(code=1) diff --git a/tasks/pipeline.py b/tasks/pipeline.py index ad825bee1540d..38cd4d0a42b43 100644 --- a/tasks/pipeline.py +++ b/tasks/pipeline.py @@ -757,7 +757,7 @@ def trigger_external(ctx, owner_branch_name: str, no_verify=False): [ # Fetch f"git remote add {owner} git@github.com:{owner}/datadog-agent.git", - f"git fetch '{owner}'", + f"git fetch '{owner}' '{branch}'", # Create branch f"git checkout '{owner}/{branch}'", # This first checkout puts us in a detached head state, thus the second checkout below f"git checkout -b '{owner}/{branch}'", diff --git a/tasks/release.py b/tasks/release.py index c692d5c5a4ffe..5eebde56baec2 100644 --- a/tasks/release.py +++ b/tasks/release.py @@ -26,6 +26,7 @@ from tasks.libs.common.constants import ( GITHUB_REPO_NAME, ) +from tasks.libs.common.datadog_api import get_ci_pipeline_events from tasks.libs.common.git import ( check_base_branch, check_clean_branch_state, @@ -63,7 +64,6 @@ set_new_release_branch, update_release_json, ) -from tasks.libs.releasing.notes import _add_dca_prelude, _add_prelude from tasks.libs.releasing.version import ( MINOR_RC_VERSION_RE, RC_VERSION_RE, @@ -84,6 +84,7 @@ ] BACKPORT_LABEL_COLOR = "5319e7" +TAG_BATCH_SIZE = 3 @task @@ -122,6 +123,13 @@ def update_modules(ctx, release_branch=None, version=None, trust=False): for module in modules.values(): for dependency in module.dependencies: dependency_mod = modules[dependency] + if ( + agent_version.startswith('6') + and 'pkg/util/optional' in dependency_mod.dependency_path(agent_version) + and 'test/new-e2e' in module.go_mod_path() + ): + # Skip this dependency update in new-e2e for Agent 6, as it's incompatible. 
+ continue ctx.run(f"go mod edit -require={dependency_mod.dependency_path(agent_version)} {module.go_mod_path()}") @@ -193,7 +201,9 @@ def tag_modules( if push: tags_list = ' '.join(tags) - ctx.run(f"git push origin {tags_list}{force_option}") + for idx in range(0, len(tags), TAG_BATCH_SIZE): + batch_tags = tags[idx : idx + TAG_BATCH_SIZE] + ctx.run(f"git push origin {' '.join(batch_tags)}{force_option}") print(f"Pushed tag {tags_list}") print(f"Created module tags for version {agent_version}") @@ -298,24 +308,7 @@ def finish(ctx, release_branch, upstream="origin"): code=1, ) - # Step 4: Add release changelog preludes - print(color_message("Adding Agent release changelog prelude", "bold")) - _add_prelude(ctx, str(new_version)) - - print(color_message("Adding DCA release changelog prelude", "bold")) - _add_dca_prelude(ctx, str(new_version)) - - ok = try_git_command(ctx, f"git commit -m 'Add preludes for {new_version} release'") - if not ok: - raise Exit( - color_message( - f"Could not create commit. Please commit manually, push the {final_branch} branch and then open a PR against {final_branch}.", - "red", - ), - code=1, - ) - - # Step 5: Push branch and create PR + # Step 4: Push branch and create PR print(color_message("Pushing new branch to the upstream repository", "bold")) res = ctx.run(f"git push --set-upstream {upstream} {final_branch}", warn=True) if res.exited is None or res.exited > 0: @@ -328,7 +321,7 @@ def finish(ctx, release_branch, upstream="origin"): ) create_release_pr( - f"Final updates for release.json and Go modules for {new_version} release + preludes", + f"Final updates for release.json and Go modules for {new_version} release", release_branch, final_branch, new_version, @@ -401,10 +394,11 @@ def create_rc(ctx, release_branch, patch_version=False, upstream="origin", slack update_branch = f"release/{new_highest_version}-{int(time.time())}" check_clean_branch_state(ctx, github, update_branch) - if not check_base_branch(release_branch, new_highest_version): + active_releases = [branch.name for branch in github.latest_unreleased_release_branches()] + if not any(check_base_branch(release_branch, unreleased_branch) for unreleased_branch in active_releases): raise Exit( color_message( - f"The branch you are on is neither {get_default_branch()} or the correct release branch ({new_highest_version.branch()}). Aborting.", + f"The branch you are on is neither {get_default_branch()} or amongst the active release branches ({active_releases}). Aborting.", "red", ), code=1, @@ -474,8 +468,7 @@ def create_rc(ctx, release_branch, patch_version=False, upstream="origin", slack "pr_url": pr_url, "version": str(new_highest_version), } - - ctx.run(f"curl -X POST -H 'Content-Type: application/json' --data '{json.dumps(payload)}' {slack_webhook}") + send_slack_msg(ctx, payload, slack_webhook) @task @@ -507,7 +500,7 @@ def build_rc(ctx, release_branch, patch_version=False, k8s_deployments=False): print(color_message("Checking repository state", "bold")) # Check that the base branch is valid - if not check_base_branch(release_branch, new_version): + if not check_base_branch(release_branch, new_version.branch()): raise Exit( color_message( f"The branch you are on is neither {get_default_branch()} or the correct release branch ({new_version.branch()}). 
Aborting.", @@ -676,7 +669,7 @@ def create_release_branches(ctx, base_directory="~/dd", major_version: int = 7, # Strings with proper branch/tag names release_branch = current.branch() - with agent_context(ctx, get_default_branch(major=major_version)): + with agent_context(ctx, get_default_branch()): # Step 0: checks ctx.run("git fetch") @@ -706,6 +699,7 @@ def create_release_branches(ctx, base_directory="~/dd", major_version: int = 7, f'backport/{release_branch}', BACKPORT_LABEL_COLOR, f'Automatically create a backport PR to {release_branch}', + exist_ok=True, ) # Step 2 - Create PRs with new settings in datadog-agent repository @@ -1029,19 +1023,18 @@ def generate_release_metrics(ctx, milestone, freeze_date, release_date): print(code_stats) -# TODO rename to freeze_date to cutoff_date @task -def create_schedule(_, version, freeze_date): +def create_schedule(_, version, cutoff_date): """Create confluence pages for the release schedule. Args: - freeze_date: Date when the code cut-off happened. Expected format YYYY-MM-DD, like '2022-02-01' + cutoff_date: Date when the code cut-off happened. Expected format YYYY-MM-DD, like '2022-02-01' """ required_environment_variables = ["ATLASSIAN_USERNAME", "ATLASSIAN_PASSWORD"] if not all(key in os.environ for key in required_environment_variables): raise Exit(f"You must set {required_environment_variables} environment variables to use this task.", code=1) - release_page = create_release_page(version, date.fromisoformat(freeze_date)) + release_page = create_release_page(version, date.fromisoformat(cutoff_date)) print(f"Release schedule pages {release_page['url']} {color_message('successfully created', 'green')}") @@ -1207,7 +1200,6 @@ def update_current_milestone(ctx, major_version: int = 7, upstream="origin"): """ Create a PR to bump the current_milestone in the release.json file """ - import github gh = GithubAPI() @@ -1216,14 +1208,7 @@ def update_current_milestone(ctx, major_version: int = 7, upstream="origin"): next.devel = False print(f"Creating the {next} milestone...") - - try: - gh.create_milestone(str(next)) - except github.GithubException as e: - if e.status == 422: - print(f"Milestone {next} already exists") - else: - raise e + gh.create_milestone(str(next), exist_ok=True) with agent_context(ctx, get_default_branch(major=major_version)): milestone_branch = f"release_milestone-{int(time.time())}" @@ -1260,3 +1245,37 @@ def update_current_milestone(ctx, major_version: int = 7, upstream="origin"): milestone_branch, next, ) + + +def send_slack_msg(ctx, payload, webhook): + ctx.run(f'curl -X POST -H "Content-Type: application/json" --data "{payload}" {webhook}') + + +@task +def check_previous_agent6_rc(ctx): + """ + Validates that there are no existing Agent 6 release candidate pull requests + and checks if an Agent 6 build pipeline has been run in the past week + """ + err_msg = "" + agent6_prs = "" + github = GithubAPI() + prs = github.get_pr_for_branch(None, "6.53.x") + for pr in prs: + if "Update release.json and Go modules for 6.53" in pr.title and not pr.draft: + agent6_prs += f"\n- {pr.title}: https://github.com/DataDog/datadog-agent/pull/{pr.number}" + if agent6_prs: + err_msg += "AGENT 6 ERROR: The following Agent 6 release candidate PRs already exist. 
Please address these PRs before creating a new release candidate" + err_msg += agent6_prs + + response = get_ci_pipeline_events( + 'ci_level:pipeline @ci.pipeline.name:"DataDog/datadog-agent" @git.tag:6.53.* -@ci.pipeline.downstream:true', + 7, + ) + if not response.data: + err_msg += "\nAGENT 6 ERROR: No Agent 6 build pipelines have run in the past week. Please trigger a build pipeline for the next agent 6 release candidate." + + if err_msg: + payload = {'message': err_msg} + send_slack_msg(ctx, payload, os.environ.get("SLACK_DATADOG_AGENT_CI_WEBHOOK")) + raise Exit(message=err_msg, code=1) diff --git a/tasks/requirements.txt b/tasks/requirements.txt index a56ac30a3177d..180692e441a2e 100644 --- a/tasks/requirements.txt +++ b/tasks/requirements.txt @@ -1,3 +1,6 @@ +# Requirements defined in this file are used only locally and should be lazy loaded inside tasks' functions +# Requirements needed by the CI should be defined in datadog-agent-buildimages repository + -r libs/requirements-github.txt -r libs/requirements-notifications.txt -r show_linters_issues/requirements.txt @@ -6,3 +9,4 @@ python-gitlab==4.4.0 debugpy==1.8.2 +watchdog==6.0.0 diff --git a/tasks/security_agent.py b/tasks/security_agent.py index fe1def0e39b8c..5f3619747e22c 100644 --- a/tasks/security_agent.py +++ b/tasks/security_agent.py @@ -3,6 +3,7 @@ import datetime import errno import glob +import json import os import re import shutil @@ -540,9 +541,9 @@ def generate_cws_documentation(ctx, go_generate=False): def cws_go_generate(ctx, verbose=False): ctx.run("go install golang.org/x/tools/cmd/stringer") ctx.run("go install github.com/mailru/easyjson/easyjson") + ctx.run("go install github.com/DataDog/datadog-agent/pkg/security/generators/accessors") + ctx.run("go install github.com/DataDog/datadog-agent/pkg/security/generators/operators") with ctx.cd("./pkg/security/secl"): - ctx.run("go install github.com/DataDog/datadog-agent/pkg/security/secl/compiler/generators/accessors") - ctx.run("go install github.com/DataDog/datadog-agent/pkg/security/secl/compiler/generators/operators") if sys.platform == "linux": ctx.run("GOOS=windows go generate ./...") # Disable cross generation from windows for now. Need to fix the stringer issue. 
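# A rough illustration of the pattern used in the cws_go_generate change above: the code
# generators are `go install`ed from the repository root (where the main go.mod can resolve
# them) before entering the secl sub-module to run `go generate`. The package paths below are
# hypothetical placeholders, not the Agent's real generator paths.
from invoke import task


@task
def go_generate_sketch(ctx):
    # Install the generator binaries first, from the repo root.
    for generator in (
        "github.com/example/agent/pkg/security/generators/accessors",
        "github.com/example/agent/pkg/security/generators/operators",
    ):
        ctx.run(f"go install {generator}")

    # The SECL package is its own Go module; run `go generate` from inside it
    # so its go:generate directives pick up the freshly installed binaries.
    with ctx.cd("./pkg/security/secl"):
        ctx.run("go generate ./...")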
@@ -568,10 +569,10 @@ def single_run(ctx, table_url, output_file, output_string_file, abis=None): if abis: abis = f"-abis {abis}" ctx.run( - f"go run github.com/DataDog/datadog-agent/pkg/security/secl/model/syscall_table_generator -table-url {table_url} -output {output_file} -output-string {output_string_file} {abis}" + f"go run github.com/DataDog/datadog-agent/pkg/security/generators/syscall_table_generator -table-url {table_url} -output {output_file} -output-string {output_string_file} {abis}" ) - linux_version = "v6.8" + linux_version = "v6.13" single_run( ctx, f"https://raw.githubusercontent.com/torvalds/linux/{linux_version}/arch/x86/entry/syscalls/syscall_64.tbl", @@ -588,13 +589,14 @@ def single_run(ctx, table_url, output_file, output_string_file, abis=None): DEFAULT_BTFHUB_CONSTANTS_PATH = "./pkg/security/probe/constantfetch/btfhub/constants.json" +DEFAULT_BTFHUB_CONSTANTS_ARM64_PATH = "./pkg/security/probe/constantfetch/btfhub/constants_arm64.json" +DEFAULT_BTFHUB_CONSTANTS_AMD64_PATH = "./pkg/security/probe/constantfetch/btfhub/constants_amd64.json" @task -def generate_btfhub_constants(ctx, archive_path, force_refresh=False, output_path=DEFAULT_BTFHUB_CONSTANTS_PATH): - force_refresh_opt = "-force-refresh" if force_refresh else "" +def generate_btfhub_constants(ctx, archive_path, output_path=DEFAULT_BTFHUB_CONSTANTS_PATH): ctx.run( - f"go run -tags linux_bpf,btfhubsync ./pkg/security/probe/constantfetch/btfhub/ -archive-root {archive_path} -output {output_path} {force_refresh_opt}", + f"go run -tags linux_bpf,btfhubsync ./pkg/security/probe/constantfetch/btfhub/ -archive-root {archive_path} -output {output_path}", ) @@ -605,13 +607,48 @@ def combine_btfhub_constants(ctx, archive_path, output_path=DEFAULT_BTFHUB_CONST ) +@task +def extract_btfhub_constants(ctx, arch, input_path, output_path): + res = {} + used_contant_ids = set() + with open(input_path) as fi: + base = json.load(fi) + res["constants"] = base["constants"] + res["kernels"] = [] + for kernel in base["kernels"]: + if kernel["arch"] == arch: + res["kernels"].append(kernel) + used_contant_ids.add(kernel["cindex"]) + + new_constants = [] + mapping = {} + for i, group in enumerate(res["constants"]): + if i in used_contant_ids: + new_i = len(new_constants) + new_constants.append(group) + mapping[i] = new_i + + for kernel in res["kernels"]: + kernel["cindex"] = mapping[kernel["cindex"]] + res["constants"] = new_constants + + with open(output_path, "w") as fo: + json.dump(res, fo, indent="\t") + + +@task +def split_btfhub_constants(ctx): + extract_btfhub_constants(ctx, "arm64", DEFAULT_BTFHUB_CONSTANTS_PATH, DEFAULT_BTFHUB_CONSTANTS_ARM64_PATH) + extract_btfhub_constants(ctx, "x86_64", DEFAULT_BTFHUB_CONSTANTS_PATH, DEFAULT_BTFHUB_CONSTANTS_AMD64_PATH) + + @task def generate_cws_proto(ctx): with tempfile.TemporaryDirectory() as temp_gobin: with environ({"GOBIN": temp_gobin}): - ctx.run("go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2") + ctx.run("go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.36.3") ctx.run("go install github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto@v0.6.0") - ctx.run("go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.4.0") + ctx.run("go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1") plugin_opts = " ".join( [ diff --git a/tasks/system_probe.py b/tasks/system_probe.py index ddd731b535bea..499b2ad2e82fc 100644 --- a/tasks/system_probe.py +++ b/tasks/system_probe.py @@ -348,7 +348,7 @@ def ninja_test_ebpf_programs(nw: NinjaWriter, build_dir): for 
prog in test_programs: infile = os.path.join(ebpf_c_dir, f"{prog}.c") outfile = os.path.join(build_dir, f"{os.path.basename(prog)}.o") - ninja_ebpf_program( + ninja_ebpf_co_re_program( nw, infile, outfile, {"flags": test_flags} ) # All test ebpf programs are just for testing, so we always build them with debug symbols @@ -525,7 +525,7 @@ def ninja_cgo_type_files(nw: NinjaWriter): "pkg/ebpf/types.go": [ "pkg/ebpf/c/lock_contention.h", ], - "pkg/dynamicinstrumentation/ditypes/ebpf.go": ["pkg/dynamicinstrumentation/codegen/c/types.h"], + "pkg/dynamicinstrumentation/ditypes/ebpf.go": ["pkg/dynamicinstrumentation/codegen/c/base_event.h"], "pkg/gpu/ebpf/kprobe_types.go": [ "pkg/gpu/ebpf/c/types.h", ], @@ -604,7 +604,7 @@ def ninja_generate( ) ninja_define_co_re_compiler(nw, arch=arch, compiler=ebpf_compiler) ninja_network_ebpf_programs(nw, build_dir, co_re_build_dir) - ninja_test_ebpf_programs(nw, build_dir) + ninja_test_ebpf_programs(nw, co_re_build_dir) ninja_security_ebpf_programs(nw, build_dir, debug, kernel_release, arch=arch) ninja_container_integrations_ebpf_programs(nw, co_re_build_dir) ninja_runtime_compilation_files(nw, gobin) @@ -1459,6 +1459,7 @@ def validate_object_file_metadata(ctx: Context, build_dir: str | Path = "pkg/ebp print(f"All {total_metadata_files} object files have valid metadata") +@task(aliases=["object-files"]) def build_object_files( ctx, major_version='7', @@ -1580,15 +1581,6 @@ def build_cws_object_files( copy_bundled_ebpf_files(ctx, arch=arch) -@task -def object_files( - ctx, kernel_release=None, with_unit_test=False, arch: str = CURRENT_ARCH, ebpf_compiler: str = 'clang' -): - build_object_files( - ctx, kernel_release=kernel_release, with_unit_test=with_unit_test, arch=arch, ebpf_compiler=ebpf_compiler - ) - - def clean_object_files(ctx, major_version='7', kernel_release=None, debug=False, strip_object_files=False): run_ninja( ctx, @@ -1666,7 +1658,10 @@ def generate_minimized_btfs(ctx, source_dir, output_dir, bpf_programs): nw.rule(name="decompress_btf", command="tar -xf $in -C $target_directory") nw.rule(name="minimize_btf", command="bpftool gen min_core_btf $in $out $input_bpf_programs") - nw.rule(name="compress_minimized_btf", command="tar -cJf $out -C $tar_working_directory $rel_in && rm $in") + nw.rule( + name="compress_minimized_btf", + command="tar --mtime=@0 -cJf $out -C $tar_working_directory $rel_in && rm $in", + ) for root, dirs, files in os.walk(source_dir): path_from_root = os.path.relpath(root, source_dir) @@ -1779,7 +1774,8 @@ def process_btfhub_archive(ctx, branch="main"): dst_file = os.path.join(dst_dir, file) if os.path.exists(dst_file): raise Exit(message=f"{dst_file} already exists") - shutil.move(src_file, os.path.join(dst_dir, file)) + + shutil.move(src_file, dst_file) # generate both tarballs for arch in ["x86_64", "arm64"]: @@ -1789,7 +1785,8 @@ def process_btfhub_archive(ctx, branch="main"): if os.path.exists(btfs_dir): with ctx.cd(temp_dir): # include btfs-$ARCH as prefix for all paths - ctx.run(f"tar -cf {output_path} btfs-{arch}") + # set modification time to zero to ensure deterministic tarball + ctx.run(f"tar --mtime=@0 -cf {output_path} btfs-{arch}") @task diff --git a/tasks/testwasher.py b/tasks/testwasher.py index ac3623b468a09..519d063c0e460 100644 --- a/tasks/testwasher.py +++ b/tasks/testwasher.py @@ -89,6 +89,10 @@ def parse_test_results(self, module_path: str) -> tuple[dict, dict]: if test_result["Action"] == "success": if test_result["Test"] in failing_tests[test_result["Package"]]: 
failing_tests[test_result["Package"]].remove(test_result["Test"]) + # Tests that have a go routine that panicked does not have an Action field with the result of the test, let's try to catch them from their Output + if "Output" in test_result and "panic:" in test_result["Output"]: + failing_tests[test_result["Package"]].add(test_result["Test"]) + if "Output" in test_result and self.flaky_test_indicator in test_result["Output"]: flaky_marked_tests[test_result["Package"]].add(test_result["Test"]) return failing_tests, flaky_marked_tests @@ -159,26 +163,37 @@ def generate_flake_finder_pipeline(ctx, n=3, generate_config=False): continue kept_job[job] = job_details - deps_job = copy.deepcopy(config["go_e2e_deps"]) - - # Remove needs, rules, extends and retry from the jobs - for job in [deps_job] + list(kept_job.values()): + # Remove rules, extends and retry from the jobs, update needs to point to parent pipeline + for job in list(kept_job.values()): _clean_job(job) new_jobs = {} - new_jobs["go_e2e_deps"] = deps_job new_jobs['variables'] = copy.deepcopy(config['variables']) new_jobs['variables']['PARENT_PIPELINE_ID'] = 'undefined' new_jobs['variables']['PARENT_COMMIT_SHA'] = 'undefined' - new_jobs['stages'] = [deps_job["stage"]] + [f'flake-finder-{i}' for i in range(n)] + new_jobs['variables']['PARENT_COMMIT_SHORT_SHA'] = 'undefined' + new_jobs['stages'] = [f'flake-finder-{i}' for i in range(n)] # Create n jobs with the same configuration for job in kept_job: for i in range(n): new_job = copy.deepcopy(kept_job[job]) new_job["stage"] = f"flake-finder-{i}" - new_job["dependencies"] = ["go_e2e_deps"] if 'variables' in new_job: + # Variables that reference the parent pipeline should be updated + for key, value in new_job['variables'].items(): + new_value = value + if not isinstance(value, str): + continue + if "CI_PIPELINE_ID" in value: + new_value = new_value.replace("CI_PIPELINE_ID", "PARENT_PIPELINE_ID") + if "CI_COMMIT_SHA" in value: + new_value = new_value.replace("CI_COMMIT_SHA", "PARENT_COMMIT_SHA") + if "CI_COMMIT_SHORT_SHA" in value: + new_value = new_value.replace("CI_COMMIT_SHORT_SHA", "PARENT_COMMIT_SHORT_SHA") + + new_job['variables'][key] = new_value + if ( 'E2E_PIPELINE_ID' in new_job['variables'] and new_job['variables']['E2E_PIPELINE_ID'] == "$CI_PIPELINE_ID" @@ -192,7 +207,8 @@ def generate_flake_finder_pipeline(ctx, n=3, generate_config=False): if 'E2E_PRE_INITIALIZED' in new_job['variables']: del new_job['variables']['E2E_PRE_INITIALIZED'] new_job["rules"] = [{"when": "always"}] - new_job["needs"] = ["go_e2e_deps"] + if i > 0: + new_job["needs"].append(f"{job}-{i - 1}") new_jobs[f"{job}-{i}"] = new_job with open("flake-finder-gitlab-ci.yml", "w") as f: @@ -206,7 +222,36 @@ def _clean_job(job): """ Remove the needs, rules, extends and retry from the job """ - for step in ('needs', 'rules', 'extends', 'retry'): + for step in ('rules', 'extends', 'retry'): if step in job: del job[step] + + if 'needs' in job: + job["needs"] = _add_parent_pipeline(job["needs"]) return job + + +def _add_parent_pipeline(needs): + """ + Add the parent pipeline to the need, only for the jobs that are not the artifacts deploy jobs. 
+ """ + + deps_to_keep = [ + "tests_windows_secagent_x64", + "tests_windows_sysprobe_x64", + "go_e2e_deps", + ] # Needs that should be kept on jobs, because the e2e test actually needs the artifact from these jobs + + new_needs = [] + for need in needs: + if isinstance(need, str): + if need not in deps_to_keep: + continue + new_needs.append({"pipeline": "$PARENT_PIPELINE_ID", "job": need}) + elif isinstance(need, dict): + if "job" in need and need["job"] not in deps_to_keep: + continue + new_needs.append({**need, "pipeline": "$PARENT_PIPELINE_ID"}) + elif isinstance(need, list): + new_needs.extend(_add_parent_pipeline(need)) + return new_needs diff --git a/tasks/unit_tests/e2e_testing_tests.py b/tasks/unit_tests/e2e_testing_tests.py new file mode 100644 index 0000000000000..565470fe08d5e --- /dev/null +++ b/tasks/unit_tests/e2e_testing_tests.py @@ -0,0 +1,40 @@ +import unittest +from unittest.mock import MagicMock, patch + +from tasks.new_e2e_tests import post_process_output, pretty_print_logs + + +class TestE2ETesting(unittest.TestCase): + @patch("tasks.new_e2e_tests.pretty_print_test_logs") + @patch("tasks.libs.common.utils.running_in_ci", new=MagicMock(return_value=True)) + def test_pretty_print(self, p): + flakes_file = "tasks/unit_tests/testdata/flakes_2.yaml" + path = "tasks/unit_tests/testdata/test_output_failure_marker.json" + + pretty_print_logs(path, post_process_output(path), flakes_file=flakes_file) + + # Failing / flaky, successful / non flaky + self.assertEqual(p.call_count, 2) + args1 = p.call_args_list[0][0][0][0] + args2 = p.call_args_list[1][0][0][0] + args3 = p.call_args_list[1][0][0][1] + self.assertEqual(args1[1], "TestGetPayload") + self.assertEqual(args2[1], "TestGetPayloadContainerized") + self.assertEqual(args3[1], "TestGetPayloadContainerizedWithDocker0") + + @patch("tasks.new_e2e_tests.pretty_print_test_logs") + @patch("tasks.libs.common.utils.running_in_ci", new=MagicMock(return_value=True)) + def test_pretty_print2(self, p=None): + flakes_file = "tasks/unit_tests/testdata/flakes_1.yaml" + path = "tasks/unit_tests/testdata/test_output_failure_no_marker.json" + + pretty_print_logs(path, post_process_output(path), flakes_file=flakes_file) + + # Failing / flaky, successful / non flaky + self.assertEqual(p.call_count, 2) + args1 = p.call_args_list[0][0][0][0] + args2 = p.call_args_list[1][0][0][0] + args3 = p.call_args_list[1][0][0][1] + self.assertEqual(args1[1], "TestGetPayload") + self.assertEqual(args2[1], "TestFilterDev") + self.assertEqual(args3[1], "TestGetTimeout") diff --git a/tasks/unit_tests/github_actions_tools_tests.py b/tasks/unit_tests/github_actions_tools_tests.py new file mode 100644 index 0000000000000..58a3e461bf4cc --- /dev/null +++ b/tasks/unit_tests/github_actions_tools_tests.py @@ -0,0 +1,68 @@ +import unittest +from unittest.mock import MagicMock, call, patch + +from invoke.exceptions import Exit + +from tasks.libs.ciproviders.github_actions_tools import download_artifacts, download_logs, download_with_retry + + +class TestDownloadWithRetry(unittest.TestCase): + def setUp(self) -> None: + self.gh = MagicMock() + self.gh._auth.token = "largo_winch" + + @patch('tasks.libs.ciproviders.github_actions_tools.GithubAPI') + @patch('tasks.libs.ciproviders.github_actions_tools.zipfile.ZipFile', new=MagicMock()) + def test_download_log(self, gh_mock): + run = MagicMock() + gh_mock.return_value = self.gh + download_with_retry(download_function=download_logs, run=run, destination='.', retry_count=3, retry_interval=1) + + 
@patch('tasks.libs.ciproviders.github_actions_tools.GithubAPI') + @patch('tasks.libs.ciproviders.github_actions_tools.zipfile.ZipFile', new=MagicMock()) + def test_downloads_artifacts_with_repository(self, gh_mock): + run = MagicMock() + gh_mock.return_value = self.gh + download_with_retry( + download_function=download_artifacts, run=run, destination='.', retry_count=3, retry_interval=1 + ) + + @patch('tasks.libs.ciproviders.github_actions_tools.GithubAPI') + @patch('tasks.libs.ciproviders.github_actions_tools.zipfile.ZipFile', new=MagicMock()) + def test_downloads_artifacts_without_repository(self, gh_mock): + run = MagicMock() + gh_mock.return_value = self.gh + download_with_retry( + download_function=download_artifacts, + run=run, + destination='.', + retry_count=3, + retry_interval=1, + repository="simon_ovronnaz", + ) + + @patch('builtins.print') + @patch('tasks.libs.ciproviders.github_actions_tools.GithubAPI') + @patch('tasks.libs.ciproviders.github_actions_tools.zipfile.ZipFile', new=MagicMock(side_effect=ConnectionError)) + def test_connection_error(self, gh_mock, print_mock): + run = MagicMock() + gh_mock.return_value = self.gh + with self.assertRaises(Exit): + download_with_retry( + download_function=download_logs, run=run, destination='.', retry_count=2, retry_interval=1 + ) + self.assertEqual(print_mock.call_count, 5) + print_mock.assert_has_calls([call('Connectivity issue while downloading, retrying... 0 attempts left')]) + + @patch('builtins.print') + @patch('tasks.libs.ciproviders.github_actions_tools.GithubAPI') + @patch('tasks.libs.ciproviders.github_actions_tools.zipfile.ZipFile', new=MagicMock(side_effect=LookupError)) + def test_other_error(self, gh_mock, print_mock): + run = MagicMock() + gh_mock.return_value = self.gh + with self.assertRaises(LookupError): + download_with_retry( + download_function=download_logs, run=run, destination='.', retry_count=1, retry_interval=1 + ) + self.assertEqual(print_mock.call_count, 2) + self.assertTrue(print_mock.call_args_list[-1].startswith('Exception that is not a connectivity issue')) diff --git a/tasks/unit_tests/junit_tests.py b/tasks/unit_tests/junit_tests.py index 5e7ea0cb3184e..8f3f8b943bec0 100644 --- a/tasks/unit_tests/junit_tests.py +++ b/tasks/unit_tests/junit_tests.py @@ -55,7 +55,7 @@ def test_without_split(self): def test_with_split(self): xml_file = Path("./tasks/unit_tests/testdata/secret.tar.gz/-go-src-datadog-agent-junit-out-base.xml") owners = read_owners(".github/CODEOWNERS") - self.assertEqual(junit.split_junitxml(xml_file.parent, xml_file, owners, []), 29) + self.assertEqual(junit.split_junitxml(xml_file.parent, xml_file, owners, []), 28) class TestGroupPerTag(unittest.TestCase): @@ -121,4 +121,4 @@ def test_e2e(self, mock_which, mock_popen): mock_which.side_effect = lambda cmd: f"/usr/local/bin/{cmd}" junit.junit_upload_from_tgz("tasks/unit_tests/testdata/testjunit-tests_deb-x64-py3.tgz") mock_popen.assert_called() - self.assertEqual(mock_popen.call_count, 31) + self.assertEqual(mock_popen.call_count, 30) diff --git a/tasks/unit_tests/libs/common/git_tests.py b/tasks/unit_tests/libs/common/git_tests.py index 4a803a58b2758..2c72cdd77716d 100644 --- a/tasks/unit_tests/libs/common/git_tests.py +++ b/tasks/unit_tests/libs/common/git_tests.py @@ -117,7 +117,9 @@ def test_ordered(self): c = MockContext( run={ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( - "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 
refs/tags/7.56.0-rc.2\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + """e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1 + 7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.2 + 2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3""" ) } ) @@ -128,7 +130,9 @@ def test_non_ordered(self): c = MockContext( run={ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( - "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.11\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + """e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1 + 7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.11 + 2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3""" ) } ) @@ -139,7 +143,9 @@ def test_suffix_lower(self): c = MockContext( run={ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( - "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.2^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + """e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1 + 7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.2^{} + 2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3""" ) } ) @@ -150,7 +156,9 @@ def test_suffix_equal(self): c = MockContext( run={ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( - "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.3^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + """e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1 + 7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.3^{} + 2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3""" ) } ) @@ -161,7 +169,9 @@ def test_suffix_greater(self): c = MockContext( run={ 'git ls-remote -t https://github.com/DataDog/woof "7.56.*"': Result( - "e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1\n7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.4^{}\n2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3" + """e1b8e9163203b7446c74fac0b8d4153eb24227a0 refs/tags/7.56.0-rc.1 + 7c6777bb7add533a789c69293b59e3261711d330 refs/tags/7.56.0-rc.4^{} + 2b8b710b322feb03148f871a77ab92163a0a12de refs/tags/7.56.0-rc.3""" ) } ) @@ -172,9 +182,51 @@ def test_only_release_tags(self): c = MockContext( run={ 'git ls-remote -t https://github.com/DataDog/woof "7.57.*"': Result( - 
"43638bd55a74fd6ec51264cc7b3b1003d0b1c7ac\trefs/tags/7.57.0-dbm-mongo-1.5\ne01bcf3d12e6d6742b1fa8296882938c6dba9922\trefs/tags/7.57.0-devel\n6a5ad7fda590c7b8ba7036bca70dc8a0872e7afe\trefs/tags/7.57.0-devel^{}\n2c2eb2293cccd33100d7d930a59c136319942915\trefs/tags/7.57.0-installer-0.5.0-rc.1\n2c2eb2293cccd33100d7d930a59c136319942915\trefs/tags/7.57.0-installer-0.5.0-rc.2\n6a91fcca0ade9f77f08cd98d923a8d9ec18d7e8f\trefs/tags/7.57.0-installer-0.5.0-rc.3\n7e8ffc3de15f0486e6cb2184fa59f02da6ecfab9\trefs/tags/7.57.0-rc.1\nfa72fd12e3483a2d5957ea71fe01a8b1af376424\trefs/tags/7.57.0-rc.1^{}\n22587b746d6a0876cb7477b9b335e8573bdc3ac5\trefs/tags/7.57.0-rc.2\nd6c151a36487c3b54145ae9bf200f6c356bb9348\trefs/tags/7.57.0-rc.2^{}\n948ed4dd8c8cdf0aae467997086bb2229d4f1916\trefs/tags/7.57.0-rc.3\n259ed086a45960006e110622332cc8a39f9c6bb9\trefs/tags/7.57.0-rc.3^{}\na249f4607e5da894715a3e011dba8046b46678ed\trefs/tags/7.57.0-rc.4\n51a3b405a244348aec711d38e5810a6d88075b77\trefs/tags/7.57.0-rc.4^{}\n06519be707d6f24fb8265cde5a50cf0a66d5cb02\trefs/tags/7.57.0-rc.5\n7f43a5180446290f498742e68d8b28a75da04188\trefs/tags/7.57.0-rc.5^{}\n6bb640559e7626131290c63dab3959ba806c9886\trefs/tags/7.57.0-rc.6\nc5ed1f8b4734d31e94c2a83f307dbcb2b5a1faac\trefs/tags/7.57.0-rc.6^{}\n260697e624bb1d92ad306fdc301aab9b2975a627\trefs/tags/7.57.0-rc.7\n48617a0f56747e33b75d3dcf570bc2237726dc0e\trefs/tags/7.57.0-rc.7^{}\n5e11e104ff99b40b01ff2cfa702c0e4a465f98de\trefs/tags/7.57.1-beta-ndm-rdns-enrichment\n91c7c85d7c8fbb94421a90b273aea75630617eef\trefs/tags/7.57.1-beta-ndm-rdns-enrichment^{}\n3ad359da2894fa3de6e265c56dea8fabdb128454\trefs/tags/7.57.1-beta-ndm-rdns-enrichment2\n86683ad80578912014cc947dcf247ba020532403\trefs/tags/7.57.1-beta-ndm-rdns-enrichment2^{}" + """"43638bd55a74fd6ec51264cc7b3b1003d0b1c7ac refs/tags/7.57.0-dbm-mongo-1.5 + e01bcf3d12e6d6742b1fa8296882938c6dba9922 refs/tags/7.57.0-devel + 6a5ad7fda590c7b8ba7036bca70dc8a0872e7afe refs/tags/7.57.0-devel^{} + 2c2eb2293cccd33100d7d930a59c136319942915 refs/tags/7.57.0-installer-0.5.0-rc.1 + 2c2eb2293cccd33100d7d930a59c136319942915 refs/tags/7.57.0-installer-0.5.0-rc.2 + 6a91fcca0ade9f77f08cd98d923a8d9ec18d7e8f refs/tags/7.57.0-installer-0.5.0-rc.3 + 7e8ffc3de15f0486e6cb2184fa59f02da6ecfab9 refs/tags/7.57.0-rc.1 + fa72fd12e3483a2d5957ea71fe01a8b1af376424 refs/tags/7.57.0-rc.1^{} + 22587b746d6a0876cb7477b9b335e8573bdc3ac5 refs/tags/7.57.0-rc.2 + d6c151a36487c3b54145ae9bf200f6c356bb9348 refs/tags/7.57.0-rc.2^{} + 948ed4dd8c8cdf0aae467997086bb2229d4f1916 refs/tags/7.57.0-rc.3 + 259ed086a45960006e110622332cc8a39f9c6bb9 refs/tags/7.57.0-rc.3^{} + a249f4607e5da894715a3e011dba8046b46678ed refs/tags/7.57.0-rc.4 + 51a3b405a244348aec711d38e5810a6d88075b77 refs/tags/7.57.0-rc.4^{} + 06519be707d6f24fb8265cde5a50cf0a66d5cb02 refs/tags/7.57.0-rc.5 + 7f43a5180446290f498742e68d8b28a75da04188 refs/tags/7.57.0-rc.5^{} + 6bb640559e7626131290c63dab3959ba806c9886 refs/tags/7.57.0-rc.6 + c5ed1f8b4734d31e94c2a83f307dbcb2b5a1faac refs/tags/7.57.0-rc.6^{} + 260697e624bb1d92ad306fdc301aab9b2975a627 refs/tags/7.57.0-rc.7 + 48617a0f56747e33b75d3dcf570bc2237726dc0e refs/tags/7.57.0-rc.7^{} + 5e11e104ff99b40b01ff2cfa702c0e4a465f98de refs/tags/7.57.1-beta-ndm-rdns-enrichment + 91c7c85d7c8fbb94421a90b273aea75630617eef refs/tags/7.57.1-beta-ndm-rdns-enrichment^{} + 3ad359da2894fa3de6e265c56dea8fabdb128454 refs/tags/7.57.1-beta-ndm-rdns-enrichment2 + 86683ad80578912014cc947dcf247ba020532403 refs/tags/7.57.1-beta-ndm-rdns-enrichment2^{}""" ) } ) _, name = get_last_release_tag(c, "woof", "7.57.*") self.assertEqual(name, 
"7.57.0-rc.7") + + def test_final_and_rc_tag_on_same_commit(self): + c = MockContext( + run={ + 'git ls-remote -t https://github.com/DataDog/baubau "7.61.*"': Result( + """8dd145cf716b5c047e81bb287dc58e150b8c2b94 refs/tags/7.61.0 + 45f19a6a26c01dae9fdfce944d3fceae7f4e6498 refs/tags/7.61.0^{} + 1cfbd72c75d6fcfe920707b2d08764ee89ec8793 refs/tags/7.61.0-rc.1 + 52fd18ccf4391ed5da0647dad2c1fdeea8a8a70c refs/tags/7.61.0-rc.1^{} + 3b7310d32b0ad4d347fa64f60a02261caf910a99 refs/tags/7.61.0-rc.4 + 3944948c0c26ddcbc4026b98c2709c188d95b702 refs/tags/7.61.0-rc.4^{} + c54e5d5694879c51ae5ff8675dacc92976630587 refs/tags/7.61.0-rc.5 + 45f19a6a26c01dae9fdfce944d3fceae7f4e6498 refs/tags/7.61.0-rc.5^{}""" + ) + } + ) + + commit, _ = get_last_release_tag(c, "baubau", "7.61.*") + self.assertEqual(commit, "45f19a6a26c01dae9fdfce944d3fceae7f4e6498") diff --git a/tasks/unit_tests/modules_tests.py b/tasks/unit_tests/modules_tests.py index d2d41d726d175..267c491b15d9d 100644 --- a/tasks/unit_tests/modules_tests.py +++ b/tasks/unit_tests/modules_tests.py @@ -1,15 +1,10 @@ from __future__ import annotations -import json -import os -import subprocess import tempfile import unittest from pathlib import Path -from typing import Any from tasks.libs.common.gomodules import ( - AGENT_MODULE_PATH_PREFIX, Configuration, GoModule, get_default_modules, @@ -44,73 +39,6 @@ """ -class TestModules(unittest.TestCase): - def load_go_mod(self, module_path: str) -> Any: - """Loads the go.mod file as a JSON object""" - go_mod_path = os.path.join(module_path, "go.mod") - res = subprocess.run(["go", "mod", "edit", "-json", go_mod_path], capture_output=True) - self.assertEqual(res.returncode, 0) - - return json.loads(res.stdout) - - def get_agent_required(self, module: dict) -> set[str]: - """Returns the set of required datadog-agent modules""" - if "Require" not in module: - return set() - - required = module["Require"] - if required is None: - return set() - - results = set() - self.assertIsInstance(required, list) - for req in required: - self.assertIsInstance(req, dict) - self.assertIn("Path", req) - path = req["Path"] - - self.assertIsInstance(path, str) - if path.startswith(AGENT_MODULE_PATH_PREFIX): - results.add(path) - - return results - - def get_agent_replaced(self, module: dict) -> set[str]: - """Returns the set of replaced datadog-agent modules""" - if "Replace" not in module: - return set() - - replaced = module["Replace"] - if replaced is None: - return set() - - results = set() - self.assertIsInstance(replaced, list) - for req in replaced: - self.assertIsInstance(req, dict) - self.assertIn("Old", req) - old = req["Old"] - - self.assertIsInstance(old, dict) - self.assertIn("Path", old) - oldpath = old["Path"] - if oldpath.startswith(AGENT_MODULE_PATH_PREFIX): - results.add(oldpath) - - return results - - def test_modules_replace_agent(self): - """Ensure that all required datadog-agent modules are replaced""" - for module_path in get_default_modules().keys(): - with self.subTest(module_path=module_path): - module = self.load_go_mod(module_path) - self.assertIsInstance(module, dict) - required = self.get_agent_required(module) - replaced = self.get_agent_replaced(module) - required_not_replaced = required - replaced - self.assertEqual(required_not_replaced, set(), f"in module {module_path}") - - class TestGoModuleCondition(unittest.TestCase): def test_always(self): mod = GoModule(path='pkg/my/module', test_targets=['.'], lint_targets=['.'], should_test_condition='always') diff --git a/tasks/unit_tests/notify_tests.py 
b/tasks/unit_tests/notify_tests.py index 637f39f4134e1..f130f9d346d12 100644 --- a/tasks/unit_tests/notify_tests.py +++ b/tasks/unit_tests/notify_tests.py @@ -42,7 +42,8 @@ def test_merge(self, api_mock, print_mock): repo_mock.pipelines.get.return_value.source = "push" list_mock = repo_mock.pipelines.get.return_value.jobs.list list_mock.side_effect = [get_fake_jobs(), []] - notify.send_message(MockContext(), "42", dry_run=True) + with patch.dict('os.environ', {}, clear=True): + notify.send_message(MockContext(), "42", dry_run=True) list_mock.assert_called() repo_mock.pipelines.get.assert_called_with("42") self.assertTrue("merge" in print_mock.mock_calls[0].args[0]) @@ -121,7 +122,8 @@ def test_merge_without_get_failed_call(self, print_mock, get_failed_jobs_mock, a ) ) get_failed_jobs_mock.return_value = failed - notify.send_message(MockContext(), "42", dry_run=True) + with patch.dict('os.environ', {}, clear=True): + notify.send_message(MockContext(), "42", dry_run=True) self.assertTrue("merge" in print_mock.mock_calls[0].args[0]) get_failed_jobs_mock.assert_called() repo_mock.jobs.get.assert_called() @@ -213,7 +215,8 @@ def test_merge_with_get_failed_call(self, print_mock, api_mock): repo_mock.pipelines.get.return_value.ref = "test" repo_mock.pipelines.get.return_value.source = "push" - notify.send_message(MockContext(), "42", dry_run=True) + with patch.dict('os.environ', {}, clear=True): + notify.send_message(MockContext(), "42", dry_run=True) self.assertTrue("merge" in print_mock.mock_calls[0].args[0]) trace_mock.assert_called() list_mock.assert_called() @@ -254,13 +257,14 @@ def test_trigger_with_get_failed_call(self, print_mock, api_mock): repo_mock.pipelines.get.return_value.ref = "test" repo_mock.pipelines.get.return_value.source = "api" - notify.send_message(MockContext(), "42", dry_run=True) + with patch.dict('os.environ', {}, clear=True): + notify.send_message(MockContext(), "42", dry_run=True) self.assertTrue("arrow_forward" in print_mock.mock_calls[0].args[0]) trace_mock.assert_called() list_mock.assert_called() repo_mock.jobs.get.assert_called() - @patch.dict('os.environ', {'DDR': 'true', 'DDR_WORKFLOW_ID': '1337'}) + @patch.dict('os.environ', {'DDR': 'true', 'DDR_WORKFLOW_ID': '1337', 'DEPLOY_AGENT': 'false'}) @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') @patch('builtins.print') @patch('tasks.libs.pipeline.notifications.get_pr_from_commit', new=MagicMock(return_value="")) @@ -282,12 +286,15 @@ def test_trigger_with_get_failed_call_conductor(self, print_mock, api_mock): repo_mock.jobs.get.assert_called() @patch('tasks.libs.ciproviders.gitlab_api.get_gitlab_api') - def test_dismiss_notification(self, api_mock): + @patch('builtins.print') + def test_dismiss_notification(self, print_mock, api_mock): repo_mock = api_mock.return_value.projects.get.return_value repo_mock.pipelines.get.return_value.source = "pipeline" - notify.send_message(MockContext(), "42", dry_run=True) + with patch.dict('os.environ', {}, clear=True): + notify.send_message(MockContext(), "42", dry_run=True) repo_mock.jobs.get.assert_not_called() + print_mock.assert_called_with("This pipeline is a non-conductor downstream pipeline, skipping notifications") def test_post_to_channel1(self): self.assertFalse(pipeline_status.should_send_message_to_author("main", default_branch="main")) diff --git a/tasks/unit_tests/package_lib_tests.py b/tasks/unit_tests/package_lib_tests.py index 72f3c6c190252..2b9c0f4f1014c 100644 --- a/tasks/unit_tests/package_lib_tests.py +++ b/tasks/unit_tests/package_lib_tests.py @@ 
-141,7 +141,7 @@ def test_found_on_dev(self): @patch.dict('os.environ', {'CI_COMMIT_REF_NAME': 'puppet'}) def test_not_found_on_dev(self): c = MockContext(run={'git merge-base HEAD origin/main': Result('grand_pa')}) - self.assertEqual(get_ancestor(c, self.package_sizes, False), "grand_ma") + self.assertEqual(get_ancestor(c, self.package_sizes, False), "ma") @patch.dict('os.environ', {'CI_COMMIT_REF_NAME': 'main'}) def test_on_main(self): diff --git a/tasks/unit_tests/package_tests.py b/tasks/unit_tests/package_tests.py index 1c7e0c47dc249..8c4b6759f6bfa 100644 --- a/tasks/unit_tests/package_tests.py +++ b/tasks/unit_tests/package_tests.py @@ -14,6 +14,7 @@ class TestCheckSize(unittest.TestCase): 'OMNIBUS_PACKAGE_DIR': 'tasks/unit_tests/testdata/packages', 'OMNIBUS_PACKAGE_DIR_SUSE': 'tasks/unit_tests/testdata/packages', 'CI_COMMIT_REF_NAME': 'pikachu', + 'CI_COMMIT_BRANCH': 'sequoia', }, ) @patch('tasks.libs.package.size.find_package', new=MagicMock(return_value='datadog-agent')) @@ -32,7 +33,7 @@ def test_dev_branch_ko(self, display_mock): display_mock.assert_called_with( c, '12345', - '|datadog-dogstatsd-x86_64-rpm|131.00MB|❌|141.00MB|10.00MB|10.00MB|\n|datadog-dogstatsd-x86_64-suse|131.00MB|❌|141.00MB|10.00MB|10.00MB|\n|datadog-iot-agent-x86_64-rpm|131.00MB|❌|141.00MB|10.00MB|10.00MB|\n|datadog-iot-agent-x86_64-suse|131.00MB|❌|141.00MB|10.00MB|10.00MB|\n|datadog-iot-agent-aarch64-rpm|131.00MB|❌|141.00MB|10.00MB|10.00MB|\n|datadog-agent-x86_64-rpm|1.00MB|⚠️|141.00MB|140.00MB|140.00MB|\n|datadog-agent-x86_64-suse|1.00MB|⚠️|141.00MB|140.00MB|140.00MB|\n|datadog-agent-aarch64-rpm|1.00MB|⚠️|141.00MB|140.00MB|140.00MB|\n|datadog-dogstatsd-amd64-deb|-9.96MB|✅|0.04MB|10.00MB|10.00MB|\n|datadog-dogstatsd-arm64-deb|-9.96MB|✅|0.04MB|10.00MB|10.00MB|\n|datadog-iot-agent-amd64-deb|-9.96MB|✅|0.04MB|10.00MB|10.00MB|\n|datadog-iot-agent-arm64-deb|-9.96MB|✅|0.04MB|10.00MB|10.00MB|\n|datadog-heroku-agent-amd64-deb|-69.96MB|✅|0.04MB|70.00MB|70.00MB|\n|datadog-agent-amd64-deb|-139.96MB|✅|0.04MB|140.00MB|140.00MB|\n|datadog-agent-arm64-deb|-139.96MB|✅|0.04MB|140.00MB|140.00MB|\n', + '|datadog-dogstatsd-x86_64-rpm|131.00MB|❌|141.00MB|10.00MB|0.50MB|\n|datadog-dogstatsd-x86_64-suse|131.00MB|❌|141.00MB|10.00MB|0.50MB|\n|datadog-iot-agent-x86_64-rpm|131.00MB|❌|141.00MB|10.00MB|0.50MB|\n|datadog-iot-agent-x86_64-suse|131.00MB|❌|141.00MB|10.00MB|0.50MB|\n|datadog-iot-agent-aarch64-rpm|131.00MB|❌|141.00MB|10.00MB|0.50MB|\n|datadog-agent-x86_64-rpm|1.00MB|❌|141.00MB|140.00MB|0.50MB|\n|datadog-agent-x86_64-suse|1.00MB|❌|141.00MB|140.00MB|0.50MB|\n|datadog-agent-aarch64-rpm|1.00MB|❌|141.00MB|140.00MB|0.50MB|\n|datadog-dogstatsd-amd64-deb|-9.96MB|✅|0.04MB|10.00MB|0.50MB|\n|datadog-dogstatsd-arm64-deb|-9.96MB|✅|0.04MB|10.00MB|0.50MB|\n|datadog-iot-agent-amd64-deb|-9.96MB|✅|0.04MB|10.00MB|0.50MB|\n|datadog-iot-agent-arm64-deb|-9.96MB|✅|0.04MB|10.00MB|0.50MB|\n|datadog-heroku-agent-amd64-deb|-69.96MB|✅|0.04MB|70.00MB|0.50MB|\n|datadog-agent-amd64-deb|-139.96MB|✅|0.04MB|140.00MB|0.50MB|\n|datadog-agent-arm64-deb|-139.96MB|✅|0.04MB|140.00MB|0.50MB|\n', '❌ Failed', ) @@ -54,7 +55,7 @@ def test_dev_branch_ok(self, upload_mock, print_mock): run={ 'git merge-base HEAD origin/main': Result('25'), f"dpkg-deb --info {flavor} | grep Installed-Size | cut -d : -f 2 | xargs": Result(42), - f"rpm -qip {flavor} | grep Size | cut -d : -f 2 | xargs": Result(20000000), + f"rpm -qip {flavor} | grep Size | cut -d : -f 2 | xargs": Result(10500000), } ) check_size(c, filename='tasks/unit_tests/testdata/package_sizes_real.json', dry_run=True) 
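# The package and release unit tests in this diff stub shell commands with invoke's MockContext,
# mapping each exact command string to a canned Result. A minimal, self-contained sketch of that
# pattern (the command strings and values here are hypothetical examples):
from invoke.context import MockContext
from invoke.runners import Result

ctx = MockContext(
    run={
        'git merge-base HEAD origin/main': Result('25'),
        'rpm -qip datadog-agent | grep Size | cut -d : -f 2 | xargs': Result('10500000'),
    }
)

# Only commands present in the mapping are allowed; anything else raises.
assert ctx.run('git merge-base HEAD origin/main').stdout == '25'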
@@ -62,6 +63,31 @@ def test_dev_branch_ok(self, upload_mock, print_mock): self.assertEqual(print_mock.call_count, 16) upload_mock.assert_not_called() + @patch.dict( + 'os.environ', + { + 'OMNIBUS_PACKAGE_DIR': 'tasks/unit_tests/testdata/packages', + 'OMNIBUS_PACKAGE_DIR_SUSE': 'tasks/unit_tests/testdata/packages', + 'CI_COMMIT_REF_NAME': 'pikachu', + }, + clear=True, + ) + @patch('tasks.libs.package.size.find_package', new=MagicMock(return_value='datadog-agent')) + @patch('tasks.package.upload_package_sizes', new=MagicMock()) + @patch('tasks.package.display_message') + def test_dev_no_pr_defined(self, display_mock): + flavor = 'datadog-agent' + display_mock.side_effect = AssertionError('CI_COMMIT_BRANCH') + c = MockContext( + run={ + 'git merge-base HEAD origin/main': Result('25'), + f"dpkg-deb --info {flavor} | grep Installed-Size | cut -d : -f 2 | xargs": Result(42), + f"rpm -qip {flavor} | grep Size | cut -d : -f 2 | xargs": Result(10500000), + } + ) + check_size(c, filename='tasks/unit_tests/testdata/package_sizes_real.json', dry_run=True) + display_mock.assert_not_called() + @patch.dict( 'os.environ', { diff --git a/tasks/unit_tests/release_tests.py b/tasks/unit_tests/release_tests.py index bf912ef27c930..5361cc9cb7625 100644 --- a/tasks/unit_tests/release_tests.py +++ b/tasks/unit_tests/release_tests.py @@ -1,5 +1,6 @@ from __future__ import annotations +import re import sys import unittest from collections import OrderedDict @@ -11,6 +12,7 @@ from invoke.exceptions import Exit from tasks import release +from tasks.libs.common.gomodules import GoModule from tasks.libs.releasing.documentation import nightly_entry_for, parse_table, release_entry_for from tasks.libs.releasing.json import ( COMPATIBLE_MAJOR_VERSIONS, @@ -1198,3 +1200,96 @@ def test_changes_integrations_core_warning_branch_out(self, version_mock, print_ ] print_mock.assert_has_calls(calls) self.assertEqual(print_mock.call_count, 2) + + +class TestUpdateModules(unittest.TestCase): + @patch('tasks.release.agent_context', new=MagicMock()) + def test_update_module_no_run_for_optional_in_agent_6(self): + c = MockContext(run=Result("yolo")) + new_e2e = GoModule('test/new-e2e') + new_e2e._dependencies = ['pkg/util/optional', 'pkg/utils/pointer'] + optional = GoModule('pkg/util/optional') + optional._dependencies = [] + pointer = GoModule('pkg/utils/pointer') + pointer._dependencies = [] + with patch('tasks.release.get_default_modules') as mock_modules: + mock_dict = MagicMock() + mock_dict.values.return_value = [new_e2e] + mock_dict.__getitem__.side_effect = [new_e2e, optional, pointer] + mock_modules.return_value = mock_dict + release.update_modules(c, version="6.53.1337") + edit_optional = re.compile(r"pkg/util/optional.*test/new-e2e") + self.assertFalse(any(edit_optional.search(call[0][0]) for call in c.run.call_args_list)) + self.assertEqual(c.run.call_count, 1) + + @patch('tasks.release.agent_context', new=MagicMock()) + def test_update_module_optional_in_agent_7(self): + c = MockContext(run=Result("yolo")) + new_e2e = GoModule('test/new-e2e') + new_e2e._dependencies = ['pkg/util/optional', 'pkg/utils/pointer'] + optional = GoModule('pkg/util/optional') + optional._dependencies = [] + pointer = GoModule('pkg/utils/pointer') + pointer._dependencies = [] + with patch('tasks.release.get_default_modules') as mock_modules: + mock_dict = MagicMock() + mock_dict.values.return_value = [new_e2e] + mock_dict.__getitem__.side_effect = [new_e2e, optional, pointer] + mock_modules.return_value = mock_dict + release.update_modules(c, 
version="7.53.1337") + edit_optional = re.compile(r"pkg/util/optional.*test/new-e2e") + self.assertTrue(any(edit_optional.search(call[0][0]) for call in c.run.call_args_list)) + self.assertEqual(c.run.call_count, 2) + + +class TestTagModules(unittest.TestCase): + @patch('tasks.release.__tag_single_module', new=MagicMock(side_effect=[[str(i)] for i in range(2)])) + @patch('tasks.release.agent_context', new=MagicMock()) + def test_2_tags(self): + c = MockContext(run=Result("yolo")) + with patch('tasks.release.get_default_modules') as mock_modules: + mock_dict = MagicMock() + mock_dict.values.return_value = 2 * [GoModule('pkg/one')] + mock_modules.return_value = mock_dict + release.tag_modules(c, version="version") + self.assertEqual(c.run.call_count, 1) + c.run.assert_called_with("git push origin 0 1") + + @patch('tasks.release.__tag_single_module', new=MagicMock(side_effect=[[str(i)] for i in range(3)])) + @patch('tasks.release.agent_context', new=MagicMock()) + def test_3_tags(self): + c = MockContext(run=Result("yolo")) + with patch('tasks.release.get_default_modules') as mock_modules: + mock_dict = MagicMock() + mock_dict.values.return_value = 3 * [GoModule('pkg/one')] + mock_modules.return_value = mock_dict + release.tag_modules(c, version="version") + self.assertEqual(c.run.call_count, 1) + c.run.assert_called_with("git push origin 0 1 2") + + @patch('tasks.release.__tag_single_module', new=MagicMock(side_effect=[[str(i)] for i in range(4)])) + @patch('tasks.release.agent_context', new=MagicMock()) + def test_4_tags(self): + c = MockContext(run=Result("yolo")) + with patch('tasks.release.get_default_modules') as mock_modules: + mock_dict = MagicMock() + mock_dict.values.return_value = 4 * [GoModule('pkg/one')] + mock_modules.return_value = mock_dict + release.tag_modules(c, version="version") + self.assertEqual(c.run.call_count, 2) + calls = [ + call("git push origin 0 1 2"), + call("git push origin 3"), + ] + c.run.assert_has_calls(calls) + + @patch('tasks.release.__tag_single_module', new=MagicMock(side_effect=[[str(i)] for i in range(100)])) + @patch('tasks.release.agent_context', new=MagicMock()) + def test_100_tags(self): + c = MockContext(run=Result("yolo")) + with patch('tasks.release.get_default_modules') as mock_modules: + mock_dict = MagicMock() + mock_dict.values.return_value = 100 * [GoModule('pkg/one')] + mock_modules.return_value = mock_dict + release.tag_modules(c, version="version") + self.assertEqual(c.run.call_count, 34) diff --git a/tasks/unit_tests/testdata/collector/awscontainerinsightreceiver_manifest.yaml b/tasks/unit_tests/testdata/collector/awscontainerinsightreceiver_manifest.yaml index 8bdd044b92298..4743a9fdac483 100644 --- a/tasks/unit_tests/testdata/collector/awscontainerinsightreceiver_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/awscontainerinsightreceiver_manifest.yaml @@ -3,10 +3,10 @@ dist: description: Manifest that contains awscontainerinsight receiver (should fail collector_tests.py) extensions: - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + - gomod: 
github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 receivers: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/awscontainerinsightreceiver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 diff --git a/tasks/unit_tests/testdata/collector/datadogconnector_manifest.yaml b/tasks/unit_tests/testdata/collector/datadogconnector_manifest.yaml index 6789bc23b3fc3..106ba1125f3fb 100644 --- a/tasks/unit_tests/testdata/collector/datadogconnector_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/datadogconnector_manifest.yaml @@ -3,12 +3,12 @@ dist: description: Manifest that contains datadog connector (should get stripped and pass collector_tests.py) extensions: - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 receivers: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 connectors: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/datadogconnector v0.118.0 diff --git a/tasks/unit_tests/testdata/collector/datadogexporter_manifest.yaml b/tasks/unit_tests/testdata/collector/datadogexporter_manifest.yaml index 3a96edc271b5a..df29adf5e8f2b 100644 --- a/tasks/unit_tests/testdata/collector/datadogexporter_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/datadogexporter_manifest.yaml @@ -3,12 +3,12 @@ dist: description: Manifest that contains datadog exporter (should get stripped and pass collector_tests.py) extensions: - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 exporters: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/datadogexporter v0.118.0 receivers: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 diff --git 
a/tasks/unit_tests/testdata/collector/healthcheckextension_manifest.yaml b/tasks/unit_tests/testdata/collector/healthcheckextension_manifest.yaml index 848c2e22024c4..cdf38ee306c19 100644 --- a/tasks/unit_tests/testdata/collector/healthcheckextension_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/healthcheckextension_manifest.yaml @@ -3,8 +3,8 @@ dist: description: Manifest that does not contain health check extension (should fail collector_tests.py) extensions: - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 receivers: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 diff --git a/tasks/unit_tests/testdata/collector/mismatched_versions_manifest.yaml b/tasks/unit_tests/testdata/collector/mismatched_versions_manifest.yaml index a4f2fb7829030..a0ee59fc37161 100644 --- a/tasks/unit_tests/testdata/collector/mismatched_versions_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/mismatched_versions_manifest.yaml @@ -1,12 +1,12 @@ --- dist: description: Manifest that has mismatched otelcol and component versions (should fail collector_tests.py) - otelcol_version: 0.115.0 + otelcol_version: 0.118.0 extensions: - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.99.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 receivers: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 diff --git a/tasks/unit_tests/testdata/collector/pprofextension_manifest.yaml b/tasks/unit_tests/testdata/collector/pprofextension_manifest.yaml index 82bb7e9059be7..ff5d453819226 100644 --- a/tasks/unit_tests/testdata/collector/pprofextension_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/pprofextension_manifest.yaml @@ -3,8 +3,8 @@ dist: description: Manifest that does not contain pprof extension (should fail collector_tests.py) extensions: - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 receivers: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 diff --git a/tasks/unit_tests/testdata/collector/prometheusreceiver_manifest.yaml b/tasks/unit_tests/testdata/collector/prometheusreceiver_manifest.yaml index 
02b932462e778..aa917cb10ab81 100644 --- a/tasks/unit_tests/testdata/collector/prometheusreceiver_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/prometheusreceiver_manifest.yaml @@ -3,6 +3,6 @@ dist: description: Manifest that does not contain prometheus receiver (should fail collector_tests.py) extensions: - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 diff --git a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml index 6c3849f10dc0e..d97652a38f16b 100644 --- a/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml +++ b/tasks/unit_tests/testdata/collector/valid_datadog_manifest.yaml @@ -3,55 +3,55 @@ dist: module: github.com/DataDog/datadog-agent/comp/otelcol/collector-contrib/impl name: otelcol-contrib description: Valid (default) datadog converged Agent ocb manifest (should pass collector_tests.py) - version: 0.115.0 + version: 0.118.0 output_path: ./comp/otelcol/collector-contrib/impl - otelcol_version: 0.115.0 + otelcol_version: 0.118.0 extensions: - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.115.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/dockerobserver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecsobserver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/ecstaskobserver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/hostobserver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/observer/k8sobserver v0.118.0 exporters: - - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.115.0 - - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.115.0 - - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.115.0 - - gomod: 
go.opentelemetry.io/collector/exporter/otlphttpexporter v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.115.0 + - gomod: go.opentelemetry.io/collector/exporter/debugexporter v0.118.0 + - gomod: go.opentelemetry.io/collector/exporter/nopexporter v0.118.0 + - gomod: go.opentelemetry.io/collector/exporter/otlpexporter v0.118.0 + - gomod: go.opentelemetry.io/collector/exporter/otlphttpexporter v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/exporter/sapmexporter v0.118.0 processors: - - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.115.0 - - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.115.0 + - gomod: go.opentelemetry.io/collector/processor/batchprocessor v0.118.0 + - gomod: go.opentelemetry.io/collector/processor/memorylimiterprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/attributesprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/cumulativetodeltaprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/filterprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/groupbyattrsprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/k8sattributesprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/probabilisticsamplerprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourcedetectionprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/resourceprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/routingprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/tailsamplingprocessor v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/processor/transformprocessor v0.118.0 receivers: - - gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.115.0 - - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver 
v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.115.0 + - gomod: go.opentelemetry.io/collector/receiver/nopreceiver v0.118.0 + - gomod: go.opentelemetry.io/collector/receiver/otlpreceiver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/filelogreceiver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/fluentforwardreceiver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/hostmetricsreceiver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/jaegerreceiver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/receivercreator v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/zipkinreceiver v0.118.0 connectors: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/connector/spanmetricsconnector v0.118.0 # When adding a replace, add a comment before it to document why it's needed and when it can be removed replaces: diff --git a/tasks/unit_tests/testdata/collector/valid_manifest_without_specified_version.yaml b/tasks/unit_tests/testdata/collector/valid_manifest_without_specified_version.yaml index d49a640f62279..43a1e963fdb1c 100644 --- a/tasks/unit_tests/testdata/collector/valid_manifest_without_specified_version.yaml +++ b/tasks/unit_tests/testdata/collector/valid_manifest_without_specified_version.yaml @@ -6,12 +6,12 @@ dist: output_path: ./comp/otelcol/collector-contrib/impl extensions: - - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 + - gomod: go.opentelemetry.io/collector/extension/zpagesextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 receivers: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 # When adding a replace, add a comment before it to document why it's needed and when it can be removed replaces: diff --git a/tasks/unit_tests/testdata/collector/zpagesextension_manifest.yaml b/tasks/unit_tests/testdata/collector/zpagesextension_manifest.yaml index 272c5cd90204d..68b14e9495db4 100644 --- a/tasks/unit_tests/testdata/collector/zpagesextension_manifest.yaml +++ 
b/tasks/unit_tests/testdata/collector/zpagesextension_manifest.yaml @@ -3,8 +3,8 @@ dist: description: manifest without zpages extension (should fail collector_tests.py) extensions: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.115.0 - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/healthcheckextension v0.118.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/extension/pprofextension v0.118.0 receivers: - - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.115.0 + - gomod: github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver v0.118.0 diff --git a/tasks/unit_tests/testdata/flakes_4.yaml b/tasks/unit_tests/testdata/flakes_4.yaml new file mode 100644 index 0000000000000..faacbb2ef6cad --- /dev/null +++ b/tasks/unit_tests/testdata/flakes_4.yaml @@ -0,0 +1,2 @@ +pkg/serverless/trace: + - TestLoadConfigShouldBeFast diff --git a/tasks/unit_tests/testdata/test_output_failure_flaky_panic.json b/tasks/unit_tests/testdata/test_output_failure_flaky_panic.json new file mode 100644 index 0000000000000..1e982b2461985 --- /dev/null +++ b/tasks/unit_tests/testdata/test_output_failure_flaky_panic.json @@ -0,0 +1,23 @@ +{"Time":"2025-01-07T10:50:32.435732+01:00","Action":"start","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace"} +{"Time":"2025-01-07T10:50:32.435851+01:00","Action":"start","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan"} +{"Time":"2025-01-07T10:50:32.435878+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan","Output":"testing: warning: no tests to run\n"} +{"Time":"2025-01-07T10:50:32.435908+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan","Output":"PASS\n"} +{"Time":"2025-01-07T10:50:32.435913+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan","Output":"ok \tgithub.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan\t(cached) [no tests to run]\n"} +{"Time":"2025-01-07T10:50:32.435919+01:00","Action":"pass","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan","Elapsed":0} +{"Time":"2025-01-07T10:50:32.435982+01:00","Action":"start","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation"} +{"Time":"2025-01-07T10:50:32.436028+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation","Output":"testing: warning: no tests to run\n"} +{"Time":"2025-01-07T10:50:32.436035+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation","Output":"PASS\n"} +{"Time":"2025-01-07T10:50:32.436041+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation","Output":"ok \tgithub.com/DataDog/datadog-agent/pkg/serverless/trace/propagation\t(cached) [no tests to run]\n"} +{"Time":"2025-01-07T10:50:32.436046+01:00","Action":"pass","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation","Elapsed":0} +{"Time":"2025-01-07T10:50:33.460941+01:00","Action":"run","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast"} 
+{"Time":"2025-01-07T10:50:33.461002+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"=== RUN TestLoadConfigShouldBeFast\n"} +{"Time":"2025-01-07T10:50:33.461097+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":" trace_test.go:104: flakytest: this is a known flaky test\n"} +{"Time":"2025-01-07T10:50:33.464674+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"panic: toto\n"} +{"Time":"2025-01-07T10:50:33.464694+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"\n"} +{"Time":"2025-01-07T10:50:33.464697+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"goroutine 213 [running]:\n"} +{"Time":"2025-01-07T10:50:33.464713+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"github.com/DataDog/datadog-agent/pkg/serverless/trace.TestLoadConfigShouldBeFast.func1()\n"} +{"Time":"2025-01-07T10:50:33.464726+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"\t/Users/kevin.fairise/go/src/github.com/DataDog/datadog-agent/pkg/serverless/trace/trace_test.go:106 +0x2c\n"} +{"Time":"2025-01-07T10:50:33.464728+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"created by github.com/DataDog/datadog-agent/pkg/serverless/trace.TestLoadConfigShouldBeFast in goroutine 212\n"} +{"Time":"2025-01-07T10:50:33.46473+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"\t/Users/kevin.fairise/go/src/github.com/DataDog/datadog-agent/pkg/serverless/trace/trace_test.go:105 +0x44\n"} +{"Time":"2025-01-07T10:50:33.465562+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Output":"FAIL\tgithub.com/DataDog/datadog-agent/pkg/serverless/trace\t1.029s\n"} +{"Time":"2025-01-07T10:50:33.465624+01:00","Action":"fail","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Elapsed":1.03} diff --git a/tasks/unit_tests/testdata/test_output_failure_panic.json b/tasks/unit_tests/testdata/test_output_failure_panic.json new file mode 100644 index 0000000000000..966b2dfbde664 --- /dev/null +++ b/tasks/unit_tests/testdata/test_output_failure_panic.json @@ -0,0 +1,22 @@ +{"Time":"2025-01-07T10:50:32.435732+01:00","Action":"start","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace"} +{"Time":"2025-01-07T10:50:32.435851+01:00","Action":"start","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan"} +{"Time":"2025-01-07T10:50:32.435878+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan","Output":"testing: warning: no tests to run\n"} +{"Time":"2025-01-07T10:50:32.435908+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan","Output":"PASS\n"} +{"Time":"2025-01-07T10:50:32.435913+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan","Output":"ok 
\tgithub.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan\t(cached) [no tests to run]\n"} +{"Time":"2025-01-07T10:50:32.435919+01:00","Action":"pass","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/inferredspan","Elapsed":0} +{"Time":"2025-01-07T10:50:32.435982+01:00","Action":"start","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation"} +{"Time":"2025-01-07T10:50:32.436028+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation","Output":"testing: warning: no tests to run\n"} +{"Time":"2025-01-07T10:50:32.436035+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation","Output":"PASS\n"} +{"Time":"2025-01-07T10:50:32.436041+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation","Output":"ok \tgithub.com/DataDog/datadog-agent/pkg/serverless/trace/propagation\t(cached) [no tests to run]\n"} +{"Time":"2025-01-07T10:50:32.436046+01:00","Action":"pass","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace/propagation","Elapsed":0} +{"Time":"2025-01-07T10:50:33.460941+01:00","Action":"run","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast"} +{"Time":"2025-01-07T10:50:33.461002+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"=== RUN TestLoadConfigShouldBeFast\n"} +{"Time":"2025-01-07T10:50:33.464674+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"panic: toto\n"} +{"Time":"2025-01-07T10:50:33.464694+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"\n"} +{"Time":"2025-01-07T10:50:33.464697+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"goroutine 213 [running]:\n"} +{"Time":"2025-01-07T10:50:33.464713+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"github.com/DataDog/datadog-agent/pkg/serverless/trace.TestLoadConfigShouldBeFast.func1()\n"} +{"Time":"2025-01-07T10:50:33.464726+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"\t/Users/kevin.fairise/go/src/github.com/DataDog/datadog-agent/pkg/serverless/trace/trace_test.go:106 +0x2c\n"} +{"Time":"2025-01-07T10:50:33.464728+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"created by github.com/DataDog/datadog-agent/pkg/serverless/trace.TestLoadConfigShouldBeFast in goroutine 212\n"} +{"Time":"2025-01-07T10:50:33.46473+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Test":"TestLoadConfigShouldBeFast","Output":"\t/Users/kevin.fairise/go/src/github.com/DataDog/datadog-agent/pkg/serverless/trace/trace_test.go:105 +0x44\n"} +{"Time":"2025-01-07T10:50:33.465562+01:00","Action":"output","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Output":"FAIL\tgithub.com/DataDog/datadog-agent/pkg/serverless/trace\t1.029s\n"} 
+{"Time":"2025-01-07T10:50:33.465624+01:00","Action":"fail","Package":"github.com/DataDog/datadog-agent/pkg/serverless/trace","Elapsed":1.03} diff --git a/tasks/unit_tests/testwasher_tests.py b/tasks/unit_tests/testwasher_tests.py index 8b25537454e02..6cd062d4ee0cd 100644 --- a/tasks/unit_tests/testwasher_tests.py +++ b/tasks/unit_tests/testwasher_tests.py @@ -70,6 +70,45 @@ def test_should_not_be_considered_flaky(self): {"github.com/DataDog/datadog-agent/test/new-e2e/tests/containers": {"TestEKSSuite"}}, ) + def test_flaky_panicking_test(self): + test_washer = TestWasher( + test_output_json_file="test_output_failure_flaky_panic.json", + flakes_file_path="tasks/unit_tests/testdata/flakes_2.yaml", + ) + module_path = "tasks/unit_tests/testdata" + failing_tests, marked_flaky_tests = test_washer.parse_test_results(module_path) + non_flaky_failing_tests = test_washer.get_non_flaky_failing_tests( + failing_tests=failing_tests, flaky_marked_tests=marked_flaky_tests + ) + self.assertEqual(non_flaky_failing_tests, {}) + + def test_non_flaky_panicking_test(self): + test_washer = TestWasher( + test_output_json_file="test_output_failure_panic.json", + flakes_file_path="tasks/unit_tests/testdata/flakes_2.yaml", + ) + module_path = "tasks/unit_tests/testdata" + failing_tests, marked_flaky_tests = test_washer.parse_test_results(module_path) + non_flaky_failing_tests = test_washer.get_non_flaky_failing_tests( + failing_tests=failing_tests, flaky_marked_tests=marked_flaky_tests + ) + self.assertEqual( + non_flaky_failing_tests, + {'github.com/DataDog/datadog-agent/pkg/serverless/trace': {'TestLoadConfigShouldBeFast'}}, + ) + + def test_flaky_panicking_flakesyaml_test(self): + test_washer = TestWasher( + test_output_json_file="test_output_failure_panic.json", + flakes_file_path="tasks/unit_tests/testdata/flakes_4.yaml", + ) + module_path = "tasks/unit_tests/testdata" + failing_tests, marked_flaky_tests = test_washer.parse_test_results(module_path) + non_flaky_failing_tests = test_washer.get_non_flaky_failing_tests( + failing_tests=failing_tests, flaky_marked_tests=marked_flaky_tests + ) + self.assertEqual(non_flaky_failing_tests, {}) + class TestMergeKnownFlakes(unittest.TestCase): def test_with_shared_keys(self): diff --git a/tasks/winbuild.py b/tasks/winbuild.py index 66f0cb02b68a1..79d0a7e3e28e4 100644 --- a/tasks/winbuild.py +++ b/tasks/winbuild.py @@ -21,6 +21,7 @@ def agent_package( flavor=AgentFlavor.base.name, release_version="nightly-a7", skip_deps=False, + build_upgrade=False, ): # Build agent omnibus_build( @@ -31,7 +32,7 @@ def agent_package( ) # Package Agent into MSI - build_agent_msi(ctx, release_version=release_version) + build_agent_msi(ctx, release_version=release_version, build_upgrade=build_upgrade) # Package MSI into OCI if AgentFlavor[flavor] == AgentFlavor.base: diff --git a/tasks/winbuildscripts/Build-AgentPackages.ps1 b/tasks/winbuildscripts/Build-AgentPackages.ps1 index 34c2188c8249b..60aa69ee5d02e 100644 --- a/tasks/winbuildscripts/Build-AgentPackages.ps1 +++ b/tasks/winbuildscripts/Build-AgentPackages.ps1 @@ -11,6 +11,11 @@ Specifies the release version of the build. Default is the value of the environm .PARAMETER Flavor Specifies the flavor of the agent. Default is the value of the environment variable AGENT_FLAVOR. +.PARAMETER BuildUpgrade +Specifies whether to build the upgrade package. Default is false. + +Use this options to build an aditional MSI for testing upgrading the MSI. + .PARAMETER BuildOutOfSource Specifies whether to build out of source. Default is $false. 
@@ -38,7 +43,8 @@ param( [nullable[bool]] $CheckGoVersion, [bool] $InstallDeps = $true, [string] $ReleaseVersion = $env:RELEASE_VERSION, - [string] $Flavor = $env:AGENT_FLAVOR + [string] $Flavor = $env:AGENT_FLAVOR, + [bool] $BuildUpgrade = $false ) . "$PSScriptRoot\common.ps1" @@ -63,6 +69,10 @@ Invoke-BuildScript ` $env:AGENT_FLAVOR=$Flavor } + if ($BuildUpgrade) { + $inv_args += "--build-upgrade" + } + Write-Host "inv -e winbuild.agent-package $inv_args" inv -e winbuild.agent-package @inv_args if ($LASTEXITCODE -ne 0) { diff --git a/tasks/winbuildscripts/Invoke-IntegrationTests.ps1 b/tasks/winbuildscripts/Invoke-IntegrationTests.ps1 new file mode 100644 index 0000000000000..062631d399ff3 --- /dev/null +++ b/tasks/winbuildscripts/Invoke-IntegrationTests.ps1 @@ -0,0 +1,45 @@ +<# +.SYNOPSIS +Invoke the integration tests. + +.DESCRIPTION +Invoke the integration tests, with options to configure the build environment. + +.PARAMETER BuildOutOfSource +Specifies whether to build out of source. Default is $false. + +Use this option in the CI to keep the job directory clean and avoid conflicts/stale data. +Use this option in Hyper-V based containers to improve build performance. + +.PARAMETER InstallDeps +Specifies whether to install dependencies (python requirements, go deps, etc.). Default is $true. + +.PARAMETER CheckGoVersion +Specifies whether to check the Go version. If not provided, it defaults to the value of the environment variable GO_VERSION_CHECK or $true if the environment variable is not set. + +#> +param( + [bool] $BuildOutOfSource = $false, + [nullable[bool]] $CheckGoVersion, + [bool] $InstallDeps = $true +) + +. "$PSScriptRoot\common.ps1" + +Invoke-BuildScript ` + -BuildOutOfSource $BuildOutOfSource ` + -InstallDeps $InstallDeps ` + -InstallTestingDeps $InstallDeps ` + -CheckGoVersion $CheckGoVersion ` + -Command { + + & .\tasks\winbuildscripts\pre-go-build.ps1 + + & inv -e integration-tests + $err = $LASTEXITCODE + if($err -ne 0){ + Write-Host -ForegroundColor Red "test failed $err" + exit $err + } + Write-Host Test passed +} diff --git a/tasks/winbuildscripts/Invoke-Linters.ps1 b/tasks/winbuildscripts/Invoke-Linters.ps1 new file mode 100644 index 0000000000000..5e077f3bfb996 --- /dev/null +++ b/tasks/winbuildscripts/Invoke-Linters.ps1 @@ -0,0 +1,69 @@ +<# +.SYNOPSIS +Invoke the linters. + +.DESCRIPTION +Invoke the linters, with options to configure the build environment. + +Runs linters for rtloader, Go, and MSI .NET. + +.PARAMETER BuildOutOfSource +Specifies whether to build out of source. Default is $false. + +Use this option in the CI to keep the job directory clean and avoid conflicts/stale data. +Use this option in Hyper-V based containers to improve build performance. + +.PARAMETER InstallDeps +Specifies whether to install dependencies (python requirements, go deps, etc.). Default is $true. + +.PARAMETER CheckGoVersion +Specifies whether to check the Go version. If not provided, it defaults to the value of the environment variable GO_VERSION_CHECK or $true if the environment variable is not set. + +#> +param( + [bool] $BuildOutOfSource = $false, + [nullable[bool]] $CheckGoVersion, + [bool] $InstallDeps = $true +) + +. 
"$PSScriptRoot\common.ps1" + +Invoke-BuildScript ` + -BuildOutOfSource $BuildOutOfSource ` + -InstallDeps $InstallDeps ` + -InstallTestingDeps $InstallDeps ` + -CheckGoVersion $CheckGoVersion ` + -Command { + + & .\tasks\winbuildscripts\pre-go-build.ps1 + + # Lint rtloader + & inv -e rtloader.format --raise-if-changed + $err = $LASTEXITCODE + Write-Host Format result is $err + if($err -ne 0){ + Write-Host -ForegroundColor Red "rtloader format failed $err" + exit $err + } + + # Lint Go + & inv -e linter.go --debug + $err = $LASTEXITCODE + Write-Host Go linter result is $err + if($err -ne 0){ + Write-Host -ForegroundColor Red "go linter failed $err" + exit $err + } + + # Lint MSI .NET + $timeTaken = Measure-Command { + & dotnet format --verify-no-changes .\\tools\\windows\\DatadogAgentInstaller + $err = $LASTEXITCODE + Write-Host Dotnet linter result is $err + if($err -ne 0){ + Write-Host -ForegroundColor Red "dotnet linter failed $err" + exit $err + } + } + Write-Host "Dotnet linter run time: $($timeTaken.TotalSeconds) seconds" +} diff --git a/tasks/winbuildscripts/Invoke-UnitTests.ps1 b/tasks/winbuildscripts/Invoke-UnitTests.ps1 new file mode 100644 index 0000000000000..3fc68e70e5072 --- /dev/null +++ b/tasks/winbuildscripts/Invoke-UnitTests.ps1 @@ -0,0 +1,162 @@ +<# +.SYNOPSIS +Invoke the unit tests. + +.DESCRIPTION +Invoke the unit tests, with options to configure the build environment. + +Runs unit tests for rtloader, Go, and MSI .NET. + +Can upload coverage reports to Codecov and test results to Datadog CI. + +.PARAMETER BuildOutOfSource +Specifies whether to build out of source. Default is $false. + +Use this option in the CI to keep the job directory clean and avoid conflicts/stale data. +Use this option in Hyper-V based containers to improve build performance. + +.PARAMETER InstallDeps +Specifies whether to install dependencies (python requirements, go deps, etc.). Default is $true. + +.PARAMETER CheckGoVersion +Specifies whether to check the Go version. If not provided, it defaults to the value of the environment variable GO_VERSION_CHECK or $true if the environment variable is not set. + +.PARAMETER UploadCoverage +Specifies whether to upload coverage reports to Codecov. Default is $false. + +Requires the CODECOV_TOKEN environment variable to be set. + +.PARAMETER UploadTestResults +Specifies whether to upload test results to Datadog CI. Default is $false. + +Requires the API_KEY_ORG2 environment variable to be set. + +Requires JUNIT_TAR environment variable to be set. + +#> +param( + [bool] $BuildOutOfSource = $false, + [nullable[bool]] $CheckGoVersion, + [bool] $InstallDeps = $true, + [bool] $UploadCoverage = $false, + [bool] $UploadTestResults = $false +) + +. 
"$PSScriptRoot\common.ps1" + +Invoke-BuildScript ` + -BuildOutOfSource $BuildOutOfSource ` + -InstallDeps $InstallDeps ` + -InstallTestingDeps $InstallDeps ` + -CheckGoVersion $CheckGoVersion ` + -Command { + + # pre-reqs + & { + # Check required environment variables + if ([string]::IsNullOrEmpty($Env:TEST_EMBEDDED_PY3)) { + Write-Host -ForegroundColor Red "TEST_EMBEDDED_PY3 environment variable is required for running embedded Python 3 tests" + exit 1 + } + if ($UploadCoverage) { + if ([string]::IsNullOrEmpty($Env:CODECOV_TOKEN)) { + Write-Host -ForegroundColor Red "CODECOV_TOKEN environment variable is required for uploading coverage reports to Codecov" + exit 1 + } + } + if ($UploadTestResults) { + if ([string]::IsNullOrEmpty($Env:API_KEY_ORG2)) { + Write-Host -ForegroundColor Red "API_KEY_ORG2 environment variable is required for junit upload to Datadog CI" + exit 1 + } + if ([string]::IsNullOrEmpty($Env:JUNIT_TAR)) { + Write-Host -ForegroundColor Red "JUNIT_TAR environment variable is required for junit upload to Datadog CI" + exit 1 + } + } + # Add the dev\lib directory to the PATH so that the go tests can find the rtloader DLL + # TODO: This is a weird way to load the rtloader DLLs + $env:PATH="$(Get-Location)\dev\lib;$env:PATH" + # Create ddagentuser for secrets tests if it doesn't already exist + if (-not (Get-LocalUser -Name "ddagentuser" -ErrorAction SilentlyContinue)) { + $Password = ConvertTo-SecureString "dummyPW_:-gch6Rejae9" -AsPlainText -Force + New-LocalUser -Name "ddagentuser" -Description "Test user for the secrets feature on windows." -Password $Password + } + # Generate the datadog.yaml config file to be used in integration tests + & inv -e agent.generate-config --build-type="agent-py2py3" --output-file="./datadog.yaml" + # Build inputs needed for go builds + & .\tasks\winbuildscripts\pre-go-build.ps1 + } + + # MSI unit tests + if ($Env:DEBUG_CUSTOMACTION) { + & inv -e msi.test --debug + } else { + & inv -e msi.test + } + $err = $LASTEXITCODE + Write-Host Test result is $err + if($err -ne 0){ + Write-Host -ForegroundColor Red "Windows installer unit test failed $err" + exit $err + } + + # rtloader unit tests + & inv -e rtloader.test + $err = $LASTEXITCODE + Write-Host rtloader test result is $err + if($err -ne 0){ + Write-Host -ForegroundColor Red "rtloader test failed $err" + exit $err + } + + # Sanity check that the core agent can build + & inv -e agent.build + $err = $LASTEXITCODE + if($err -ne 0){ + Write-Host -ForegroundColor Red "Agent build failed $err" + exit $err + } + + # Go unit tests + $test_output_file = if ($Env:TEST_OUTPUT_FILE) { $Env:TEST_OUTPUT_FILE } else { "test_output.json" } + $TEST_WASHER_FLAG="" + if ($Env:TEST_WASHER) { + $TEST_WASHER_FLAG="--test-washer" + } + $Env:Python3_ROOT_DIR=$Env:TEST_EMBEDDED_PY3 + & inv -e test --junit-tar="$Env:JUNIT_TAR" ` + --race --profile --rerun-fails=2 --coverage --cpus 8 ` + --python-home-3=$Env:Python3_ROOT_DIR ` + --save-result-json C:\mnt\$test_output_file ` + --build-stdlib ` + $TEST_WASHER_FLAG ` + $Env:EXTRA_OPTS + $err = $LASTEXITCODE + If ($err -ne 0) { + exit $err + } + + if ($UploadCoverage) { + # 1. Upload coverage reports to Codecov + $Env:CODECOV_TOKEN=$(Get-VaultSecret -parameterName "$Env:CODECOV_TOKEN") + & inv -e coverage.upload-to-codecov $Env:COVERAGE_CACHE_FLAG + if($LASTEXITCODE -ne 0){ + Write-Host -ForegroundColor Red "coverage upload failed $err" + } + } + if ($UploadTestResults) { + # 2. 
Upload junit files + # Copy test files to c:\mnt for further gitlab upload + Get-ChildItem -Filter "junit-out-*.xml" -Recurse | ForEach-Object { + Copy-Item -Path $_.FullName -Destination C:\mnt + } + $Env:DATADOG_API_KEY=$(Get-VaultSecret -parameterName "$Env:API_KEY_ORG2") + & inv -e junit-upload --tgz-path $Env:JUNIT_TAR + if($LASTEXITCODE -ne 0){ + Write-Host -ForegroundColor Red "junit upload failed $LASTEXITCODE" + } + } + + Write-Host Test passed +} diff --git a/tasks/winbuildscripts/README.md b/tasks/winbuildscripts/README.md new file mode 100644 index 0000000000000..38e34e75838a7 --- /dev/null +++ b/tasks/winbuildscripts/README.md @@ -0,0 +1,51 @@ +# Windows Build Scripts + +The scripts in this directory are entrypoints to multi-step tasks that are performed by our CI jobs. Many scripts use the `Invoke-BuildScript` helper function to set up the build environment and install dependencies before executing their unique steps. + +## General Usage + +Each script is intended to function in the CI pipeline as well as locally, and has options to facilitate each use case. Run `Get-Help